mirror of https://github.com/hardkernel/linux.git
Merge tag 'v3.10.103'
This is the 3.10.103 stable release
@@ -255,19 +255,23 @@ scmd->allowed.

3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- shost->host_failed--
- clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
CONCURRENCY: at most one thread per separate eh_work_q to
keep queue manipulation lockless

4. EH completes
ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
layer of failure.
layer of failure. May be called concurrently but must have
no more than one thread per separate eh_work_q to
manipulate the queue locklessly
- scmd is removed from eh_done_q and scmd->eh_entry is cleared
- if retry is necessary, scmd is requeued using
scsi_queue_insert()
- otherwise, scsi_finish_command() is invoked for scmd
- zero shost->host_failed
LOCKING: queue or finish function performs appropriate locking
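
A minimal sketch of why step 3 can stay lockless (illustrative only, not the
kernel's actual implementation; eh_finish_scmd is a hypothetical helper):
each recovery thread owns one eh_work_q/eh_done_q pair, so an scmd moves
between them with plain list operations and no lock.

    /* EH-finish one scmd: no lock needed, since only the owning
     * recovery thread ever touches this queue pair. */
    static void eh_finish_scmd(struct scsi_cmnd *scmd, struct list_head *done_q)
    {
            scmd->eh_eflags = 0;                      /* clear EH flags */
            list_move_tail(&scmd->eh_entry, done_q);  /* eh_work_q -> eh_done_q */
    }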

@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
- nr_open
- overflowuid
- overflowgid
- pipe-user-pages-hard
- pipe-user-pages-soft
- protected_hardlinks
- protected_symlinks
- suid_dumpable

@@ -159,6 +161,27 @@ The default is 65534.

==============================================================

pipe-user-pages-hard:

Maximum total number of pages a non-privileged user may allocate for pipes.
Once this limit is reached, no new pipes may be allocated until usage goes
below the limit again. When set to 0, no limit is applied, which is the default
setting.

==============================================================

pipe-user-pages-soft:

Maximum total number of pages a non-privileged user may allocate for pipes
before the pipe size gets limited to a single page. Once this limit is reached,
new pipes will be limited to a single page in size for this user in order to
limit total memory usage, and trying to increase them using fcntl() will be
denied until usage goes below the limit again. The default value allows
allocating up to 1024 pipes at their default size. When set to 0, no limit is
applied.
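
A worked illustration (not part of the kernel documentation): at the default
pipe capacity of 64 KiB, i.e. 16 pages, the default soft limit of 16384 pages
is what yields the 1024 default-size pipes mentioned above. The sketch below
assumes Linux >= 2.6.35 for F_SETPIPE_SZ; exact errno values can vary by
kernel version.

    /* Keep opening pipes and enlarging them to 1 MiB; once this user's
     * page total crosses pipe-user-pages-soft, the fcntl() is denied. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fds[2];
            for (int i = 0; i < 2048; i++) {
                    if (pipe(fds) < 0) {    /* fd or hard limit reached */
                            perror("pipe");
                            return 1;
                    }
                    if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0) {
                            printf("pipe %d: enlargement denied, soft limit reached\n", i);
                            return 0;
                    }
            }
            return 0;
    }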

==============================================================

protected_hardlinks:

A long-standing class of security issues is the hardlink-based

Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 102
SUBLEVEL = 103
EXTRAVERSION =
NAME = TOSSUG Baby Fish

@@ -131,7 +131,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
* prologue is set up (callee regs saved and then fp set, not the other
* way around
*/
pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;

#endif

@@ -219,7 +219,7 @@ ex_saved_reg1:
#ifdef CONFIG_SMP
sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
GET_CPU_ID r0 ; get to per cpu scratch mem,
lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
add r0, @ex_saved_reg1, r0
#else
st r0, [@ex_saved_reg1]

@@ -239,7 +239,7 @@ ex_saved_reg1:
.macro TLBMISS_RESTORE_REGS
#ifdef CONFIG_SMP
GET_CPU_ID r0 ; get to per cpu scratch mem
lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
add r0, @ex_saved_reg1, r0
ld_s r3, [r0,12]
ld_s r2, [r0, 8]

@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;

vfp_flush_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);

return 0;
}

@@ -275,8 +275,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
mm_segment_t fs;
long ret, err, i;

if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
if (maxevents <= 0 ||
maxevents > (INT_MAX/sizeof(*kbuf)) ||
maxevents > (INT_MAX/sizeof(*events)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
return -EFAULT;
kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;

@@ -313,6 +317,8 @@ asmlinkage long sys_oabi_semtimedop(int semid,

if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
return -EFAULT;
sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
if (!sops)
return -ENOMEM;

@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
" DCACHE [%2], %0\n"
#endif
"2:\n"
: "=&d" (temp), "=&da" (retval)
: "=&d" (temp), "=&d" (retval)
: "da" (m), "bd" (old), "da" (new)
: "cc"
);

@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
} while (1);
}

#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)

static void prom_putchar_ar71xx(unsigned char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));

prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
__raw_writel(ch, base + UART_TX * 4);
prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}

static void prom_putchar_ar933x(unsigned char ch)

@@ -349,6 +349,7 @@ struct kvm_mips_tlb {
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;

@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x7fff8000UL
#define TASK_SIZE 0x80000000UL
#endif

#ifdef __KERNEL__

@@ -45,13 +45,13 @@ typedef struct siginfo {

/* kill() */
struct {
pid_t _pid; /* sender's pid */
__kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;

/* POSIX.1b timers */
struct {
timer_t _tid; /* timer id */
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */

@@ -60,26 +60,26 @@ typedef struct siginfo {

/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
__kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;

/* SIGCHLD */
struct {
pid_t _pid; /* which child */
__kernel_pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
__kernel_clock_t _utime;
__kernel_clock_t _stime;
} _sigchld;

/* IRIX SIGCHLD */
struct {
pid_t _pid; /* which child */
clock_t _utime;
__kernel_pid_t _pid; /* which child */
__kernel_clock_t _utime;
int _status; /* exit code */
clock_t _stime;
__kernel_clock_t _stime;
} _irix_sigchld;

/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */

@@ -349,7 +349,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
PTR sys_keyctl /* 6245 */
PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch

@@ -474,7 +474,7 @@ sys_call_table:
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
PTR sys_keyctl
PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */

@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
/* Jump to guest */
eret
.set pop
EXPORT(__kvm_mips_vcpu_run_end)

VECTOR(MIPSX(exception), unknown)
/*

@@ -343,6 +343,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);

#ifdef MODULE
offset += mips32_GuestExceptionEnd - mips32_GuestException;
memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
__kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
vcpu->arch.vcpu_run = gebase + offset;
#else
vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
#endif

/* Invalidate the icache for these ranges */
mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));

@@ -426,7 +435,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)

kvm_guest_enter();

r = __kvm_mips_vcpu_run(run, vcpu);
r = vcpu->arch.vcpu_run(run, vcpu);

kvm_guest_exit();
local_irq_enable();

@@ -972,8 +972,13 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
__func__, va, vcpu, read_c0_entryhi());
er = EMULATE_FAIL;
preempt_enable();
goto done;
}
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {

@@ -1006,11 +1011,16 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
run, vcpu);
preempt_enable();
goto dont_update_pc;
} else {
/* We fault an entry from the guest tlb to the shadow host TLB */
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
NULL,
NULL);
}
/* We fault an entry from the guest tlb to the shadow host TLB */
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
NULL, NULL)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
er = EMULATE_FAIL;
preempt_enable();
goto done;
}
}
} else {

@@ -1821,8 +1831,13 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
#endif
/* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
NULL);
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
NULL, NULL)) {
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
read_c0_entryhi());
er = EMULATE_FAIL;
}
}
}

@@ -27,6 +27,8 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */

extern char __kvm_mips_vcpu_run_end[];

#define C_TI (_ULCAST_(1) << 30)

#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)

@@ -312,7 +312,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
}

gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
if (gfn >= kvm->arch.guest_pmap_npages) {
if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
gfn, badvaddr);
kvm_mips_dump_host_tlbs();

@@ -397,22 +397,39 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
struct kvm *kvm = vcpu->kvm;
pfn_t pfn0, pfn1;
gfn_t gfn0, gfn1;
long tlb_lo[2];

tlb_lo[0] = tlb->tlb_lo0;
tlb_lo[1] = tlb->tlb_lo1;

if ((tlb->tlb_hi & VPN2_MASK) == 0) {
pfn0 = 0;
pfn1 = 0;
} else {
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
return -1;
/*
* The commpage address must not be mapped to anything else if the guest
* TLB contains entries nearby, or commpage accesses will break.
*/
if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
VPN2_MASK & (PAGE_MASK << 1)))
tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
return -1;

pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
if (gfn0 >= kvm->arch.guest_pmap_npages ||
gfn1 >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
__func__, gfn0, gfn1, tlb->tlb_hi);
kvm_mips_dump_guest_tlbs(vcpu);
return -1;
}

if (kvm_mips_map_page(kvm, gfn0) < 0)
return -1;

if (kvm_mips_map_page(kvm, gfn1) < 0)
return -1;

pfn0 = kvm->arch.guest_pmap[gfn0];
pfn1 = kvm->arch.guest_pmap[gfn1];

if (hpa0)
*hpa0 = pfn0 << PAGE_SHIFT;

@@ -423,9 +440,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
(tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
(tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);

#ifdef DEBUG
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,

@@ -909,10 +926,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
local_irq_restore(flags);
return KVM_INVALID_INST;
}
kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
&vcpu->arch.
guest_tlb[index],
NULL, NULL);
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
&vcpu->arch.guest_tlb[index],
NULL, NULL)) {
kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, opc, index, vcpu,
read_c0_entryhi());
kvm_mips_dump_guest_tlbs(vcpu);
local_irq_restore(flags);
return KVM_INVALID_INST;
}
inst = *(opc);
}
local_irq_restore(flags);

@@ -684,9 +684,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
regs->regs[insn.r_format.rd] =
regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
if (insn.r_format.rd != 0) {
regs->regs[insn.r_format.rd] =
regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
}
/* Fall through */
case jr_op:
*contpc = regs->regs[insn.r_format.rs];

@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
break;
}

if (modify && R1(regs->iir))
if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;

@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)

if (ret)
{
/*
* The unaligned handler failed.
* If we were called by __get_user() or __put_user() jump
* to its exception fixup handler instead of crashing.
*/
if (!user_mode(regs) && fixup_exception(regs))
return;

printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);

@@ -643,7 +643,7 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCR2 769
#define SPRN_MMCR2 785
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL

@@ -677,13 +677,13 @@
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
#define SPRN_SIER 784
#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_SIAR 796
#define SPRN_SDAR 797

#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL

@@ -857,11 +857,6 @@ hv_facility_unavailable_relon_trampoline:
#endif
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

/* Other future vectors */
.align 7
.globl __end_interrupts
__end_interrupts:

.align 7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)

@@ -1191,6 +1186,17 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)

/*
* The __end_interrupts marker must be past the out-of-line (OOL)
* handlers, so that they are copied to real address 0x100 when running
* a relocatable kernel. This ensures they can be reached from the short
* trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
* directly, without using LOAD_HANDLER().
*/
.align 7
.globl __end_interrupts
__end_interrupts:

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* Data area reserved for FWNMI option.

@@ -1088,6 +1088,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Clear any transactional state, we're exec()ing. The cause is
* not important as there will never be a recheckpoint so it's not
* user visible.
*/
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
#endif

memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;

@@ -551,29 +551,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
int config_addr;
int ret;
/* Waiting 0.2s maximum before skipping configuration */
int max_wait = 200;

/* Figure out the PE address */
config_addr = pe->config_addr;
if (pe->addr)
config_addr = pe->addr;

/* Use new configure-pe function, if supported */
if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else {
return -EFAULT;
while (max_wait > 0) {
/* Use new configure-pe function, if supported */
if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else {
return -EFAULT;
}

if (!ret)
return ret;

/*
* If RTAS returns a delay value that's above 100ms, cut it
* down to 100ms in case firmware made a mistake. For more
* on how these delay values work see rtas_busy_delay_time
*/
if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
ret <= RTAS_EXTENDED_DELAY_MAX)
ret = RTAS_EXTENDED_DELAY_MIN+2;

max_wait -= rtas_busy_delay_time(ret);

if (max_wait < 0)
break;

rtas_busy_delay(ret);
}

if (ret)
pr_warning("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);

pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
return ret;
}

@@ -858,7 +858,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
struct eeh_dev *edev;
struct device_node *dn;
struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;

@@ -869,11 +870,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
edev = pci_dev_to_eeh_dev(dev);
cfg_addr = edev->config_addr;
if (edev->pe_config_addr)
cfg_addr = edev->pe_config_addr;
buid = edev->phb->buid;
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));

@@ -887,7 +887,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
struct eeh_dev *edev;
struct device_node *dn;
struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;

@@ -898,11 +899,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
edev = pci_dev_to_eeh_dev(dev);
cfg_addr = edev->config_addr;
if (edev->pe_config_addr)
cfg_addr = edev->pe_config_addr;
buid = edev->phb->buid;
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */

@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
regs->gprs[2] = error ? -error : val;
regs->gprs[2] = error ? error : val;
}

static inline void syscall_get_arguments(struct task_struct *task,

@@ -168,6 +168,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \

@@ -42,7 +42,34 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
cpumask_set_cpu(cpu, mm_cpumask(next));

/* Re-load page tables */
/*
* Re-load page tables.
*
* This logic has an ordering constraint:
*
* CPU 0: Write to a PTE for 'next'
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
* CPU 1: set bit 1 in next's mm_cpumask
* CPU 1: load from the PTE that CPU 0 writes (implicit)
*
* We need to prevent an outcome in which CPU 1 observes
* the new PTE value and CPU 0 observes bit 1 clear in
* mm_cpumask. (If that occurs, then the IPI will never
* be sent, and CPU 0's TLB will contain a stale entry.)
*
* The bad outcome can occur if either CPU's load is
* reordered before that CPU's store, so both CPUs must
* execute full barriers to prevent this from happening.
*
* Thus, switch_mm needs a full barrier between the
* store to mm_cpumask and any operation that could load
* from next->pgd. TLB fills are special and can happen
* due to instruction fetches or for no reason at all,
* and neither LOCK nor MFENCE orders them.
* Fortunately, load_cr3() is serializing and gives the
* ordering guarantee we need.
*
*/
load_cr3(next->pgd);

/* Stop flush ipis for the previous mm */

@@ -65,10 +92,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* schedule, protecting us from simultaneous changes.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));

/*
* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
load_cr3(next->pgd);
load_LDT_nolock(&next->context);
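
An illustrative user-space analogue of the comment above (a sketch, not
kernel code): the hazard is the classic store-buffering pattern, where each
side stores and then loads, and without a full barrier on both sides both
loads can observe stale values, so the IPI is skipped and a stale TLB entry
survives. In C11 atomics:

    #include <stdatomic.h>

    atomic_int pte = 0, mask_bit = 0;

    void cpu0(int *r0)          /* writes the PTE, then checks the mask */
    {
            atomic_store_explicit(&pte, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* full barrier */
            *r0 = atomic_load_explicit(&mask_bit, memory_order_relaxed);
    }

    void cpu1(int *r1)          /* sets its mask bit, then reads the PTE */
    {
            atomic_store_explicit(&mask_bit, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* full barrier */
            *r1 = atomic_load_explicit(&pte, memory_order_relaxed);
    }

    /* With both fences, *r0 == 0 && *r1 == 0 is impossible; drop either
     * fence and that outcome (the stale-TLB scenario) becomes legal. */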

@@ -67,8 +67,8 @@ int amd_cache_northbridges(void)
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;

if (i == 0)
return 0;
if (!i)
return -ENODEV;

nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)

@@ -392,7 +392,7 @@ static struct cpuidle_device apm_cpuidle_device;
/*
* Local variables
*/
static struct {
__visible struct {
unsigned long offset;
unsigned short segment;
} apm_bios_entry;

@@ -2241,13 +2241,16 @@ __init int intel_pmu_init(void)
* counter, so do not extend mask to generic counters
*/
for_each_event_constraint(c, x86_pmu.event_constraints) {
if (c->cmask != X86_RAW_EVENT_MASK
|| c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
if (c->cmask == X86_RAW_EVENT_MASK
&& c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
continue;
}

c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
c->weight += x86_pmu.num_counters;
c->idxmsk64 &=
~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
c->weight = hweight64(c->idxmsk64);

}
}

@@ -908,7 +908,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
/*
* Trap flag (TF) has been set here because this fault
* happened where the single stepping will be done.
* So clear it by resetting the current kprobe:
*/
regs->flags &= ~X86_EFLAGS_TF;

/*
* If the TF flag was set before the kprobe hit,
* don't touch it:
*/
regs->flags |= kcb->kprobe_old_flags;

if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else

@@ -2966,6 +2966,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
if (dbgregs->flags)
return -EINVAL;

if (dbgregs->dr6 & ~0xffffffffull)
return -EINVAL;
if (dbgregs->dr7 & ~0xffffffffull)
return -EINVAL;

memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
vcpu->arch.dr7 = dbgregs->dr7;

@@ -149,7 +149,9 @@ void flush_tlb_current_task(void)

preempt_disable();

/* This is an implicit full barrier that synchronizes with switch_mm. */
local_flush_tlb();

if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();

@@ -188,11 +190,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned act_entries, tlb_entries = 0;

preempt_disable();
if (current->active_mm != mm)
if (current->active_mm != mm) {
/* Synchronize with switch_mm. */
smp_mb();

goto flush_all;
}

if (!current->mm) {
leave_mm(smp_processor_id());

/* Synchronize with switch_mm. */
smp_mb();

goto flush_all;
}

@@ -242,10 +252,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
preempt_disable();

if (current->active_mm == mm) {
if (current->mm)
if (current->mm) {
/*
* Implicit full barrier (INVLPG) that synchronizes
* with switch_mm.
*/
__flush_tlb_one(start);
else
} else {
leave_mm(smp_processor_id());

/* Synchronize with switch_mm. */
smp_mb();
}
}

if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)

@@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
if (iter) {
class_dev_iter_exit(iter);
kfree(iter);
seqf->private = NULL;
}
}

@@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,

ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
CRYPTO_ALG_TYPE_AHASH_MASK |
crypto_requires_sync(algt->type,
algt->mask));
if (IS_ERR(ghash_alg))
return ERR_CAST(ghash_alg);

@@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,

void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
!(walk->offset & (PAGE_SIZE - 1)))
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);

@@ -385,6 +385,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
int result;
struct acpi_pci_root *root;
u32 flags, base_flags;
bool no_aspm = false, clear_aspm = false;

root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)

@@ -445,31 +446,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);

/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/

mutex_lock(&acpi_pci_root_lock);
list_add_tail(&root->node, &acpi_pci_roots);
mutex_unlock(&acpi_pci_root_lock);

/*
* Scan the Root Bridge
* --------------------
* Must do this prior to any attempt to bind the root device, as the
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto out_del_root;
}

/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled()) {

@@ -483,7 +463,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
pcie_no_aspm();
no_aspm = true;
flags = base_flags;
}
}

@@ -515,7 +495,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
* We have ASPM control, but the FADT indicates
* that it's unsupported. Clear it.
*/
pcie_clear_aspm(root->bus);
clear_aspm = true;
}
} else {
dev_info(&device->dev,

@@ -524,7 +504,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_format_exception(status), flags);
pr_info("ACPI _OSC control for PCIe not granted, "
"disabling ASPM\n");
pcie_no_aspm();
/*
* We want to disable ASPM here, but aspm_disabled
* needs to remain in its state from boot so that we
* properly handle PCIe 1.1 devices. So we set this
* flag here, to defer the action until after the ACPI
* root scan.
*/
no_aspm = true;
}
} else {
dev_info(&device->dev,

@@ -532,6 +519,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
"(_OSC support mask: 0x%02x)\n", flags);
}

/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/

/*
* Scan the Root Bridge
* --------------------
* Must do this prior to any attempt to bind the root device, as the
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
dev_err(&device->dev,
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}

if (clear_aspm) {
dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
pcie_clear_aspm(root->bus);
}
if (no_aspm)
pcie_no_aspm();

pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);

@@ -548,11 +562,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_bus_add_devices(root->bus);
return 1;

out_del_root:
mutex_lock(&acpi_pci_root_lock);
list_del(&root->node);
mutex_unlock(&acpi_pci_root_lock);

end:
kfree(root);
return result;

@@ -604,7 +604,7 @@ void ata_scsi_error(struct Scsi_Host *host)
ata_scsi_port_error_handler(host, ap);

/* finish or retry handled scmd's and clean up */
WARN_ON(host->host_failed || !list_empty(&eh_work_q));
WARN_ON(!list_empty(&eh_work_q));

DPRINTK("EXIT\n");
}

@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)

static void module_create_drivers_dir(struct module_kobject *mk)
{
if (!mk || mk->drivers_dir)
return;
static DEFINE_MUTEX(drivers_dir_mutex);

mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
mutex_lock(&drivers_dir_mutex);
if (mk && !mk->drivers_dir)
mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
mutex_unlock(&drivers_dir_mutex);
}

void module_add_driver(struct module *mod, struct device_driver *drv)

@@ -806,7 +806,7 @@ int hash_process_data(
&device_data->state);
memmove(req_ctx->state.buffer,
device_data->state.buffer,
HASH_BLOCK_SIZE / sizeof(u32));
HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "[%s] "
"hash_resume_state()"

@@ -858,7 +858,7 @@ int hash_process_data(

memmove(device_data->state.buffer,
req_ctx->state.buffer,
HASH_BLOCK_SIZE / sizeof(u32));
HASH_BLOCK_SIZE);
if (ret) {
dev_err(device_data->dev, "[%s] "
"hash_save_state()"

@@ -75,7 +75,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
#define MAX_BANK 5
#define BANK_SZ 8

#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)

struct pca953x_chip {
unsigned gpio_start;

@@ -1391,7 +1391,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;

@@ -1410,7 +1409,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;

crtcs = kzalloc(dev->mode_config.num_connector *
crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;

@@ -1456,7 +1455,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}

@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
"RX Prot Violation",
"HS Generic Write FIFO Full",
"LP Generic Write FIFO Full",
"Generic Read Data Avail"
"Generic Read Data Avail",
"Special Packet Sent",
"Tearing Effect",
};

@@ -1128,7 +1128,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;

if (crev < 2)
if ((frev < 2) && (crev < 2))
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else

@@ -1137,7 +1137,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

if (crev >= 4) {
if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)

@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "radeon_acpi.h"

@@ -256,6 +257,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
if (!info)
return -EIO;
kfree(info);

/* 200ms delay is required after off */
if (state == 0)
msleep(200);
}
return 0;
}

@@ -1812,7 +1812,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;

@@ -2037,8 +2036,10 @@ radeon_add_atom_connector(struct drm_device *dev,
}

if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;

@@ -2114,7 +2115,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;

@@ -2199,10 +2199,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
}

if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;

connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
}

@@ -549,6 +549,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/

/**
* radeon_device_is_virtual - check if we are running in a virtual environment
*
* Check if the asic has been passed through to a VM (all asics).
* Used at driver startup.
* Returns true if virtual or false if not.
*/
static bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
return false;
#endif
}

/**
* radeon_card_posted - check if the hw has already been initialized
*

@@ -562,6 +579,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;

/* for pass through, always force asic_init */
if (radeon_device_is_virtual())
return false;

/* required for EFI mode on macbook2,1 which uses an r5xx asic */
if (efi_enabled(EFI_BOOT) &&
(rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&

@@ -1084,7 +1084,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;

/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
if (usage->type == EV_KEY && (!!test_bit(usage->code, input->key)) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);

input_event(input, usage->type, usage->code, value);

@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
goto inval;
} else if (uref->usage_index >= field->report_count)
goto inval;

else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
(uref_multi->num_values > HID_MAX_MULTI_USAGES ||
uref->usage_index + uref_multi->num_values > field->report_count))
goto inval;
}

if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
(uref_multi->num_values > HID_MAX_MULTI_USAGES ||
uref->usage_index + uref_multi->num_values > field->report_count))
goto inval;

switch (cmd) {
case HIDIOCGUSAGE:
uref->value = field->value[uref->usage_index];

@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)

mutex_lock(&st->buf_lock);
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
if (ret < 0)
goto error_ret;
st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;

@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
if (ret < 0)
goto error_ret;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;

@@ -406,7 +406,7 @@ static int ad7266_probe(struct spi_device *spi)
st = iio_priv(indio_dev);

st->reg = regulator_get(&spi->dev, "vref");
if (!IS_ERR_OR_NULL(st->reg)) {
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
goto error_put_reg;

@@ -417,6 +417,10 @@ static int ad7266_probe(struct spi_device *spi)

st->vref_uv = ret;
} else {
/* Any other error indicates that the regulator does exist */
if (PTR_ERR(st->reg) != -ENODEV)
return PTR_ERR(st->reg);

/* Use internal reference */
st->vref_uv = 2500000;
}

@@ -203,22 +203,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,

/* Prevent the module from being removed whilst attached to a trigger */
__module_get(pf->indio_dev->info->driver_module);

/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
if (pf->irq < 0)
goto out_put_module;

/* Request irq */
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
if (ret < 0) {
module_put(pf->indio_dev->info->driver_module);
return ret;
}
if (ret < 0)
goto out_put_irq;

/* Enable trigger in driver */
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
module_put(pf->indio_dev->info->driver_module);
goto out_free_irq;
}

return ret;

out_free_irq:
free_irq(pf->irq, pf);
out_put_irq:
iio_trigger_put_irq(trig, pf->irq);
out_put_module:
module_put(pf->indio_dev->info->driver_module);
return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,

@@ -48,6 +48,7 @@

#include <asm/uaccess.h>

#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>

@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;

if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
return -EACCES;

if (len < sizeof(hdr))
return -EINVAL;

@@ -43,6 +43,7 @@
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/ib.h>
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

@@ -1249,6 +1250,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;

if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
return -EACCES;

if (len < sizeof(hdr))
return -EINVAL;

@@ -48,6 +48,8 @@

#include <asm/uaccess.h>

#include <rdma/ib.h>

#include "uverbs.h"

MODULE_AUTHOR("Roland Dreier");

@@ -588,6 +590,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_cmd_hdr hdr;

if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
return -EACCES;

if (count < sizeof hdr)
return -EINVAL;

@@ -65,6 +65,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,

ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
ah->av.ib.g_slid = ah_attr->src_path_bits;
ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
if (ah_attr->ah_flags & IB_AH_GRH) {
ah->av.ib.g_slid |= 0x80;
ah->av.ib.gid_index = ah_attr->grh.sgid_index;

@@ -82,7 +83,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
!(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
--ah->av.ib.stat_rate;
}
ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);

return &ah->ibah;
}

@@ -346,7 +346,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_atomic_seg) +
sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:

@@ -45,6 +45,8 @@
#include <linux/delay.h>
#include <linux/export.h>

#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

@@ -1977,6 +1979,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;

if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
return -EACCES;

if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;

@@ -887,7 +887,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
neigh->alive = jiffies;

if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
neigh->alive = jiffies;
goto out_unlock;
}
}

@@ -845,6 +845,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_endpoint_descriptor *ep_irq_in;
int i, error;

if (intf->cur_altsetting->desc.bNumEndpoints != 2)
return -ENODEV;

for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))

@@ -835,9 +835,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}

#ifdef CONFIG_COMPAT

#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)

static long uinput_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
if (cmd == UI_SET_PHYS_COMPAT)
cmd = UI_SET_PHYS;

return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
#endif

@@ -28,7 +28,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

#define W8001_MAX_LENGTH 11
#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40

@@ -2295,8 +2295,8 @@ _hfcpci_softirq(struct device *dev, void *arg)
static void
hfcpci_softirq(void *arg)
{
(void) driver_for_each_device(&hfc_driver.driver, NULL, arg,
_hfcpci_softirq);
WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
_hfcpci_softirq) != 0);

/* if next event would be in the past ... */
if ((s32)(hfc_jiffies + tics - jiffies) <= 0)

@@ -286,10 +286,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
pb->bio_submitted = true;

/*
* Map reads as normal.
* Map reads as normal only if corrupt_bio_byte set.
*/
if (bio_data_dir(bio) == READ)
goto map_bio;
if (bio_data_dir(bio) == READ) {
/* If flags were specified, only corrupt those that match. */
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
goto map_bio;
else
return -EIO;
}

/*
* Drop writes?

@@ -327,12 +333,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)

/*
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte)
corrupt_bio_data(bio, fc);
else
return -EIO;
}

return error;
}

@@ -193,7 +193,7 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
.len = len + 1
};

if (1 + len > sizeof(buf)) {
if (1 + len > sizeof(cmdbuf)) {
printk(KERN_WARNING
"%s: i2c wr: len=%d is too big!\n",
KBUILD_MODNAME, len);

@@ -1055,6 +1055,11 @@ static int match_child(struct device *dev, void *data)
return !strcmp(dev_name(dev), (char *)data);
}

static void s5p_mfc_memdev_release(struct device *dev)
{
dma_release_declared_memory(dev);
}

static void *mfc_get_drv_data(struct platform_device *pdev);

#ifdef CONFIG_EXYNOS_IOMMU

@@ -1089,6 +1094,9 @@ static int s5p_mfc_alloc_memdevs_noiommu(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}

dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
dev->mem_dev_l->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_l);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-l", mem_info, 2);

@@ -1106,6 +1114,9 @@ static int s5p_mfc_alloc_memdevs_noiommu(struct s5p_mfc_dev *dev)
mfc_err("Not enough memory\n");
return -ENOMEM;
}

dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
dev->mem_dev_r->release = s5p_mfc_memdev_release;
device_initialize(dev->mem_dev_r);
of_property_read_u32_array(dev->plat_dev->dev.of_node,
"samsung,mfc-r", mem_info, 2);

@@ -1635,8 +1635,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,

packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
packed_cmd_hdr[0] = (packed->nr_entries << 16) |
(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;

/*

@@ -1650,14 +1650,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
packed_cmd_hdr[(i * 2)] =
packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
blk_rq_sectors(prq);
blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
packed_cmd_hdr[((i * 2)) + 1] =
packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
|
||||
|
||||
@@ -997,6 +997,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 		goto out_detach;
 	}
 
+	/* Make device "available" before it becomes accessible via sysfs */
+	ubi_devices[ubi_num] = ubi;
+
 	err = uif_init(ubi, &ref);
 	if (err)
 		goto out_detach;
@@ -1041,7 +1044,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	wake_up_process(ubi->bgt_thread);
 	spin_unlock(&ubi->wl_lock);
 
-	ubi_devices[ubi_num] = ubi;
 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
 	return ubi_num;
 
@@ -1052,6 +1054,7 @@ out_uif:
 	ubi_assert(ref);
 	uif_close(ubi);
 out_detach:
+	ubi_devices[ubi_num] = NULL;
 	ubi_wl_close(ubi);
 	ubi_free_internal_volumes(ubi);
 	vfree(ubi->vtbl);
 
@@ -534,13 +534,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 		spin_unlock(&ubi->volumes_lock);
 	}
 
-	/* Change volume table record */
-	vtbl_rec = ubi->vtbl[vol_id];
-	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
-	if (err)
-		goto out_acc;
-
 	if (pebs < 0) {
 		for (i = 0; i < -pebs; i++) {
 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
@@ -558,6 +551,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
 		spin_unlock(&ubi->volumes_lock);
 	}
 
+	/*
+	 * When we shrink a volume we have to flush all pending (erase) work.
+	 * Otherwise it can happen that upon next attach UBI finds a LEB with
+	 * lnum > highest_lnum and refuses to attach.
+	 */
+	if (pebs < 0) {
+		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+		if (err)
+			goto out_acc;
+	}
+
+	/* Change volume table record */
+	vtbl_rec = ubi->vtbl[vol_id];
+	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+	if (err)
+		goto out_acc;
+
 	vol->reserved_pebs = reserved_pebs;
 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
 		vol->used_ebs = reserved_pebs;
 
@@ -731,9 +731,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > get_mb_rx_low_last(priv) &&
-	    quota > 0 && mb > get_mb_rx_last(priv)) {
+	    mb > get_mb_rx_last(priv)) {
 		priv->rx_next = get_mb_rx_first(priv);
-		goto again;
+		if (quota > 0)
+			goto again;
 	}
 
 	return received;
 
@@ -772,6 +772,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
 	return -EOPNOTSUPP;
 }
 
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+	return;
+}
+
 static struct rtnl_link_ops can_link_ops __read_mostly = {
 	.kind		= "can",
 	.maxtype	= IFLA_CAN_MAX,
@@ -779,6 +784,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
 	.setup		= can_setup,
 	.newlink	= can_newlink,
 	.changelink	= can_changelink,
+	.dellink	= can_dellink,
 	.get_size	= can_get_size,
 	.fill_info	= can_fill_info,
 	.get_xstats_size = can_get_xstats_size,
 
@@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 	while (!cur_buf->skb && next != rxq->read_idx) {
 		struct alx_rfd *rfd = &rxq->rfd[cur];
 
-		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
 		if (!skb)
 			break;
+
+		/* Workround for the HW RX DMA overflow issue */
+		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+			skb_reserve(skb, 64);
+
 		dma = dma_map_single(&alx->hw.pdev->dev,
 				     skb->data, alx->rxbuf_size,
 				     DMA_FROM_DEVICE);
 
@@ -210,7 +210,7 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS		1
+#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS		32
 #define MVNETA_RX_COAL_USEC		100
 
@@ -477,6 +477,13 @@ advance:
 	if (cdc_ncm_setup(ctx))
 		goto error2;
 
+	/* Some firmwares need a pause here or they will silently fail
+	 * to set up the interface properly. This value was decided
+	 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+	 * firmware.
+	 */
+	usleep_range(10000, 20000);
+
 	/* configure data interface */
 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
 	if (temp)
@@ -598,24 +605,13 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
 
 static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-	int ret;
-
 	/* MBIM backwards compatible function? */
 	cdc_ncm_select_altsetting(dev, intf);
 	if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
 		return -ENODEV;
 
 	/* NCM data altsetting is always 1 */
-	ret = cdc_ncm_bind_common(dev, intf, 1);
-
-	/*
-	 * We should get an event when network connection is "connected" or
-	 * "disconnected". Set network connection in "disconnected" state
-	 * (carrier is OFF) during attach, so the IP network stack does not
-	 * start IPv6 negotiation and more.
-	 */
-	usbnet_link_change(dev, 0, 0);
-	return ret;
+	return cdc_ncm_bind_common(dev, intf, 1);
 }
 
 static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1161,7 +1157,8 @@ static void cdc_ncm_disconnect(struct usb_interface *intf)
 
 static const struct driver_info cdc_ncm_info = {
 	.description = "CDC NCM",
-	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+			| FLAG_LINK_INTR,
 	.bind = cdc_ncm_bind,
 	.unbind = cdc_ncm_unbind,
 	.check_connect = cdc_ncm_check_connect,
@@ -1175,7 +1172,7 @@ static const struct driver_info cdc_ncm_info = {
 static const struct driver_info wwan_info = {
 	.description = "Mobile Broadband Network Device",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_WWAN,
+			| FLAG_LINK_INTR | FLAG_WWAN,
 	.bind = cdc_ncm_bind,
 	.unbind = cdc_ncm_unbind,
 	.check_connect = cdc_ncm_check_connect,
@@ -1189,7 +1186,7 @@ static const struct driver_info wwan_info = {
 static const struct driver_info wwan_noarp_info = {
 	.description = "Mobile Broadband Network Device (NO ARP)",
 	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_WWAN | FLAG_NOARP,
+			| FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
 	.bind = cdc_ncm_bind,
 	.unbind = cdc_ncm_unbind,
 	.check_connect = cdc_ncm_check_connect,
 
@@ -77,7 +77,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
 	/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
 	/* HP Compaq C700 (nitrousnrg@gmail.com) */
-	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
 	/* LiteOn AR5BXB63 (magooz@salug.it) */
 	{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
 	/* IBM-specific AR5212 (all others) */
 
@@ -1931,6 +1931,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
 	    !info->attrs[HWSIM_ATTR_FLAGS] ||
 	    !info->attrs[HWSIM_ATTR_COOKIE] ||
+	    !info->attrs[HWSIM_ATTR_SIGNAL] ||
 	    !info->attrs[HWSIM_ATTR_TX_INFO])
 		goto out;
 
@@ -1392,9 +1392,9 @@ void rtl_watchdog_wq_callback(void *data)
 		if (((rtlpriv->link_info.num_rx_inperiod +
 		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
 		    (rtlpriv->link_info.num_rx_inperiod > 2))
-			rtlpriv->enter_ps = true;
-		else
 			rtlpriv->enter_ps = false;
+		else
+			rtlpriv->enter_ps = true;
 
 		/* LeisurePS only work in infra mode. */
 		schedule_work(&rtlpriv->works.lps_change_work);
 
@@ -173,9 +173,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	struct pci_bus_region region;
 	bool bar_too_big = false, bar_disabled = false;
 
-	if (dev->non_compliant_bars)
-		return 0;
-
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
 	/* No printks while decoding is disabled! */
@@ -295,6 +292,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
 {
 	unsigned int pos, reg;
 
+	if (dev->non_compliant_bars)
+		return;
+
 	for (pos = 0; pos < howmany; pos++) {
 		struct resource *res = &dev->resource[pos];
 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
 
@@ -640,6 +640,11 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
 	if (err)
 		return err;
 
+	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
+				   sizeof(wireless), 0);
+	if (err)
+		return err;
+
 	if (wireless & 0x1) {
 		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
 					   RFKILL_TYPE_WLAN,
 
@@ -898,6 +898,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 		qeth_l2_set_offline(cgdev);
 
 	if (card->dev) {
+		netif_napi_del(&card->napi);
 		unregister_netdev(card->dev);
 		card->dev = NULL;
 	}
@@ -3333,6 +3333,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 		qeth_l3_set_offline(cgdev);
 
 	if (card->dev) {
+		netif_napi_del(&card->napi);
 		unregister_netdev(card->dev);
 		card->dev = NULL;
 	}
 
@@ -590,10 +590,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 				}
 				return -EFAULT;
 			}
-			/* We used to udelay() here but that absorbed
-			 * a CPU when a timeout occured. Not very
-			 * useful. */
-			cpu_relax();
+			/*
+			 * Allow other processes / CPUS to use core
+			 */
+			schedule();
 		}
 	} else if (down_interruptible(&fibptr->event_wait)) {
 		/* Do nothing ... satisfy
@@ -1920,6 +1920,10 @@ int aac_command_thread(void *data)
 		if (difference <= 0)
 			difference = 1;
 		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (kthread_should_stop())
+			break;
+
 		schedule_timeout(difference);
 
 		if (kthread_should_stop())
 
@@ -2978,7 +2978,7 @@ be_sgl_create_contiguous(void *virtual_address,
 {
 	WARN_ON(!virtual_address);
 	WARN_ON(!physical_address);
-	WARN_ON(!length > 0);
+	WARN_ON(!length);
 	WARN_ON(!sgl);
 
 	sgl->va = virtual_address;
 
@@ -9607,6 +9607,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		ioa_cfg->intr_flag = IPR_USE_MSI;
 	else {
 		ioa_cfg->intr_flag = IPR_USE_LSI;
+		ioa_cfg->clear_isr = 1;
 		ioa_cfg->nvectors = 1;
 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
 	}
 
@@ -898,7 +898,6 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
  */
 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
 {
-	scmd->device->host->host_failed--;
 	scmd->eh_eflags = 0;
 	list_move_tail(&scmd->eh_entry, done_q);
 }
@@ -1892,6 +1891,9 @@ int scsi_error_handler(void *data)
 		else
 			scsi_unjam_host(shost);
 
+		/* All scmds have been handled */
+		shost->host_failed = 0;
+
 		/*
 		 * Note - if the above fails completely, the action is to take
 		 * individual devices offline and flush the queue of any
 
@@ -546,66 +546,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 
 static void __scsi_release_buffers(struct scsi_cmnd *, int);
 
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *		of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd	 - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *		requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- *
- *		We are guaranteeing that the request queue will be goosed
- *		at some point during this call.
- * Notes:	If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-					  int bytes, int requeue)
-{
-	struct request_queue *q = cmd->device->request_queue;
-	struct request *req = cmd->request;
-
-	/*
-	 * If there are blocks left over at the end, set up the command
-	 * to queue the remainder of them.
-	 */
-	if (blk_end_request(req, error, bytes)) {
-		/* kill remainder if no retrys */
-		if (error && scsi_noretry_cmd(cmd))
-			blk_end_request_all(req, error);
-		else {
-			if (requeue) {
-				/*
-				 * Bleah.  Leftovers again.  Stick the
-				 * leftovers in the front of the
-				 * queue, and goose the queue again.
-				 */
-				scsi_release_buffers(cmd);
-				scsi_requeue_command(q, cmd);
-				cmd = NULL;
-			}
-			return cmd;
-		}
-	}
-
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	__scsi_release_buffers(cmd, 0);
-	scsi_next_command(cmd);
-	return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
@@ -735,16 +675,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *
  * Returns:     Nothing
  *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
- *
- *		We must call scsi_end_request().  This will finish off
- *		the specified number of sectors.  If we are done, the
- *		command block will be released and the queue function
- *		will be goosed.  If we are not done then we have to
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *		are done, the command block will be released and the queue
+ *		function will be goosed.  If we are not done then we have to
  *		figure out what to do next:
  *
  *		a) We can call scsi_requeue_command().  The request
@@ -753,7 +686,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *		   be used if we made forward progress, or if we want
  *		   to switch from READ(10) to READ(6) for example.
  *
- *		b) We can call scsi_queue_insert().  The request will
+ *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
@@ -857,12 +790,28 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 
 	/*
-	 * A number of bytes were successfully read.  If there
-	 * are leftovers and there is some kind of error
-	 * (result != 0), retry the rest.
+	 * special case: failed zero length commands always need to
+	 * drop down into the retry code. Otherwise, if we finished
+	 * all bytes in the request we are done now.
 	 */
-	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
-		return;
+	if (!(blk_rq_bytes(req) == 0 && error) &&
+	    !blk_end_request(req, error, good_bytes))
+		goto next_command;
+
+	/*
+	 * Kill remainder if no retrys.
+	 */
+	if (error && scsi_noretry_cmd(cmd)) {
+		blk_end_request_all(req, error);
+		goto next_command;
+	}
+
+	/*
+	 * If there had been no error, but we have leftover bytes in the
+	 * requeues just queue the command up again.
+	 */
+	if (result == 0)
+		goto requeue;
 
 	error = __scsi_error_from_host_byte(cmd, result);
 
@@ -984,7 +933,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
-		scsi_release_buffers(cmd);
 		if (!(req->cmd_flags & REQ_QUIET)) {
 			if (description)
 				scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -994,12 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			scsi_print_sense("", cmd);
 			scsi_print_command(cmd);
 		}
-		if (blk_end_request_err(req, error))
-			scsi_requeue_command(q, cmd);
-		else
-			scsi_next_command(cmd);
-		break;
+		if (!blk_end_request_err(req, error))
+			goto next_command;
+		/*FALLTHRU*/
 	case ACTION_REPREP:
+	requeue:
 		/* Unprep the request and put it back at the head of the queue.
 		 * A new command will be prepared and issued.
 		 */
@@ -1015,6 +962,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
 		break;
 	}
+	return;
+
+next_command:
+	__scsi_release_buffers(cmd, 0);
+	scsi_next_command(cmd);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 
@@ -315,7 +315,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	}
 
 	/* See if there is more data to send */
-	if (!xspi->remaining_bytes > 0)
+	if (xspi->remaining_bytes <= 0)
 		break;
 }
 
@@ -588,7 +588,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
 		goto error_ret_mut;
 	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
 	mutex_unlock(&st->lock);
-	if (ret)
+	if (ret < 0)
 		goto error_ret;
 	val = ret;
 	if (base_freq > 0)
 
@@ -371,34 +371,22 @@ static void to_utf8(struct vc_data *vc, uint c)
 
 static void do_compute_shiftstate(void)
 {
-	unsigned int i, j, k, sym, val;
+	unsigned int k, sym, val;
 
 	shift_state = 0;
 	memset(shift_down, 0, sizeof(shift_down));
 
-	for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
-		if (!key_down[i])
+	for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+		sym = U(key_maps[0][k]);
+		if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
 			continue;
 
-		k = i * BITS_PER_LONG;
+		val = KVAL(sym);
+		if (val == KVAL(K_CAPSSHIFT))
+			val = KVAL(K_SHIFT);
 
-		for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
-			if (!test_bit(k, key_down))
-				continue;
-
-			sym = U(key_maps[0][k]);
-			if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
-				continue;
-
-			val = KVAL(sym);
-			if (val == KVAL(K_CAPSSHIFT))
-				val = KVAL(K_SHIFT);
-
-			shift_down[val]++;
-			shift_state |= (1 << val);
-		}
+		shift_down[val]++;
+		shift_state |= BIT(val);
 	}
 }
 
@@ -1106,10 +1106,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
 
 static int proc_connectinfo(struct dev_state *ps, void __user *arg)
 {
-	struct usbdevfs_connectinfo ci = {
-		.devnum = ps->dev->devnum,
-		.slow = ps->dev->speed == USB_SPEED_LOW
-	};
+	struct usbdevfs_connectinfo ci;
+
+	memset(&ci, 0, sizeof(ci));
+	ci.devnum = ps->dev->devnum;
+	ci.slow = ps->dev->speed == USB_SPEED_LOW;
 
 	if (copy_to_user(arg, &ci, sizeof(ci)))
 		return -EFAULT;
 
@@ -117,6 +117,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
 #define HUB_DEBOUNCE_STEP	  25
 #define HUB_DEBOUNCE_STABLE	 100
 
+static void hub_release(struct kref *kref);
 static int usb_reset_and_verify_device(struct usb_device *udev);
 
 static inline char *portspeed(struct usb_hub *hub, int portstatus)
@@ -1028,10 +1029,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 	unsigned delay;
 
 	/* Continue a partial initialization */
-	if (type == HUB_INIT2)
-		goto init2;
-	if (type == HUB_INIT3)
+	if (type == HUB_INIT2 || type == HUB_INIT3) {
+		device_lock(hub->intfdev);
+
+		/* Was the hub disconnected while we were waiting? */
+		if (hub->disconnected) {
+			device_unlock(hub->intfdev);
+			kref_put(&hub->kref, hub_release);
+			return;
+		}
+		if (type == HUB_INIT2)
+			goto init2;
 		goto init3;
+	}
+	kref_get(&hub->kref);
 
 	/* The superspeed hub except for root hub has to use Hub Depth
 	 * value as an offset into the route string to locate the bits
@@ -1228,6 +1239,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 			PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
 			schedule_delayed_work(&hub->init_work,
 					msecs_to_jiffies(delay));
+			device_unlock(hub->intfdev);
 			return;		/* Continues at init3: below */
 		} else {
 			msleep(delay);
@@ -1248,6 +1260,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 	/* Allow autosuspend if it was suppressed */
 	if (type <= HUB_INIT3)
 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+	if (type == HUB_INIT2 || type == HUB_INIT3)
+		device_unlock(hub->intfdev);
+
+	kref_put(&hub->kref, hub_release);
 }
 
 /* Implement the continuations for the delays above */
 
@@ -170,14 +170,6 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* INTEL VALUE SSD */
 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
-	{ }  /* terminating entry must be last */
-};
-
-static const struct usb_device_id usb_interface_quirk_list[] = {
-	/* Logitech UVC Cameras */
-	{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
-	  .driver_info = USB_QUIRK_RESET_RESUME },
-
 	/* ASUS Base Station(T100) */
 	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
@@ -191,6 +183,14 @@ static const struct usb_device_id usb_interface_quirk_list[] = {
 	{ }  /* terminating entry must be last */
 };
 
+static const struct usb_device_id usb_interface_quirk_list[] = {
+	/* Logitech UVC Cameras */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
+	  .driver_info = USB_QUIRK_RESET_RESUME },
+
+	{ }  /* terminating entry must be last */
+};
+
 static bool usb_match_any_interface(struct usb_device *udev,
				     const struct usb_device_id *id)
 {
 
@@ -581,14 +581,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 		musb_writew(ep->regs, MUSB_TXCSR, 0);
 
 	/* scrub all previous state, clearing toggle */
-	} else {
-		csr = musb_readw(ep->regs, MUSB_RXCSR);
-		if (csr & MUSB_RXCSR_RXPKTRDY)
-			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
-				musb_readw(ep->regs, MUSB_RXCOUNT));
-
-		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 	}
+	csr = musb_readw(ep->regs, MUSB_RXCSR);
+	if (csr & MUSB_RXCSR_RXPKTRDY)
+		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+			musb_readw(ep->regs, MUSB_RXCOUNT));
+
+	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 
 	/* target addr and (for multipoint) hub addr/port */
 	if (musb->is_multipoint) {
@@ -948,9 +947,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
 	if (is_in) {
 		dma = is_dma_capable() ? ep->rx_channel : NULL;
 
-		/* clear nak timeout bit */
+		/*
+		 * Need to stop the transaction by clearing REQPKT first
+		 * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
+		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
+		 */
 		rx_csr = musb_readw(epio, MUSB_RXCSR);
 		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+		musb_writew(epio, MUSB_RXCSR, rx_csr);
 		rx_csr &= ~MUSB_RXCSR_DATAERROR;
 		musb_writew(epio, MUSB_RXCSR, rx_csr);
 
@@ -558,6 +558,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 	struct usbhs_pipe *pipe;
 	int ret = -EIO;
+	unsigned long flags;
+
+	usbhs_lock(priv, flags);
 
 	/*
 	 * if it already have pipe,
@@ -566,7 +569,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
 	if (uep->pipe) {
 		usbhs_pipe_clear(uep->pipe);
 		usbhs_pipe_sequence_data0(uep->pipe);
-		return 0;
+		ret = 0;
+		goto usbhsg_ep_enable_end;
 	}
 
 	pipe = usbhs_pipe_malloc(priv,
@@ -594,6 +598,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
 		ret = 0;
 	}
 
+usbhsg_ep_enable_end:
+	usbhs_unlock(priv, flags);
+
 	return ret;
 }
 
@@ -274,6 +274,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
+#define TELIT_PRODUCT_LE910_USBCFG4		0x1206
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID				0x19d2
@@ -1206,6 +1207,8 @@ static const struct usb_device_id option_ids[] = {
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
 
@@ -177,6 +177,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
 	num = min(num, ARRAY_SIZE(vb->pfns));
 
 	mutex_lock(&vb->balloon_lock);
+	/* We can't release more pages than taken */
+	num = min(num, (size_t)vb->num_pages);
 	for (vb->num_pfns = 0; vb->num_pfns < num;
 	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
 		page = balloon_page_dequeue(vb_dev_info);
 
@@ -426,36 +426,7 @@ upload:
 
 	return 0;
 }
-static int __init check_prereq(void)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-
-	if (!xen_initial_domain())
-		return -ENODEV;
-
-	if (!acpi_gbl_FADT.smi_command)
-		return -ENODEV;
-
-	if (c->x86_vendor == X86_VENDOR_INTEL) {
-		if (!cpu_has(c, X86_FEATURE_EST))
-			return -ENODEV;
-
-		return 0;
-	}
-	if (c->x86_vendor == X86_VENDOR_AMD) {
-		/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
-		 * as we get compile warnings for the static functions.
-		 */
-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
-#define USE_HW_PSTATE                   0x00000080
-		u32 eax, ebx, ecx, edx;
-		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-		if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
-			return -ENODEV;
-		return 0;
-	}
-	return -ENODEV;
-}
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
@@ -511,10 +482,10 @@ static struct syscore_ops xap_syscore_ops = {
 static int __init xen_acpi_processor_init(void)
 {
 	unsigned int i;
-	int rc = check_prereq();
+	int rc;
 
-	if (rc)
-		return rc;
+	if (!xen_initial_domain())
+		return -ENODEV;
 
 	nr_acpi_bits = get_max_acpi_id() + 1;
 	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
 
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			err = conf_space_read(dev, cfg_entry, field_start,
 					      &tmp_val);
 			if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
 		field_start = OFFSET(cfg_entry);
 		field_end = OFFSET(cfg_entry) + field->size;
 
-		if ((req_start >= field_start && req_start < field_end)
-		    || (req_end > field_start && req_end <= field_end)) {
+		if (req_end > field_start && field_end > req_start) {
 			tmp_val = 0;
 
 			err = xen_pcibk_config_read(dev, field_start,