Mirror of https://github.com/hardkernel/linux.git
Merge tag 'lsk-v3.14-15.08' of https://git.linaro.org/kernel/linux-linaro-stable into odroidc2-3.14.y-linarohk
LSK 15.08 v3.14
@@ -91,5 +91,5 @@ mpp61 61 gpo, dev(wen1), uart1(txd), audio(rclk)
 mpp62 62 gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
          audio(mclk), uart0(cts)
 mpp63 63 gpo, spi0(sck), tclk
-mpp64 64 gpio, spi0(miso), spi0-1(cs1)
-mpp65 65 gpio, spi0(mosi), spi0-1(cs2)
+mpp64 64 gpio, spi0(miso), spi0(cs1)
+mpp65 65 gpio, spi0(mosi), spi0(cs2)
@@ -41,15 +41,15 @@ mpp20 20 gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
 mpp21 21 gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
 mpp22 22 gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
 mpp23 23 gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
-mpp24 24 gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
-mpp25 25 gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
-mpp26 26 gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
+mpp24 24 gpio, lcd(hsync), sata1(prsnt), tdm(rst)
+mpp25 25 gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
+mpp26 26 gpio, lcd(clk), tdm(fsync)
 mpp27 27 gpio, lcd(e), tdm(dtx), ptp(trig)
 mpp28 28 gpio, lcd(pwm), tdm(drx), ptp(evreq)
-mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
+mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk)
 mpp30 30 gpio, tdm(int1), sd0(clk)
-mpp31 31 gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
-mpp32 32 gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
+mpp31 31 gpio, tdm(int2), sd0(cmd)
+mpp32 32 gpio, tdm(int3), sd0(d0)
 mpp33 33 gpio, tdm(int4), sd0(d1), mem(bat)
 mpp34 34 gpio, tdm(int5), sd0(d2), sata0(prsnt)
 mpp35 35 gpio, tdm(int6), sd0(d3), sata1(prsnt)
@@ -57,21 +57,18 @@ mpp36 36 gpio, spi(mosi)
 mpp37 37 gpio, spi(miso)
 mpp38 38 gpio, spi(sck)
 mpp39 39 gpio, spi(cs0)
-mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
-         pcie(clkreq0)
+mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
 mpp41 41 gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
          pcie(clkreq1)
-mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
-         vdd(cpu0-pd)
-mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
-         vdd(cpu2-3-pd){1}
+mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
+mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
 mpp44 44 gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
          mem(bat)
 mpp45 45 gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
 mpp46 46 gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
 mpp47 47 gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
          ref(clkout)
-mpp48 48 gpio, tclk, dev(burst/last)
+mpp48 48 gpio, dev(clkout), dev(burst/last)

 * Marvell Armada XP (mv78260 and mv78460 only)

@@ -83,9 +80,9 @@ mpp51 51 gpio, dev(ad16)
 mpp52 52 gpio, dev(ad17)
 mpp53 53 gpio, dev(ad18)
 mpp54 54 gpio, dev(ad19)
-mpp55 55 gpio, dev(ad20), vdd(cpu0-pd)
-mpp56 56 gpio, dev(ad21), vdd(cpu1-pd)
-mpp57 57 gpio, dev(ad22), vdd(cpu2-3-pd){1}
+mpp55 55 gpio, dev(ad20)
+mpp56 56 gpio, dev(ad21)
+mpp57 57 gpio, dev(ad22)
 mpp58 58 gpio, dev(ad23)
 mpp59 59 gpio, dev(ad24)
 mpp60 60 gpio, dev(ad25)
@@ -95,6 +92,3 @@ mpp63 63 gpio, dev(ad28)
 mpp64 64 gpio, dev(ad29)
 mpp65 65 gpio, dev(ad30)
 mpp66 66 gpio, dev(ad31)
-
-Notes:
-* {1} vdd(cpu2-3-pd) only available on mv78460.
@@ -4,9 +4,9 @@ Required properties:
 - compatible : "arm,pl022", "arm,primecell"
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain SPI controller interrupt
+- num-cs : total number of chipselects

 Optional properties:
-- num-cs : total number of chipselects
 - cs-gpios : should specify GPIOs used for chipselects.
   The gpios will be referred to as reg = <index> in the SPI child nodes.
   If unspecified, a single SPI device without a chip select can be used.
@@ -2350,7 +2350,8 @@ should be created before this ioctl is invoked.

 Possible features:
 	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
-	  Depends on KVM_CAP_ARM_PSCI.
+	  Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
+	  and execute guest code when KVM_RUN is called.
 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).

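To make the feature flag concrete, here is a small, hedged userspace sketch (not part of this patch) of how KVM_ARM_VCPU_POWER_OFF is typically requested through KVM_ARM_VCPU_INIT; the vm_fd/vcpu_fd are assumed to come from prior KVM_CREATE_VM / KVM_CREATE_VCPU calls, and the function name is illustrative only.

/* Hypothetical sketch: bring a vCPU up in the power-off state.
 * Assumes the host exposes KVM_CAP_ARM_PSCI. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int vcpu_start_powered_off(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        /* Let the kernel pick the preferred CPU target for this host. */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;
        /* Start powered off; a later PSCI CPU_ON (or a re-init without the
         * bit) brings the vCPU online and lets KVM_RUN execute guest code. */
        init.features[0] |= 1UL << KVM_ARM_VCPU_POWER_OFF;
        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}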
Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 14
-SUBLEVEL = 45
+SUBLEVEL = 50
 EXTRAVERSION =
 NAME = Remembering Coco

@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	"	scond   %3, [%1]	\n"
 	"	bnz     1b		\n"
 	"2:				\n"
-	: "=&r"(prev)
-	: "r"(ptr), "ir"(expected),
-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
-	: "cc");
+	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
+	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
+	  "ir"(expected),
+	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
+	: "cc", "memory");	/* so that gcc knows memory is being written here */

 	return prev;
 }
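For readers wondering why the added "memory" clobber matters, here is a small, hedged C sketch (not from this commit, names are generic) of the usual cmpxchg() retry loop; without the clobber the compiler would be free to keep the re-read value cached in a register across the exchange.

/* Hypothetical illustration of a cmpxchg() retry loop. */
static inline unsigned long add_saturating(unsigned long *ctr,
					   unsigned long limit)
{
	unsigned long old, new;

	do {
		old = *ctr;			/* re-read on every retry */
		new = (old < limit) ? old + 1 : old;
		/* cmpxchg() must tell the compiler that memory changed,
		 * otherwise the load of *ctr above could be hoisted out
		 * of the loop. */
	} while (cmpxchg(ctr, old, new) != old);

	return new;
}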
@@ -63,7 +63,7 @@ struct callee_regs {
 	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };

-#define instruction_pointer(regs)	((regs)->ret)
+#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
 #define profile_pc(regs)		instruction_pointer(regs)

 /* return 1 if user mode or 0 if kernel mode */
@@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hcr = HCR_GUEST_MASK;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;
@@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);

+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
@@ -116,13 +117,14 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
 	(__boundary - 1 < (end) - 1)? __boundary: (end);	\
 })

+#define kvm_pgd_index(addr)	pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }

 #define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(pudp) (0)
@@ -74,6 +74,7 @@ struct secondary_data {
 };
 extern struct secondary_data secondary_data;
+extern volatile int pen_release;
 extern void secondary_startup(void);

 extern int __cpu_disable(void);

@@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary)
 	mcr	p15, 4, r7, c1, c1, 3	@ HSTR

 THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	orr	r7, #(1 << 9)		@ HSCTLR.EE
-#endif
+ARM_BE8(orr	r7, r7, #(1 << 25))	@ HSCTLR.EE
 	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR

 	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
@@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	int err;
 	struct kvm_vcpu *vcpu;

+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
 	if (!vcpu) {
 		err = -ENOMEM;
@@ -419,6 +424,7 @@ static void update_vttbr(struct kvm *kvm)

 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	int ret;

 	if (likely(vcpu->arch.has_run_once))
@@ -430,12 +436,20 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	 * Initialize the VGIC before running a vcpu the first time on
 	 * this VM.
 	 */
-	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+	if (unlikely(!vgic_initialized(kvm))) {
+		ret = kvm_vgic_init(kvm);
 		if (ret)
 			return ret;
 	}

+	/*
+	 * Enable the arch timers only if we have an in-kernel VGIC
+	 * and it has been properly initialized, since we cannot handle
+	 * interrupts from the virtual timer with a userspace gic.
+	 */
+	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+		kvm_timer_enable(kvm);
+
 	return 0;
 }

@@ -658,11 +672,22 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	if (ret)
 		return ret;

+	/*
+	 * Ensure a rebooted VM will fault in RAM pages and detect if the
+	 * guest MMU is turned off and flush the caches as needed.
+	 */
+	if (vcpu->arch.has_run_once)
+		stage2_unmap_vm(vcpu->kvm);
+
+	vcpu_reset_hcr(vcpu);
+
 	/*
 	 * Handle the "start in power-off" case by marking the VCPU as paused.
 	 */
-	if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
 		vcpu->arch.pause = true;
+	else
+		vcpu->arch.pause = false;

 	return 0;
 }
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {

 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.hcr = HCR_GUEST_MASK;
 	return 0;
 }

@@ -159,13 +159,9 @@ __kvm_vcpu_return:
 	@ Don't trap coprocessor accesses for host kernel
 	set_hstr vmexit
 	set_hdcr vmexit
-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

 #ifdef CONFIG_VFPv3
-	@ Save floating point registers we if let guest use them.
-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-	bne	after_vfp_restore
-
 	@ Switch VFP/NEON hardware state to the host's
 	add	r7, vcpu, #VCPU_VFP_GUEST
 	store_vfp_state r7
@@ -177,6 +173,8 @@ after_vfp_restore:
 	@ Restore FPEXC_EN which we clobbered on entry
 	pop	{r2}
 	VFPFMXR FPEXC, r2
+#else
+after_vfp_restore:
 #endif

 	@ Reset Hyp-role
@@ -472,7 +470,7 @@ switch_to_guest_vfp:
 	push	{r3-r7}

 	@ NEON/VFP used. Turn on VFP access.
-	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

 	@ Switch VFP/NEON hardware state to the guest's
 	add	r7, r0, #VCPU_VFP_HOST
@@ -592,8 +592,13 @@ ARM_BE8(rev r6, r6 )
 .endm

 /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
- * (hardware reset value is 0). Keep previous value in r2. */
-.macro set_hcptr operation, mask
+ * (hardware reset value is 0). Keep previous value in r2.
+ * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
+ * VFP wasn't already enabled (always executed on vmtrap).
+ * If a label is specified with vmexit, it is branched to if VFP wasn't
+ * enabled.
+ */
+.macro set_hcptr operation, mask, label = none
 	mrc	p15, 4, r2, c1, c1, 2
 	ldr	r3, =\mask
 	.if \operation == vmentry
@@ -602,6 +607,17 @@ ARM_BE8(rev r6, r6 )
 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
 	.endif
 	mcr	p15, 4, r3, c1, c1, 2
+	.if \operation != vmentry
+	.if \operation == vmexit
+	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	beq	1f
+	.endif
+	isb
+	.if \label != none
+	b	\label
+	.endif
+1:
+	.endif
 .endm

 /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
@@ -194,10 +194,11 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
|
||||
phys_addr_t addr = start, end = start + size;
|
||||
phys_addr_t next;
|
||||
|
||||
pgd = pgdp + pgd_index(addr);
|
||||
pgd = pgdp + kvm_pgd_index(addr);
|
||||
do {
|
||||
next = kvm_pgd_addr_end(addr, end);
|
||||
unmap_puds(kvm, pgd, addr, next);
|
||||
if (!pgd_none(*pgd))
|
||||
unmap_puds(kvm, pgd, addr, next);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
@@ -263,7 +264,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
|
||||
phys_addr_t next;
|
||||
pgd_t *pgd;
|
||||
|
||||
pgd = kvm->arch.pgd + pgd_index(addr);
|
||||
pgd = kvm->arch.pgd + kvm_pgd_index(addr);
|
||||
do {
|
||||
next = kvm_pgd_addr_end(addr, end);
|
||||
stage2_flush_puds(kvm, pgd, addr, next);
|
||||
@@ -555,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
|
||||
unmap_range(kvm, kvm->arch.pgd, start, size);
|
||||
}
|
||||
|
||||
static void stage2_unmap_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot)
|
||||
{
|
||||
hva_t hva = memslot->userspace_addr;
|
||||
phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
|
||||
phys_addr_t size = PAGE_SIZE * memslot->npages;
|
||||
hva_t reg_end = hva + size;
|
||||
|
||||
/*
|
||||
* A memory region could potentially cover multiple VMAs, and any holes
|
||||
* between them, so iterate over all of them to find out if we should
|
||||
* unmap any of them.
|
||||
*
|
||||
* +--------------------------------------------+
|
||||
* +---------------+----------------+ +----------------+
|
||||
* | : VMA 1 | VMA 2 | | VMA 3 : |
|
||||
* +---------------+----------------+ +----------------+
|
||||
* | memory region |
|
||||
* +--------------------------------------------+
|
||||
*/
|
||||
do {
|
||||
struct vm_area_struct *vma = find_vma(current->mm, hva);
|
||||
hva_t vm_start, vm_end;
|
||||
|
||||
if (!vma || vma->vm_start >= reg_end)
|
||||
break;
|
||||
|
||||
/*
|
||||
* Take the intersection of this VMA with the memory region
|
||||
*/
|
||||
vm_start = max(hva, vma->vm_start);
|
||||
vm_end = min(reg_end, vma->vm_end);
|
||||
|
||||
if (!(vma->vm_flags & VM_PFNMAP)) {
|
||||
gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
|
||||
unmap_stage2_range(kvm, gpa, vm_end - vm_start);
|
||||
}
|
||||
hva = vm_end;
|
||||
} while (hva < reg_end);
|
||||
}
|
||||
|
||||
/**
|
||||
* stage2_unmap_vm - Unmap Stage-2 RAM mappings
|
||||
* @kvm: The struct kvm pointer
|
||||
*
|
||||
* Go through the memregions and unmap any reguler RAM
|
||||
* backing memory already mapped to the VM.
|
||||
*/
|
||||
void stage2_unmap_vm(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_memslots *slots;
|
||||
struct kvm_memory_slot *memslot;
|
||||
int idx;
|
||||
|
||||
idx = srcu_read_lock(&kvm->srcu);
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
|
||||
slots = kvm_memslots(kvm);
|
||||
kvm_for_each_memslot(memslot, slots)
|
||||
stage2_unmap_memslot(kvm, memslot);
|
||||
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
srcu_read_unlock(&kvm->srcu, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_free_stage2_pgd - free all stage-2 tables
|
||||
* @kvm: The KVM struct pointer for the VM.
|
||||
@@ -583,7 +649,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
|
||||
pgd = kvm->arch.pgd + pgd_index(addr);
|
||||
pgd = kvm->arch.pgd + kvm_pgd_index(addr);
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud_none(*pud)) {
|
||||
if (!cache)
|
||||
@@ -754,6 +820,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
||||
return kvm_vcpu_dabt_iswrite(vcpu);
|
||||
}
|
||||
|
||||
static bool kvm_is_device_pfn(unsigned long pfn)
|
||||
{
|
||||
return !pfn_valid(pfn);
|
||||
}
|
||||
|
||||
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
struct kvm_memory_slot *memslot, unsigned long hva,
|
||||
unsigned long fault_status)
|
||||
@@ -777,6 +848,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
/* Let's check if we will get back a huge page backed by hugetlbfs */
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
vma = find_vma_intersection(current->mm, hva, hva + 1);
|
||||
if (unlikely(!vma)) {
|
||||
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
if (is_vm_hugetlb_page(vma)) {
|
||||
hugetlb = true;
|
||||
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
|
||||
@@ -817,7 +894,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (is_error_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
if (kvm_is_mmio_pfn(pfn))
|
||||
if (kvm_is_device_pfn(pfn))
|
||||
mem_type = PAGE_S2_DEVICE;
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
@@ -843,7 +920,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
}
|
||||
coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
|
||||
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
|
||||
mem_type == PAGE_S2_DEVICE);
|
||||
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
|
||||
}
|
||||
|
||||
|
||||
@@ -916,6 +993,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Userspace should not be able to register out-of-bounds IPAs */
|
||||
VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
|
||||
|
||||
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
|
||||
if (ret == 0)
|
||||
ret = 1;
|
||||
@@ -1140,6 +1220,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
||||
struct kvm_userspace_memory_region *mem,
|
||||
enum kvm_mr_change change)
|
||||
{
|
||||
/*
|
||||
* Prevent userspace from creating a memory region outside of the IPA
|
||||
* space addressable by the KVM guest IPA space.
|
||||
*/
|
||||
if (memslot->base_gfn + memslot->npages >=
|
||||
(KVM_PHYS_SIZE >> PAGE_SHIFT))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ static void __init dove_dt_init(void)
|
||||
#ifdef CONFIG_CACHE_TAUROS2
|
||||
tauros2_init(0);
|
||||
#endif
|
||||
BUG_ON(mvebu_mbus_dt_init());
|
||||
BUG_ON(mvebu_mbus_dt_init(false));
|
||||
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
|
||||
}
|
||||
|
||||
|
||||
@@ -406,7 +406,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
|
||||
clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
|
||||
clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
|
||||
clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
|
||||
clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
|
||||
clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
|
||||
clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
|
||||
clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
|
||||
clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
|
||||
|
||||
@@ -116,7 +116,7 @@ static void __init kirkwood_dt_init(void)
|
||||
*/
|
||||
writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
|
||||
|
||||
BUG_ON(mvebu_mbus_dt_init());
|
||||
BUG_ON(mvebu_mbus_dt_init(false));
|
||||
|
||||
kirkwood_l2_init();
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ static void __init armada_370_xp_timer_and_clk_init(void)
|
||||
of_clk_init(NULL);
|
||||
clocksource_of_init();
|
||||
coherency_init();
|
||||
BUG_ON(mvebu_mbus_dt_init());
|
||||
BUG_ON(mvebu_mbus_dt_init(coherency_available()));
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
l2x0_of_init(0, ~0UL);
|
||||
#endif
|
||||
|
||||
@@ -121,6 +121,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
|
||||
.notifier_call = mvebu_hwcc_platform_notifier,
|
||||
};
|
||||
|
||||
/*
|
||||
* Keep track of whether we have IO hardware coherency enabled or not.
|
||||
* On Armada 370's we will not be using it for example. We need to make
|
||||
* that available [through coherency_available()] so the mbus controller
|
||||
* doesn't enable the IO coherency bit in the attribute bits of the
|
||||
* chip selects.
|
||||
*/
|
||||
static int coherency_enabled;
|
||||
|
||||
int coherency_available(void)
|
||||
{
|
||||
return coherency_enabled;
|
||||
}
|
||||
|
||||
int __init coherency_init(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
@@ -164,6 +178,7 @@ int __init coherency_init(void)
|
||||
coherency_base = of_iomap(np, 0);
|
||||
coherency_cpu_base = of_iomap(np, 1);
|
||||
set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
|
||||
coherency_enabled = 1;
|
||||
of_node_put(np);
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
extern unsigned long coherency_phys_base;
|
||||
|
||||
int set_cpu_coherent(unsigned int cpu_id, int smp_group_id);
|
||||
int coherency_available(void);
|
||||
int coherency_init(void);
|
||||
|
||||
#endif /* __MACH_370_XP_COHERENCY_H */
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
obj-$(CONFIG_ARCH_SUNXI) += sunxi.o
|
||||
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
|
||||
obj-$(CONFIG_SMP) += platsmp.o
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
.section ".text.head", "ax"
|
||||
|
||||
ENTRY(sun6i_secondary_startup)
|
||||
msr cpsr_fsxc, #0xd3
|
||||
b secondary_startup
|
||||
ENDPROC(sun6i_secondary_startup)
|
||||
@@ -82,7 +82,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu,
|
||||
spin_lock(&cpu_lock);
|
||||
|
||||
/* Set CPU boot address */
|
||||
writel(virt_to_phys(sun6i_secondary_startup),
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
cpucfg_membase + CPUCFG_PRIVATE0_REG);
|
||||
|
||||
/* Assert the CPU core in reset */
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
#ifndef __ARM64_KVM_ARM_H__
|
||||
#define __ARM64_KVM_ARM_H__
|
||||
|
||||
#include <asm/memory.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
/* Hyp Configuration Register (HCR) bits */
|
||||
@@ -185,13 +186,13 @@
|
||||
|
||||
/* Exception Syndrome Register (ESR) bits */
|
||||
#define ESR_EL2_EC_SHIFT (26)
|
||||
#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT)
|
||||
#define ESR_EL2_IL (1U << 25)
|
||||
#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
|
||||
#define ESR_EL2_IL (UL(1) << 25)
|
||||
#define ESR_EL2_ISS (ESR_EL2_IL - 1)
|
||||
#define ESR_EL2_ISV_SHIFT (24)
|
||||
#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT)
|
||||
#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
|
||||
#define ESR_EL2_SAS_SHIFT (22)
|
||||
#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT)
|
||||
#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
|
||||
#define ESR_EL2_SSE (1 << 21)
|
||||
#define ESR_EL2_SRT_SHIFT (16)
|
||||
#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
|
||||
@@ -205,16 +206,16 @@
|
||||
#define ESR_EL2_FSC_TYPE (0x3c)
|
||||
|
||||
#define ESR_EL2_CV_SHIFT (24)
|
||||
#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT)
|
||||
#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
|
||||
#define ESR_EL2_COND_SHIFT (20)
|
||||
#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT)
|
||||
#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
|
||||
|
||||
|
||||
#define FSC_FAULT (0x04)
|
||||
#define FSC_PERM (0x0c)
|
||||
|
||||
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
|
||||
#define HPFAR_MASK (~0xFUL)
|
||||
#define HPFAR_MASK (~UL(0xf))
|
||||
|
||||
#define ESR_EL2_EC_UNKNOWN (0x00)
|
||||
#define ESR_EL2_EC_WFI (0x01)
|
||||
|
||||
@@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
|
||||
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
|
||||
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
|
||||
|
||||
@@ -69,11 +69,14 @@
|
||||
#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
|
||||
#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
|
||||
|
||||
#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
|
||||
|
||||
int create_hyp_mappings(void *from, void *to);
|
||||
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
|
||||
void free_boot_hyp_pgd(void);
|
||||
void free_hyp_pgds(void);
|
||||
|
||||
void stage2_unmap_vm(struct kvm *kvm);
|
||||
int kvm_alloc_stage2_pgd(struct kvm *kvm);
|
||||
void kvm_free_stage2_pgd(struct kvm *kvm);
|
||||
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
||||
@@ -125,6 +128,7 @@ static inline bool kvm_page_empty(void *ptr)
|
||||
#endif
|
||||
#define kvm_pud_table_empty(pudp) (0)
|
||||
|
||||
|
||||
struct kvm;
|
||||
|
||||
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
|
||||
|
||||
@@ -537,6 +537,7 @@ el0_dbg:
|
||||
disable_step x1
|
||||
mov x1, x25
|
||||
mov x2, sp
|
||||
adr lr, ret_from_exception
|
||||
b do_debug_exception
|
||||
el0_inv:
|
||||
ct_user_exit
|
||||
|
||||
@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
|
||||
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
|
||||
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
|
||||
|
||||
# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
|
||||
# down to collect2, resulting in silent corruption of the vDSO image.
|
||||
ccflags-y += -Wl,-shared
|
||||
|
||||
obj-y += vdso.o
|
||||
extra-y += vdso.lds vdso-offsets.h
|
||||
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
|
||||
|
||||
@@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
||||
|
||||
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
|
||||
* Instead, we invalidate Stage-2 for this IPA, and the
|
||||
* whole of Stage-1. Weep...
|
||||
*/
|
||||
lsr x1, x1, #12
|
||||
tlbi ipas2e1is, x1
|
||||
/*
|
||||
* We have to ensure completion of the invalidation at Stage-2,
|
||||
|
||||
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
if (!cpu_has_32bit_el1())
|
||||
return -EINVAL;
|
||||
cpu_reset = &default_regs_reset32;
|
||||
vcpu->arch.hcr_el2 &= ~HCR_RW;
|
||||
} else {
|
||||
cpu_reset = &default_regs_reset;
|
||||
}
|
||||
|
||||
@@ -92,6 +92,14 @@ static void reset_context(void *info)
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct mm_struct *mm = current->active_mm;
|
||||
|
||||
/*
|
||||
* current->active_mm could be init_mm for the idle thread immediately
|
||||
* after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
|
||||
* the reserved value, so no need to reset any context.
|
||||
*/
|
||||
if (mm == &init_mm)
|
||||
return;
|
||||
|
||||
smp_rmb();
|
||||
asid = cpu_last_asid + cpu;
|
||||
|
||||
|
||||
@@ -116,8 +116,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
|
||||
|
||||
*dma_handle = phys_to_dma(dev, page_to_phys(page));
|
||||
addr = page_address(page);
|
||||
if (flags & __GFP_ZERO)
|
||||
memset(addr, 0, size);
|
||||
memset(addr, 0, size);
|
||||
return addr;
|
||||
} else {
|
||||
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
|
||||
|
||||
@@ -46,13 +46,13 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
|
||||
|
||||
int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
return !(pmd_val(pmd) & PMD_TABLE_BIT);
|
||||
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
|
||||
}
|
||||
|
||||
int pud_huge(pud_t pud)
|
||||
{
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
return !(pud_val(pud) & PUD_TABLE_BIT);
|
||||
return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
@@ -268,7 +268,7 @@ static void __init free_unused_memmap(void)
|
||||
* memmap entries are valid from the bank end aligned to
|
||||
* MAX_ORDER_NR_PAGES.
|
||||
*/
|
||||
prev_end = ALIGN(start + __phys_to_pfn(reg->size),
|
||||
prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
|
||||
MAX_ORDER_NR_PAGES);
|
||||
}
|
||||
|
||||
|
||||
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
__clk_enable(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (IS_ERR_OR_NULL(clk))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
__clk_disable(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
|
||||
unsigned long flags;
|
||||
unsigned long rate;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
rate = clk->get_rate(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
|
||||
{
|
||||
unsigned long flags, actual_rate;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_rate)
|
||||
return -ENOSYS;
|
||||
|
||||
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
|
||||
unsigned long flags;
|
||||
long ret;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_rate)
|
||||
return -ENOSYS;
|
||||
|
||||
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_parent)
|
||||
return -ENOSYS;
|
||||
|
||||
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
|
||||
|
||||
struct clk *clk_get_parent(struct clk *clk)
|
||||
{
|
||||
return clk->parent;
|
||||
return !clk ? NULL : clk->parent;
|
||||
}
|
||||
EXPORT_SYMBOL(clk_get_parent);
|
||||
|
||||
|
||||
@@ -94,7 +94,11 @@
|
||||
#endif
|
||||
|
||||
#ifndef FIXADDR_TOP
|
||||
#ifdef CONFIG_KVM_GUEST
|
||||
#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
|
||||
#else
|
||||
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_MACH_GENERIC_SPACES_H */
|
||||
|
||||
@@ -1626,7 +1626,7 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
if (vcpu->mmio_needed == 2)
|
||||
*gpr = *(int16_t *) run->mmio.data;
|
||||
else
|
||||
*gpr = *(int16_t *) run->mmio.data;
|
||||
*gpr = *(uint16_t *)run->mmio.data;
|
||||
|
||||
break;
|
||||
case 1:
|
||||
|
||||
@@ -124,7 +124,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
|
||||
|
||||
static bool regs_use_siar(struct pt_regs *regs)
|
||||
{
|
||||
return !!regs->result;
|
||||
/*
|
||||
* When we take a performance monitor exception the regs are setup
|
||||
* using perf_read_regs() which overloads some fields, in particular
|
||||
* regs->result to tell us whether to use SIAR.
|
||||
*
|
||||
* However if the regs are from another exception, eg. a syscall, then
|
||||
* they have not been setup using perf_read_regs() and so regs->result
|
||||
* is something random.
|
||||
*/
|
||||
return ((TRAP(regs) == 0xf00) && regs->result);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -276,6 +276,8 @@ ENTRY(_sclp_print_early)
|
||||
jno .Lesa2
|
||||
ahi %r15,-80
|
||||
stmh %r6,%r15,96(%r15) # store upper register halves
|
||||
basr %r13,0
|
||||
lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
|
||||
.Lesa2:
|
||||
#endif
|
||||
lr %r10,%r2 # save string pointer
|
||||
@@ -299,6 +301,8 @@ ENTRY(_sclp_print_early)
|
||||
#endif
|
||||
lm %r6,%r15,120(%r15) # restore registers
|
||||
br %r14
|
||||
.Lzeroes:
|
||||
.fill 64,4,0
|
||||
|
||||
.LwritedataS4:
|
||||
.long 0x00760005 # SCLP command for write data
|
||||
|
||||
@@ -2307,7 +2307,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
|
||||
if (len & (8UL - 1))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
buf = kzalloc(len, GFP_KERNEL);
|
||||
buf = kzalloc(len, GFP_ATOMIC);
|
||||
if (!buf)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
|
||||
@@ -1146,7 +1146,7 @@ static void __init load_hv_initrd(void)
|
||||
|
||||
void __init free_initrd_mem(unsigned long begin, unsigned long end)
|
||||
{
|
||||
free_bootmem(__pa(begin), end - begin);
|
||||
free_bootmem_late(__pa(begin), end - begin);
|
||||
}
|
||||
|
||||
static int __init setup_initrd(char *str)
|
||||
|
||||
@@ -161,7 +161,7 @@ config SBUS
|
||||
|
||||
config NEED_DMA_MAP_STATE
|
||||
def_bool y
|
||||
depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
|
||||
depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB
|
||||
|
||||
config NEED_SG_DMA_LENGTH
|
||||
def_bool y
|
||||
@@ -2441,9 +2441,19 @@ config X86_DMA_REMAP
|
||||
depends on STA2X11
|
||||
|
||||
config IOSF_MBI
|
||||
tristate
|
||||
default m
|
||||
tristate "Intel System On Chip IOSF Sideband support"
|
||||
depends on PCI
|
||||
---help---
|
||||
Enables sideband access to mailbox registers on SoC's. The sideband is
|
||||
available on the following platforms. This list is not meant to be
|
||||
exclusive.
|
||||
- BayTrail
|
||||
- Cherryview
|
||||
- Braswell
|
||||
- Quark
|
||||
|
||||
You should say Y if you are running a kernel on one of these
|
||||
platforms.
|
||||
|
||||
source "net/Kconfig"
|
||||
|
||||
|
||||
@@ -559,6 +559,10 @@ static efi_status_t setup_e820(struct boot_params *params,
|
||||
unsigned int e820_type = 0;
|
||||
unsigned long m = efi->efi_memmap;
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
m |= (u64)efi->efi_memmap_hi << 32;
|
||||
#endif
|
||||
|
||||
d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
|
||||
switch (d->type) {
|
||||
case EFI_RESERVED_TYPE:
|
||||
|
||||
@@ -54,7 +54,7 @@ ENTRY(efi_pe_entry)
|
||||
call reloc
|
||||
reloc:
|
||||
popl %ecx
|
||||
subl reloc, %ecx
|
||||
subl $reloc, %ecx
|
||||
movl %ecx, BP_code32_start(%eax)
|
||||
|
||||
sub $0x4, %esp
|
||||
|
||||
@@ -567,7 +567,7 @@ struct kvm_arch {
|
||||
struct kvm_pic *vpic;
|
||||
struct kvm_ioapic *vioapic;
|
||||
struct kvm_pit *vpit;
|
||||
int vapics_in_nmi_mode;
|
||||
atomic_t vapics_in_nmi_mode;
|
||||
struct mutex apic_map_lock;
|
||||
struct kvm_apic_map *apic_map;
|
||||
|
||||
|
||||
@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
|
||||
unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
|
||||
int i;
|
||||
|
||||
while (leftover) {
|
||||
while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
|
||||
mc_header = (struct microcode_header_intel *)ucode_ptr;
|
||||
|
||||
mc_size = get_totalsize(mc_header);
|
||||
|
||||
@@ -326,13 +326,16 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
||||
{
|
||||
struct insn insn;
|
||||
kprobe_opcode_t buf[MAX_INSN_SIZE];
|
||||
int length;
|
||||
|
||||
kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
|
||||
insn_get_length(&insn);
|
||||
length = insn.length;
|
||||
|
||||
/* Another subsystem puts a breakpoint, failed to recover */
|
||||
if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
|
||||
return 0;
|
||||
memcpy(dest, insn.kaddr, insn.length);
|
||||
memcpy(dest, insn.kaddr, length);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
if (insn_rip_relative(&insn)) {
|
||||
@@ -362,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
|
||||
*(s32 *) disp = (s32) newdisp;
|
||||
}
|
||||
#endif
|
||||
return insn.length;
|
||||
return length;
|
||||
}
|
||||
|
||||
static int __kprobes arch_copy_kprobe(struct kprobe *p)
|
||||
|
||||
@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
|
||||
* LVT0 to NMI delivery. Other PIC interrupts are just sent to
|
||||
* VCPU0, and only if its LVT0 is in EXTINT mode.
|
||||
*/
|
||||
if (kvm->arch.vapics_in_nmi_mode > 0)
|
||||
if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
|
||||
kvm_for_each_vcpu(i, vcpu, kvm)
|
||||
kvm_apic_nmi_wd_deliver(vcpu);
|
||||
}
|
||||
|
||||
@@ -1109,10 +1109,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
|
||||
if (!nmi_wd_enabled) {
|
||||
apic_debug("Receive NMI setting on APIC_LVT0 "
|
||||
"for cpu %d\n", apic->vcpu->vcpu_id);
|
||||
apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
|
||||
atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
|
||||
}
|
||||
} else if (nmi_wd_enabled)
|
||||
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
|
||||
atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
|
||||
}
|
||||
|
||||
static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
||||
|
||||
@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
||||
if (svm->vmcb->control.next_rip != 0)
|
||||
if (svm->vmcb->control.next_rip != 0) {
|
||||
WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
|
||||
svm->next_rip = svm->vmcb->control.next_rip;
|
||||
}
|
||||
|
||||
if (!svm->next_rip) {
|
||||
if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
|
||||
@@ -4246,7 +4248,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
|
||||
break;
|
||||
}
|
||||
|
||||
vmcb->control.next_rip = info->next_rip;
|
||||
/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
|
||||
if (static_cpu_has(X86_FEATURE_NRIPS))
|
||||
vmcb->control.next_rip = info->next_rip;
|
||||
vmcb->control.exit_code = icpt_info.exit_code;
|
||||
vmexit = nested_svm_exit_handled(svm);
|
||||
|
||||
|
||||
@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
|
||||
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
|
||||
},
|
||||
},
|
||||
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
|
||||
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
|
||||
{
|
||||
.callback = set_use_crs,
|
||||
.ident = "Foxconn K8M890-8237A",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
|
||||
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
|
||||
},
|
||||
},
|
||||
|
||||
/* Now for the blacklist.. */
|
||||
|
||||
@@ -124,8 +135,10 @@ void __init pci_acpi_crs_quirks(void)
|
||||
{
|
||||
int year;
|
||||
|
||||
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
|
||||
pci_use_crs = false;
|
||||
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
|
||||
if (iomem_resource.end <= 0xffffffff)
|
||||
pci_use_crs = false;
|
||||
}
|
||||
|
||||
dmi_check_system(pci_crs_quirks);
|
||||
|
||||
|
||||
@@ -703,8 +703,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
|
||||
return -EINVAL;
|
||||
|
||||
disk = get_gendisk(MKDEV(major, minor), &part);
|
||||
if (!disk || part)
|
||||
if (!disk)
|
||||
return -EINVAL;
|
||||
if (part) {
|
||||
put_disk(disk);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock_irq(disk->queue->queue_lock);
|
||||
|
||||
@@ -175,10 +175,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
|
||||
* Obtain a permanent mapping for the FACS. This is required for the
|
||||
* Global Lock and the Firmware Waking Vector
|
||||
*/
|
||||
status = acpi_tb_initialize_facs();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
|
||||
return_ACPI_STATUS(status);
|
||||
if (!(flags & ACPI_NO_FACS_INIT)) {
|
||||
status = acpi_tb_initialize_facs();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
}
|
||||
#endif /* !ACPI_REDUCED_HARDWARE */
|
||||
|
||||
|
||||
@@ -450,6 +450,16 @@ static int __init acpi_bus_init_irq(void)
|
||||
u8 acpi_gbl_permanent_mmap;
|
||||
|
||||
|
||||
/**
|
||||
* acpi_early_init - Initialize ACPICA and populate the ACPI namespace.
|
||||
*
|
||||
* The ACPI tables are accessible after this, but the handling of events has not
|
||||
* been initialized and the global lock is not available yet, so AML should not
|
||||
* be executed at this point.
|
||||
*
|
||||
* Doing this before switching the EFI runtime services to virtual mode allows
|
||||
* the EfiBootServices memory to be freed slightly earlier on boot.
|
||||
*/
|
||||
void __init acpi_early_init(void)
|
||||
{
|
||||
acpi_status status;
|
||||
@@ -510,26 +520,42 @@ void __init acpi_early_init(void)
|
||||
acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
|
||||
}
|
||||
#endif
|
||||
return;
|
||||
|
||||
error0:
|
||||
disable_acpi();
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_subsystem_init - Finalize the early initialization of ACPI.
|
||||
*
|
||||
* Switch over the platform to the ACPI mode (if possible), initialize the
|
||||
* handling of ACPI events, install the interrupt and global lock handlers.
|
||||
*
|
||||
* Doing this too early is generally unsafe, but at the same time it needs to be
|
||||
* done before all things that really depend on ACPI. The right spot appears to
|
||||
* be before finalizing the EFI initialization.
|
||||
*/
|
||||
void __init acpi_subsystem_init(void)
|
||||
{
|
||||
acpi_status status;
|
||||
|
||||
if (acpi_disabled)
|
||||
return;
|
||||
|
||||
status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
|
||||
goto error0;
|
||||
disable_acpi();
|
||||
} else {
|
||||
/*
|
||||
* If the system is using ACPI then we can be reasonably
|
||||
* confident that any regulators are managed by the firmware
|
||||
* so tell the regulator core it has everything it needs to
|
||||
* know.
|
||||
*/
|
||||
regulator_has_full_constraints();
|
||||
}
|
||||
|
||||
/*
|
||||
* If the system is using ACPI then we can be reasonably
|
||||
* confident that any regulators are managed by the firmware
|
||||
* so tell the regulator core it has everything it needs to
|
||||
* know.
|
||||
*/
|
||||
regulator_has_full_constraints();
|
||||
|
||||
return;
|
||||
|
||||
error0:
|
||||
disable_acpi();
|
||||
return;
|
||||
}
|
||||
|
||||
static int __init acpi_bus_init(void)
|
||||
|
||||
@@ -4173,9 +4173,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
|
||||
ATA_HORKAGE_FIRMWARE_WARN },
|
||||
|
||||
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
|
||||
/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
|
||||
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
|
||||
/* Blacklist entries taken from Silicon Image 3124/3132
|
||||
Windows driver .inf file - also several Linux problem reports */
|
||||
@@ -4229,6 +4230,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
||||
{ "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
|
||||
|
||||
/* devices that don't properly handle TRIM commands */
|
||||
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
|
||||
|
||||
/*
|
||||
* Some WD SATA-I drives spin up and down erratically when the link
|
||||
* is put into the slumber mode. We don't have full list of the
|
||||
@@ -4533,7 +4537,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
|
||||
else /* In the ancient relic department - skip all of this */
|
||||
return 0;
|
||||
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
|
||||
/* On some disks, this command causes spin-up, so we need longer timeout */
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
|
||||
|
||||
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
||||
return err_mask;
|
||||
|
||||
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
|
||||
ATA_LFLAG_NO_SRST |
|
||||
ATA_LFLAG_ASSUME_ATA;
|
||||
}
|
||||
} else if (vendor == 0x11ab && devid == 0x4140) {
|
||||
/* Marvell 4140 quirks */
|
||||
ata_for_each_link(link, ap, EDGE) {
|
||||
/* port 4 is for SEMB device and it doesn't like SRST */
|
||||
if (link->pmp == 4)
|
||||
link->flags |= ATA_LFLAG_DISABLED;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2510,7 +2510,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
|
||||
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
|
||||
rbuf[15] = lowest_aligned;
|
||||
|
||||
if (ata_id_has_trim(args->id)) {
|
||||
if (ata_id_has_trim(args->id) &&
|
||||
!(dev->horkage & ATA_HORKAGE_NOTRIM)) {
|
||||
rbuf[14] |= 0x80; /* TPE */
|
||||
|
||||
if (ata_id_has_zero_after_trim(args->id))
|
||||
|
||||
@@ -544,10 +544,8 @@ static void fw_dev_release(struct device *dev)
|
||||
kfree(fw_priv);
|
||||
}
|
||||
|
||||
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct firmware_priv *fw_priv = to_firmware_priv(dev);
|
||||
|
||||
if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
|
||||
return -ENOMEM;
|
||||
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
|
||||
@@ -558,6 +556,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
|
||||
{
|
||||
struct firmware_priv *fw_priv = to_firmware_priv(dev);
|
||||
int err = 0;
|
||||
|
||||
mutex_lock(&fw_lock);
|
||||
if (fw_priv->buf)
|
||||
err = do_firmware_uevent(fw_priv, env);
|
||||
mutex_unlock(&fw_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct class firmware_class = {
|
||||
.name = "firmware",
|
||||
.class_attrs = firmware_class_attrs,
|
||||
|
||||
@@ -808,11 +808,10 @@ EXPORT_SYMBOL_GPL(devm_regmap_init);
|
||||
static void regmap_field_init(struct regmap_field *rm_field,
|
||||
struct regmap *regmap, struct reg_field reg_field)
|
||||
{
|
||||
int field_bits = reg_field.msb - reg_field.lsb + 1;
|
||||
rm_field->regmap = regmap;
|
||||
rm_field->reg = reg_field.reg;
|
||||
rm_field->shift = reg_field.lsb;
|
||||
rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
|
||||
rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
|
||||
rm_field->id_size = reg_field.id_size;
|
||||
rm_field->id_offset = reg_field.id_offset;
|
||||
}
|
||||
@@ -1947,7 +1946,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
|
||||
&ival);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
memcpy(val + (i * val_bytes), &ival, val_bytes);
|
||||
map->format.format_val(val + (i * val_bytes), ival, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1826,11 +1826,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
|
||||
rbd_assert(obj_request_type_valid(type));
|
||||
|
||||
size = strlen(object_name) + 1;
|
||||
name = kmalloc(size, GFP_KERNEL);
|
||||
name = kmalloc(size, GFP_NOIO);
|
||||
if (!name)
|
||||
return NULL;
|
||||
|
||||
obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
|
||||
obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
|
||||
if (!obj_request) {
|
||||
kfree(name);
|
||||
return NULL;
|
||||
|
||||
@@ -79,6 +79,7 @@ static const struct usb_device_id ath3k_table[] = {
|
||||
{ USB_DEVICE(0x0489, 0xe057) },
|
||||
{ USB_DEVICE(0x0489, 0xe056) },
|
||||
{ USB_DEVICE(0x0489, 0xe05f) },
|
||||
{ USB_DEVICE(0x0489, 0xe076) },
|
||||
{ USB_DEVICE(0x0489, 0xe078) },
|
||||
{ USB_DEVICE(0x04c5, 0x1330) },
|
||||
{ USB_DEVICE(0x04CA, 0x3004) },
|
||||
@@ -109,6 +110,7 @@ static const struct usb_device_id ath3k_table[] = {
|
||||
{ USB_DEVICE(0x13d3, 0x3402) },
|
||||
{ USB_DEVICE(0x13d3, 0x3408) },
|
||||
{ USB_DEVICE(0x13d3, 0x3432) },
|
||||
{ USB_DEVICE(0x13d3, 0x3474) },
|
||||
|
||||
/* Atheros AR5BBU12 with sflash firmware */
|
||||
{ USB_DEVICE(0x0489, 0xE02C) },
|
||||
@@ -133,6 +135,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
|
||||
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
@@ -163,6 +166,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
|
||||
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
|
||||
|
||||
/* Atheros AR5BBU22 with sflash firmware */
|
||||
{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
|
||||
|
||||
@@ -157,6 +157,7 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
|
||||
@@ -187,6 +188,7 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
|
||||
|
||||
/* Atheros AR5BBU12 with sflash firmware */
|
||||
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
|
||||
@@ -1291,6 +1293,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
}
|
||||
fw_ptr = fw->data;
|
||||
|
||||
kfree_skb(skb);
|
||||
|
||||
/* This Intel specific command enables the manufacturer mode of the
|
||||
* controller.
|
||||
*
|
||||
|
||||
@@ -701,7 +701,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
|
||||
phys_addr_t sdramwins_phys_base,
|
||||
size_t sdramwins_size)
|
||||
{
|
||||
struct device_node *np;
|
||||
int win;
|
||||
|
||||
mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
|
||||
@@ -714,12 +713,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
|
||||
if (np) {
|
||||
mbus->hw_io_coherency = 1;
|
||||
of_node_put(np);
|
||||
}
|
||||
|
||||
for (win = 0; win < mbus->soc->num_wins; win++)
|
||||
mvebu_mbus_disable_window(mbus, win);
|
||||
|
||||
@@ -889,7 +882,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
|
||||
}
|
||||
}
|
||||
|
||||
int __init mvebu_mbus_dt_init(void)
|
||||
int __init mvebu_mbus_dt_init(bool is_coherent)
|
||||
{
|
||||
struct resource mbuswins_res, sdramwins_res;
|
||||
struct device_node *np, *controller;
|
||||
@@ -928,6 +921,8 @@ int __init mvebu_mbus_dt_init(void)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mbus_state.hw_io_coherency = is_coherent;
|
||||
|
||||
/* Get optional pcie-{mem,io}-aperture properties */
|
||||
mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
|
||||
&mbus_state.pcie_io_aperture);
|
||||
|
||||
@@ -586,7 +586,7 @@ static inline int needs_ilk_vtd_wa(void)
|
||||
/* Query intel_iommu to see if we need the workaround. Presumably that
|
||||
* was loaded first.
|
||||
*/
|
||||
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
|
||||
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
|
||||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
|
||||
intel_iommu_gfx_mapped)
|
||||
return 1;
|
||||
|
||||
@@ -579,6 +579,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
ibmvtpm->dev = dev;
|
||||
ibmvtpm->vdev = vio_dev;
|
||||
|
||||
crq_q = &ibmvtpm->crq_queue;
|
||||
crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!crq_q->crq_addr) {
|
||||
@@ -623,8 +626,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
|
||||
|
||||
crq_q->index = 0;
|
||||
|
||||
ibmvtpm->dev = dev;
|
||||
ibmvtpm->vdev = vio_dev;
|
||||
TPM_VPRIV(chip) = (void *)ibmvtpm;
|
||||
|
||||
spin_lock_init(&ibmvtpm->rtce_lock);
|
||||
|
||||
@@ -422,15 +422,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
|
||||
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
|
||||
|
||||
if (mct_int_type == MCT_INT_SPI) {
|
||||
evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
|
||||
if (request_irq(evt->irq, exynos4_mct_tick_isr,
|
||||
IRQF_TIMER | IRQF_NOBALANCING,
|
||||
evt->name, mevt)) {
|
||||
pr_err("exynos-mct: cannot register IRQ %d\n",
|
||||
evt->irq);
|
||||
|
||||
if (evt->irq == -1)
|
||||
return -EIO;
|
||||
}
|
||||
irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
|
||||
|
||||
irq_force_affinity(evt->irq, cpumask_of(cpu));
|
||||
enable_irq(evt->irq);
|
||||
} else {
|
||||
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
|
||||
}
|
||||
@@ -443,10 +440,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
|
||||
static void exynos4_local_timer_stop(struct clock_event_device *evt)
|
||||
{
|
||||
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
|
||||
if (mct_int_type == MCT_INT_SPI)
|
||||
free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
|
||||
else
|
||||
if (mct_int_type == MCT_INT_SPI) {
|
||||
if (evt->irq != -1)
|
||||
disable_irq_nosync(evt->irq);
|
||||
} else {
|
||||
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
|
||||
}
|
||||
}
|
||||
|
||||
static int exynos4_mct_cpu_notify(struct notifier_block *self,
|
||||
@@ -478,7 +477,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
|
||||
|
||||
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
|
||||
{
|
||||
int err;
|
||||
int err, cpu;
|
||||
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
|
||||
struct clk *mct_clk, *tick_clk;
|
||||
|
||||
@@ -505,7 +504,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
|
||||
WARN(err, "MCT: can't request IRQ %d (%d)\n",
|
||||
mct_irqs[MCT_L0_IRQ], err);
|
||||
} else {
|
||||
irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
|
||||
for_each_possible_cpu(cpu) {
|
||||
int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
|
||||
struct mct_clock_event_device *pcpu_mevt =
|
||||
per_cpu_ptr(&percpu_mct_tick, cpu);
|
||||
|
||||
pcpu_mevt->evt.irq = -1;
|
||||
|
||||
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
|
||||
if (request_irq(mct_irq,
|
||||
exynos4_mct_tick_isr,
|
||||
IRQF_TIMER | IRQF_NOBALANCING,
|
||||
pcpu_mevt->name, pcpu_mevt)) {
|
||||
pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
|
||||
cpu);
|
||||
|
||||
continue;
|
||||
}
|
||||
pcpu_mevt->evt.irq = mct_irq;
|
||||
}
|
||||
}
|
||||
|
||||
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
@@ -417,7 +417,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)

val |= vid;

wrmsrl(MSR_IA32_PERF_CTL, val);
wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

#define BYT_BCLK_FREQS 5

@@ -131,6 +131,9 @@ int cpuidle_idle_call(void)

/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (next_state < 0)
return -EBUSY;

if (need_resched()) {
dev->last_residency = 0;
/* give the governor an opportunity to reflect on the outcome */

@@ -302,7 +302,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}

data->last_state_idx = 0;
data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
data->exit_us = 0;

/* Special case when user has set very strict latency requirement */
@@ -56,7 +56,7 @@

/* Buffer, its dma address and lock */
struct buf_data {
u8 buf[RN_BUF_SIZE];
u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
dma_addr_t addr;
struct completion filled;
u32 hw_desc[DESC_JOB_O_LEN];

@@ -927,7 +927,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
be16_add_cpu(&link_tbl_ptr->len, cryptlen);
link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+ cryptlen);

/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
@@ -2563,6 +2564,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
kfree(t_alg);
return ERR_PTR(-EINVAL);
}
@@ -316,7 +316,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
dma_cookie_t cookie = 0;
int busy = mv_chan_is_busy(mv_chan);
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
int current_cleaned = 0;
struct mv_xor_desc *hw_desc;

dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -328,38 +329,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)

list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
chain_node) {
prefetch(_iter);
prefetch(&_iter->async_tx);

/* do not advance past the current descriptor loaded into the
* hardware channel, subsequent descriptors are either in
* process or have not been submitted
*/
if (seen_current)
break;
/* clean finished descriptors */
hw_desc = iter->hw_desc;
if (hw_desc->status & XOR_DESC_SUCCESS) {
cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
cookie);

/* stop the search if we reach the current descriptor and the
* channel is busy
*/
if (iter->async_tx.phys == current_desc) {
seen_current = 1;
if (busy)
/* done processing desc, clean slot */
mv_xor_clean_slot(iter, mv_chan);

/* break if we did cleaned the current */
if (iter->async_tx.phys == current_desc) {
current_cleaned = 1;
break;
}
} else {
if (iter->async_tx.phys == current_desc) {
current_cleaned = 0;
break;
}
}

cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

if (mv_xor_clean_slot(iter, mv_chan))
break;
}

if ((busy == 0) && !list_empty(&mv_chan->chain)) {
struct mv_xor_desc_slot *chain_head;
chain_head = list_entry(mv_chan->chain.next,
struct mv_xor_desc_slot,
chain_node);

mv_xor_start_new_chain(mv_chan, chain_head);
if (current_cleaned) {
/*
* current descriptor cleaned and removed, run
* from list head
*/
iter = list_entry(mv_chan->chain.next,
struct mv_xor_desc_slot,
chain_node);
mv_xor_start_new_chain(mv_chan, iter);
} else {
if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
/*
* descriptors are still waiting after
* current, trigger them
*/
iter = list_entry(iter->chain_node.next,
struct mv_xor_desc_slot,
chain_node);
mv_xor_start_new_chain(mv_chan, iter);
} else {
/*
* some descriptors are still waiting
* to be cleaned
*/
tasklet_schedule(&mv_chan->irq_tasklet);
}
}
}

if (cookie > 0)
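Editor's note: the idea behind this hunk is that the cleanup loop now trusts a success bit the engine writes back into each descriptor, instead of assuming everything behind the current descriptor has finished. A hedged illustration with hypothetical types, not the driver's own:

#include <linux/list.h>
#include <linux/types.h>

/* Illustrative sketch only: complete list entries whose status word the
 * hardware has marked done, and stop at the first unfinished one. */
struct demo_desc {
	u32 status;			/* engine sets DEMO_DESC_DONE when complete */
	struct list_head node;
};
#define DEMO_DESC_DONE 0x40000000

static void demo_cleanup(struct list_head *chain)
{
	struct demo_desc *d, *tmp;

	list_for_each_entry_safe(d, tmp, chain, node) {
		if (!(d->status & DEMO_DESC_DONE))
			break;		/* everything after this is still pending */
		list_del(&d->node);	/* caller is assumed to hold the channel lock */
	}
}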
@@ -33,6 +33,7 @@
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_DESC_SUCCESS 0x40000000

#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
@@ -765,7 +765,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
u32 reg;
u64 limit, prv = 0;
u64 tmp_mb;
u32 mb, kb;
u32 gb, mb;
u32 rir_way;

/*
@@ -775,15 +775,17 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pvt->tolm = pvt->info.get_tolm(pvt);
tmp_mb = (1 + pvt->tolm) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tolm);

/* Address range is already 45:25 */
pvt->tohm = pvt->info.get_tohm(pvt);
tmp_mb = (1 + pvt->tohm) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tohm);

/*
* Step 2) Get SAD range and SAD Interleave list
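Editor's note: the arithmetic change in this hunk is easy to check by hand. tmp_mb holds a size in MiB, so the integer GiB part comes from dividing by 1024 and the three fractional digits from scaling the remainder by 1000/1024; the old division by 1000 mislabeled GiB values. A small worked example (the values are illustrative):

	u64 tmp_mb = 1536;	/* size in MiB */
	u32 gb, mb;

	gb = div_u64_rem(tmp_mb, 1024, &mb);		/* gb = 1, mb = 512 */
	pr_info("%u.%03u GB\n", gb, (mb * 1000) / 1024);	/* prints "1.500 GB" */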
@@ -805,11 +807,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;

tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
n_sads,
get_dram_attr(reg),
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
reg);
@@ -840,9 +842,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;
tmp_mb = (limit + 1) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, mb, kb,
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)TAD_SOCK(reg),
(u32)TAD_CH(reg),
@@ -865,10 +867,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tad_ch_nilv_offset[j],
&reg);
tmp_mb = TAD_OFFSET(reg) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
i, j,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
reg);
}
@@ -890,10 +892,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)

tmp_mb = RIR_LIMIT(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
i, j,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
rir_way,
reg);
@@ -904,10 +906,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
&reg);
tmp_mb = RIR_OFFSET(reg) << 6;

mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)RIR_RNK_TGT(reg),
reg);
@@ -945,7 +947,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 ch_way, sck_way, pkg, sad_ha = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
u32 mb, gb;
u64 ch_addr, offset, limit = 0, prv = 0;

@@ -1183,10 +1185,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
continue;

limit = RIR_LIMIT(reg);
mb = div_u64_rem(limit >> 20, 1000, &kb);
gb = div_u64_rem(limit >> 20, 1024, &mb);
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
n_rir,
mb, kb,
gb, (mb*1000)/1024,
limit,
1 << RIR_WAY(reg));
if (ch_addr <= limit)
@@ -2155,8 +2155,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;

/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
/*
* Universal plane src offsets are only 16.16, prevent havoc for
* drivers using universal plane code internally.
*/
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;

drm_modeset_lock_all(dev);

@@ -1529,6 +1529,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BANDWIDTH;
}

if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
(mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
return MODE_H_ILLEGAL;
}

if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,

cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
cmd->u.surface_create.format = surf->surf.format;
cmd->u.surface_create.width = surf->surf.width;
cmd->u.surface_create.height = surf->surf.height;

@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
qobj = gem_to_qxl_bo(gobj);

ret = qxl_release_list_add(release, qobj);
if (ret)
if (ret) {
drm_gem_object_unreference_unlocked(gobj);
return NULL;
}

return qobj;
}
@@ -4148,6 +4148,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
WDOORBELL32(ring->doorbell_index, ring->wptr);
}

static void cik_compute_stop(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 j, tmp;

cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
/* Disable wptr polling. */
tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
tmp &= ~WPTR_POLL_EN;
WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
/* Disable HQD. */
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
WREG32(CP_HQD_PQ_RPTR, 0);
WREG32(CP_HQD_PQ_WPTR, 0);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
}

/**
* cik_cp_compute_enable - enable/disable the compute CP MEs
*
@@ -4161,6 +4186,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_MEC_CNTL, 0);
else {
/*
* To make hibernation reliable we need to clear compute ring
* configuration before halting the compute ring.
*/
mutex_lock(&rdev->srbm_mutex);
cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
mutex_unlock(&rdev->srbm_mutex);

WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;

@@ -266,6 +266,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
}
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;

/* FIXME use something else than big hammer but after few days can not
* seem to find good combination so reset SDMA blocks as it seems we
* do not shut them down properly. This fix hibernation and does not
* affect suspend to ram.
*/
WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
(void)RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
(void)RREG32(SRBM_SOFT_RESET);
}

/**
@@ -251,8 +251,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
}
}
mb();
radeon_gart_tlb_flush(rdev);
if (rdev->gart.ptr) {
mb();
radeon_gart_tlb_flush(rdev);
}
}

/**
@@ -294,8 +296,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
}
}
mb();
radeon_gart_tlb_flush(rdev);
if (rdev->gart.ptr) {
mb();
radeon_gart_tlb_flush(rdev);
}
return 0;
}
@@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;

mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
radeon_connector_hotplug(connector);
}
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}

@@ -2914,6 +2914,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ 0, 0, 0, 0 },
};
@@ -31,14 +31,11 @@
/* output format */
#define MCP3021_SAR_SHIFT 2
#define MCP3021_SAR_MASK 0x3ff

#define MCP3021_OUTPUT_RES 10 /* 10-bit resolution */
#define MCP3021_OUTPUT_SCALE 4

#define MCP3221_SAR_SHIFT 0
#define MCP3221_SAR_MASK 0xfff
#define MCP3221_OUTPUT_RES 12 /* 12-bit resolution */
#define MCP3221_OUTPUT_SCALE 1

enum chips {
mcp3021,
@@ -54,7 +51,6 @@ struct mcp3021_data {
u16 sar_shift;
u16 sar_mask;
u8 output_res;
u8 output_scale;
};

static int mcp3021_read16(struct i2c_client *client)
@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)

static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
{
if (val == 0)
return 0;

val = val * data->output_scale - data->output_scale / 2;

return val * DIV_ROUND_CLOSEST(data->vdd,
(1 << data->output_res) * data->output_scale);
return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
}

static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
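Editor's note: the simplified conversion above maps a raw ADC code straight to millivolts as round(vdd * code / 2^resolution). A quick worked example, assuming vdd is stored in millivolts as the defines above suggest:

	/* 10-bit MCP3021, vdd = 3300 mV, mid-scale code 512:
	 * DIV_ROUND_CLOSEST(3300 * 512, 1 << 10) = 1650 mV. */
	u32 vdd = 3300, code = 512, res = 10;
	u32 mv = DIV_ROUND_CLOSEST(vdd * code, 1 << res);	/* 1650 */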
@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
data->sar_shift = MCP3021_SAR_SHIFT;
data->sar_mask = MCP3021_SAR_MASK;
data->output_res = MCP3021_OUTPUT_RES;
data->output_scale = MCP3021_OUTPUT_SCALE;
break;

case mcp3221:
data->sar_shift = MCP3221_SAR_SHIFT;
data->sar_mask = MCP3221_SAR_MASK;
data->output_res = MCP3221_OUTPUT_RES;
data->output_scale = MCP3221_OUTPUT_SCALE;
break;
}
@@ -62,6 +62,9 @@
#define AT91_TWI_UNRE 0x0080 /* Underrun Error */
#define AT91_TWI_NACK 0x0100 /* Not Acknowledged */

#define AT91_TWI_INT_MASK \
(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)

#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
@@ -117,13 +120,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)

static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
{
at91_twi_write(dev, AT91_TWI_IDR,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
}

static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
at91_disable_twi_interrupts(dev);
}

@@ -213,6 +215,14 @@ static void at91_twi_write_data_dma_callback(void *data)
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
dev->buf_len, DMA_TO_DEVICE);

/*
* When this callback is called, THR/TX FIFO is likely not to be empty
* yet. So we have to wait for TXCOMP or NACK bits to be set into the
* Status Register to be sure that the STOP bit has been sent and the
* transfer is completed. The NACK interrupt has already been enabled,
* we just have to enable TXCOMP one.
*/
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

@@ -307,7 +317,7 @@ static void at91_twi_read_data_dma_callback(void *data)
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
dev->buf_len = 2;
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
@@ -368,7 +378,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
/* catch error flags */
dev->transfer_status |= status;

if (irqstatus & AT91_TWI_TXCOMP) {
if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
at91_disable_twi_interrupts(dev);
complete(&dev->cmd_complete);
}
@@ -381,6 +391,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
int ret;
bool has_unre_flag = dev->pdata->has_unre_flag;

/*
* WARNING: the TXCOMP bit in the Status Register is NOT a clear on
* read flag but shows the state of the transmission at the time the
* Status Register is read. According to the programmer datasheet,
* TXCOMP is set when both holding register and internal shifter are
* empty and STOP condition has been sent.
* Consequently, we should enable NACK interrupt rather than TXCOMP to
* detect transmission failure.
*
* Besides, the TXCOMP bit is already set before the i2c transaction
* has been started. For read transactions, this bit is cleared when
* writing the START bit into the Control Register. So the
* corresponding interrupt can safely be enabled just after.
* However for write transactions managed by the CPU, we first write
* into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
* interrupt. If TXCOMP interrupt were enabled before writing into THR,
* the interrupt handler would be called immediately and the i2c command
* would be reported as completed.
* Also when a write transaction is managed by the DMA controller,
* enabling the TXCOMP interrupt in this function may lead to a race
* condition since we don't know whether the TXCOMP interrupt is enabled
* before or after the DMA has started to write into THR. So the TXCOMP
* interrupt is enabled later by at91_twi_write_data_dma_callback().
* Immediately after in that DMA callback, we still need to send the
* STOP condition manually writing the corresponding bit into the
* Control Register.
*/

dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

@@ -411,26 +449,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
* seems to be the best solution.
*/
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
at91_twi_read_data_dma(dev);
/*
* It is important to enable TXCOMP irq here because
* doing it only when transferring the last two bytes
* will mask NACK errors since TXCOMP is set when a
* NACK occurs.
*/
} else {
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP);
} else
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
AT91_TWI_TXCOMP |
AT91_TWI_NACK |
AT91_TWI_RXRDY);
}
} else {
if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
at91_twi_write_data_dma(dev);
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
} else {
at91_twi_write_next_byte(dev);
at91_twi_write(dev, AT91_TWI_IER,
AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
AT91_TWI_TXCOMP |
AT91_TWI_NACK |
AT91_TWI_TXRDY);
}
}
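Editor's note: a sketch of how a caller typically consumes this interrupt set-up, loosely modeled on the surrounding driver code; the timeout value and error codes below are illustrative, not taken from this diff:

	/* Arm TXCOMP, NACK and RXRDY, then wait for the ISR to complete the
	 * command; a NACK now completes it too instead of timing out. */
	at91_twi_write(dev, AT91_TWI_IER,
		       AT91_TWI_TXCOMP | AT91_TWI_NACK | AT91_TWI_RXRDY);
	if (!wait_for_completion_timeout(&dev->cmd_complete,
					 msecs_to_jiffies(100)))
		return -ETIMEDOUT;
	if (dev->transfer_status & AT91_TWI_NACK)
		return -EREMOTEIO;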
@@ -58,7 +58,7 @@ struct at91_adc_caps {
u8 ts_pen_detect_sensitivity;

/* startup time calculate function */
u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);

u8 num_channels;
struct at91_adc_reg_desc registers;
@@ -82,7 +82,7 @@ struct at91_adc_state {
u8 num_channels;
void __iomem *reg_base;
struct at91_adc_reg_desc *registers;
u8 startup_time;
u32 startup_time;
u8 sample_hold_time;
bool sleep_mode;
struct iio_trigger **trig;
@@ -590,7 +590,7 @@ ret:
return ret;
}

static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
{
/*
* Number of ticks needed to cover the startup time of the ADC
@@ -601,7 +601,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
}

static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
{
/*
* For sama5d3x and at91sam9x5, the formula changes to:
@@ -22,7 +22,7 @@
#include "ad5624r.h"

static int ad5624r_spi_write(struct spi_device *spi,
u8 cmd, u8 addr, u16 val, u8 len)
u8 cmd, u8 addr, u16 val, u8 shift)
{
u32 data;
u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
* 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
msg[0] = data >> 16;
msg[1] = data >> 8;
msg[2] = data;
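Editor's note: passing a precomputed shift instead of a bit length means the don't-care padding is spelled out at the call sites. A worked example of the packing for the 12-bit AD5624R, whose input code is followed by four don't-care bits (the values below are illustrative):

	u8 cmd = 0x3, addr = 0x0, shift = 4;	/* 12-bit part: shift by 4 */
	u16 val = 0xabc;			/* 12-bit input code */
	u32 data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
	/* the 16-bit AD5664R would use shift 0, the 14-bit AD5644R shift 2 */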
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
struct tmp006_data *data = iio_priv(indio_dev);
int i;

if (mask != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;

for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
if ((val == tmp006_freqs[i][0]) &&
(val2 == tmp006_freqs[i][1])) {
@@ -59,6 +59,8 @@ static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
@@ -206,7 +208,7 @@ fail:
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
struct iser_rx_desc *rx_desc;
int i;

@@ -534,6 +536,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
mutex_init(&isert_conn->conn_mutex);
spin_lock_init(&isert_conn->conn_lock);
INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
INIT_WORK(&isert_conn->release_work, isert_release_work);

isert_conn->conn_cm_id = cma_id;
isert_conn->responder_resources = event->param.conn.responder_resources;
@@ -647,9 +650,9 @@ out:
static void
isert_connect_release(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_device *device = isert_conn->conn_device;
int cq_index;
struct ib_device *ib_dev = device->ib_device;

pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

@@ -657,7 +660,8 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_conn_free_fastreg_pool(isert_conn);

isert_free_rx_descriptors(isert_conn);
rdma_destroy_id(isert_conn->conn_cm_id);
if (isert_conn->conn_cm_id)
rdma_destroy_id(isert_conn->conn_cm_id);

if (isert_conn->conn_qp) {
cq_index = ((struct isert_cq_desc *)
@@ -799,6 +803,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
{
struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
bool terminating = false;

if (isert_np->np_cm_id == cma_id)
return isert_np_cma_handler(cma_id->context, event);
@@ -806,21 +811,37 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
isert_conn = cma_id->qp->qp_context;

mutex_lock(&isert_conn->conn_mutex);
terminating = (isert_conn->state == ISER_CONN_TERMINATING);
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->conn_mutex);

pr_info("conn %p completing conn_wait\n", isert_conn);
complete(&isert_conn->conn_wait);

if (terminating)
goto out;

mutex_lock(&isert_np->np_accept_mutex);
if (!list_empty(&isert_conn->conn_accept_node)) {
list_del_init(&isert_conn->conn_accept_node);
isert_put_conn(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
}
mutex_unlock(&isert_np->np_accept_mutex);

out:
return 0;
}

static void
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;

isert_conn->conn_cm_id = NULL;
isert_put_conn(isert_conn);

return -1;
}

static int
@@ -850,7 +871,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
isert_connect_error(cma_id);
ret = isert_connect_error(cma_id);
break;
default:
pr_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -2944,7 +2965,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)

wait_for_completion(&isert_conn->conn_wait_comp_err);

INIT_WORK(&isert_conn->release_work, isert_release_work);
queue_work(isert_release_wq, &isert_conn->release_work);
}
@@ -625,6 +625,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
goto err_out;
}

/* TSC-25 data sheet specifies a delay after the RESET command */
msleep(150);

/* set coordinate output rate */
buf[0] = buf[1] = 0xFF;
ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
@@ -1922,9 +1922,15 @@ static void free_pt_##LVL (unsigned long __pt) \
pt = (u64 *)__pt; \
\
for (i = 0; i < 512; ++i) { \
/* PTE present? */ \
if (!IOMMU_PTE_PRESENT(pt[i])) \
continue; \
\
/* Large PTE? */ \
if (PM_PTE_LEVEL(pt[i]) == 0 || \
PM_PTE_LEVEL(pt[i]) == 7) \
continue; \
\
p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
FN(p); \
} \
@@ -178,6 +178,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
}
EXPORT_SYMBOL_GPL(led_classdev_resume);

#ifdef CONFIG_PM_SLEEP
static int led_suspend(struct device *dev)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -197,11 +198,9 @@ static int led_resume(struct device *dev)

return 0;
}
#endif

static const struct dev_pm_ops leds_class_dev_pm_ops = {
.suspend = led_suspend,
.resume = led_resume,
};
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);

/**
* led_classdev_register - register a new object of led_classdev class.
@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
return -EINVAL;

if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
if (!divisor)
return -EINVAL;
step = end - start;
if (do_div(step, divisor))
step++;
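Editor's note: do_div() divides its first argument in place and returns the remainder, so the code above rounds the step size up whenever the range does not divide evenly. A short worked example with illustrative numbers:

	u64 step = 1000;	/* end - start */
	u32 divisor = 3;

	if (do_div(step, divisor))	/* step becomes 333, returns remainder 1 */
		step++;			/* rounded up to 334 */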
@@ -6232,7 +6232,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->ctime != info->ctime ||
mddev->level != info->level ||
/* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent||
mddev->persistent != !info->not_persistent ||
mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00)

@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,

if (s < 0 && nr_center < -s) {
/* not enough in central node */
shift(left, center, nr_center);
s = nr_center - target;
shift(left, center, -nr_center);
s += nr_center;
shift(left, right, s);
nr_right += s;
} else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s > 0 && nr_center < s) {
/* not enough in central node */
shift(center, right, nr_center);
s = target - nr_center;
s -= nr_center;
shift(left, right, s);
nr_left -= s;
} else
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
int r;
struct del_stack *s;

s = kmalloc(sizeof(*s), GFP_KERNEL);
s = kmalloc(sizeof(*s), GFP_NOIO);
if (!s)
return -ENOMEM;
s->info = info;
@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
smm->recursion_count++;
}

static int apply_bops(struct sm_metadata *smm)
{
int r = 0;

while (!brb_empty(&smm->uncommitted)) {
struct block_op bop;

r = brb_pop(&smm->uncommitted, &bop);
if (r) {
DMERR("bug in bop ring buffer");
break;
}

r = commit_bop(smm, &bop);
if (r)
break;
}

return r;
}

static int out(struct sm_metadata *smm)
{
int r = 0;
@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
return -ENOMEM;
}

if (smm->recursion_count == 1) {
while (!brb_empty(&smm->uncommitted)) {
struct block_op bop;

r = brb_pop(&smm->uncommitted, &bop);
if (r) {
DMERR("bug in bop ring buffer");
break;
}

r = commit_bop(smm, &bop);
if (r)
break;
}
}
if (smm->recursion_count == 1)
apply_bops(smm);

smm->recursion_count--;

@@ -702,6 +710,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
}
old_len = smm->begin;

r = apply_bops(smm);
if (r) {
DMERR("%s: apply_bops failed", __func__);
goto out;
}

r = sm_ll_commit(&smm->ll);
if (r)
goto out;
@@ -771,6 +785,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
if (r)
return r;

r = apply_bops(smm);
if (r) {
DMERR("%s: apply_bops failed", __func__);
return r;
}

return sm_metadata_commit(sm);
}
Some files were not shown because too many files have changed in this diff.