Merge branch 'linux-linaro-lsk-v3.14' into linux-linaro-lsk-v3.14-android
@@ -4,11 +4,13 @@ Specifying interrupt information for devices
 1) Interrupt client nodes
 -------------------------

-Nodes that describe devices which generate interrupts must contain an either an
-"interrupts" property or an "interrupts-extended" property. These properties
-contain a list of interrupt specifiers, one per output interrupt. The format of
-the interrupt specifier is determined by the interrupt controller to which the
-interrupts are routed; see section 2 below for details.
+Nodes that describe devices which generate interrupts must contain an
+"interrupts" property, an "interrupts-extended" property, or both. If both are
+present, the latter should take precedence; the former may be provided simply
+for compatibility with software that does not recognize the latter. These
+properties contain a list of interrupt specifiers, one per output interrupt. The
+format of the interrupt specifier is determined by the interrupt controller to
+which the interrupts are routed; see section 2 below for details.

 Example:
	interrupt-parent = <&intc1>;

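For illustration, a client node carrying both properties might look like the
following sketch; the node name and the second controller label (intc2) are
hypothetical, not taken from this patch:

	device@1000 {
	    /* legacy form, kept for software that predates interrupts-extended */
	    interrupt-parent = <&intc1>;
	    interrupts = <5 0>;
	    /* preferred form; each entry names its own parent controller */
	    interrupts-extended = <&intc1 5 0>, <&intc2 1 3>;
	};

Software that recognizes both should consume "interrupts-extended" and ignore
the legacy pair.
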
Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 14
-SUBLEVEL = 19
+SUBLEVEL = 20
 EXTRAVERSION =
 NAME = Remembering Coco

@@ -50,13 +50,13 @@

	mcspi1_pins: pinmux_mcspi1_pins {
	    pinctrl-single,pins = <
-		0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */
-		0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */
-		0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */
-		0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */
-		0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */
-		0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */
-		0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */
+		0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */
+		0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */
+		0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */
+		0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */
+		0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs1 */
+		0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */
+		0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */
	    >;
	};

@@ -178,7 +178,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio2: gpio@48055000 {
@@ -189,7 +189,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio3: gpio@48057000 {
@@ -200,7 +200,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio4: gpio@48059000 {
@@ -211,7 +211,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio5: gpio@4805b000 {
@@ -222,7 +222,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio6: gpio@4805d000 {
@@ -233,7 +233,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio7: gpio@48051000 {
@@ -244,7 +244,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	gpio8: gpio@48053000 {
@@ -255,7 +255,7 @@
	    gpio-controller;
	    #gpio-cells = <2>;
	    interrupt-controller;
-	    #interrupt-cells = <1>;
+	    #interrupt-cells = <2>;
	};

	uart1: serial@4806a000 {

@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H

+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,49 @@
 #endif

 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+    struct thread_info *thread;
+
+    thread = current_thread_info();
+
+    thread->tp_value[0] = val;
+
+    /*
+     * This code runs with preemption enabled and therefore must
+     * be reentrant with respect to switch_tls.
+     *
+     * We need to ensure ordering between the shadow state and the
+     * hardware state, so that we don't corrupt the hardware state
+     * with a stale shadow state during context switch.
+     *
+     * If we're preempted here, switch_tls will load TPIDRURO from
+     * thread_info upon resuming execution and the following mcr
+     * is merely redundant.
+     */
+    barrier();
+
+    if (!tls_emu) {
+	if (has_tls_reg) {
+	    asm("mcr p15, 0, %0, c13, c0, 3"
+		: : "r" (val));
+	} else {
+#ifdef CONFIG_KUSER_HELPERS
+	    /*
+	     * User space must never try to access this
+	     * directly. Expect your app to break
+	     * eventually if you do so. The user helper
+	     * at 0xffff0fe0 must be used instead. (see
+	     * entry-armv.S for details)
+	     */
+	    *((unsigned int *)0xffff0ff0) = val;
+#endif
+	}
+
+    }
+}
+
 static inline unsigned long get_tpuser(void)
 {
    unsigned long reg = 0;
@@ -59,5 +105,23 @@ static inline unsigned long get_tpuser(void)

    return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+    /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+     * we need not update thread_info.
+     */
+    if (has_tls_reg && !tls_emu) {
+	asm("mcr p15, 0, %0, c13, c0, 2"
+	    : : "r" (val));
+    }
+}
+
+static inline void flush_tls(void)
+{
+    set_tls(0);
+    set_tpuser(0);
+}
+
 #endif
 #endif	/* __ASMARM_TLS_H */

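The comment block in set_tls() above describes a general idiom: publish the
shadow (software) copy first, insert a compiler barrier, then update the
hardware copy, so preemption at any point leaves consistent state. A reduced
C sketch of that idiom; current_shadow() and write_hw_tls() are stand-ins
invented for this sketch, not kernel APIs:

	void set_thread_pointer(unsigned long val)
	{
	    current_shadow()->tp = val;  /* 1: shadow state, seen by switch_tls */
	    barrier();                   /* 2: compiler must not reorder 1 and 3 */
	    write_hw_tls(val);           /* 3: hardware register */
	}

	/* If preemption hits between 2 and 3, the context switch reloads the
	 * hardware register from the shadow written in step 1, so the write
	 * in step 3 is merely redundant rather than corrupting. */
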
@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)

    c = irq_data_get_irq_chip(d);
    if (!c->irq_set_affinity)
	pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-   else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+   else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
	cpumask_copy(d->affinity, affinity);

    return ret;

@@ -461,6 +461,8 @@ void flush_thread(void)
    memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
    memset(&thread->fpstate, 0, sizeof(union fp_state));

+   flush_tls();
+
    thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }

@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t)

    switch (cmd) {
    case THREAD_NOTIFY_FLUSH:
-	thread->thumbee_state = 0;
+	teehbr_write(0);
	break;
    case THREAD_NOTIFY_SWITCH:
	current_thread_info()->thumbee_state = teehbr_read();

@@ -578,7 +578,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-   struct thread_info *thread = current_thread_info();
    siginfo_t info;

    if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -629,21 +628,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
	return regs->ARM_r0;

    case NR(set_tls):
-	thread->tp_value[0] = regs->ARM_r0;
-	if (tls_emu)
-	    return 0;
-	if (has_tls_reg) {
-	    asm ("mcr p15, 0, %0, c13, c0, 3"
-		: : "r" (regs->ARM_r0));
-	} else {
-	    /*
-	     * User space must never try to access this directly.
-	     * Expect your app to break eventually if you do so.
-	     * The user helper at 0xffff0fe0 must be used instead.
-	     * (see entry-armv.S for details)
-	     */
-	    *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-	}
+	set_tls(regs->ARM_r0);
	return 0;

 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG

@@ -89,6 +89,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
    else
	kvm_vcpu_block(vcpu);

+   kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
    return 1;
 }

@@ -98,6 +98,10 @@ __do_hyp_init:
    mrc	p15, 0, r0, c10, c2, 1
    mcr	p15, 4, r0, c10, c2, 1

+   @ Invalidate the stale TLBs from Bootloader
+   mcr	p15, 4, r0, c8, c7, 0	@ TLBIALLH
+   dsb	ish
+
    @ Set the HSCTLR to:
    @  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
    @  - Endianness: Kernel config

@@ -3349,6 +3349,9 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
    if (!ois)
	return 0;

+   if (ois[0] == NULL) /* Empty list */
+	return 0;
+
    if (!linkspace) {
	if (_alloc_linkspace(ois)) {
	    pr_err("omap_hwmod: could not allocate link space\n");

@@ -35,6 +35,7 @@
 #include "i2c.h"
 #include "mmc.h"
 #include "wd_timer.h"
+#include "soc.h"

 /* Base offset for all DRA7XX interrupts external to MPUSS */
 #define DRA7XX_IRQ_GIC_START	32
@@ -2707,7 +2708,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
    &dra7xx_l4_per3__usb_otg_ss1,
    &dra7xx_l4_per3__usb_otg_ss2,
    &dra7xx_l4_per3__usb_otg_ss3,
-   &dra7xx_l4_per3__usb_otg_ss4,
    &dra7xx_l3_main_1__vcp1,
    &dra7xx_l4_per2__vcp1,
    &dra7xx_l3_main_1__vcp2,
@@ -2716,8 +2716,26 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
    NULL,
 };

+static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = {
+   &dra7xx_l4_per3__usb_otg_ss4,
+   NULL,
+};
+
+static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
+   NULL,
+};
+
 int __init dra7xx_hwmod_init(void)
 {
+   int ret;
+
    omap_hwmod_init();
-   return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
+   ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
+
+   if (!ret && soc_is_dra74x())
+	return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs);
+   else if (!ret && soc_is_dra72x())
+	return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
+
+   return ret;
 }

@@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437)
 #define soc_is_omap54xx()		0
 #define soc_is_omap543x()		0
 #define soc_is_dra7xx()			0
+#define soc_is_dra74x()			0
+#define soc_is_dra72x()			0

 #if defined(MULTI_OMAP2)
 # if defined(CONFIG_ARCH_OMAP2)
@@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430)

 #if defined(CONFIG_SOC_DRA7XX)
 #undef soc_is_dra7xx
+#undef soc_is_dra74x
+#undef soc_is_dra72x
 #define soc_is_dra7xx()	(of_machine_is_compatible("ti,dra7"))
+#define soc_is_dra74x()	(of_machine_is_compatible("ti,dra74"))
+#define soc_is_dra72x()	(of_machine_is_compatible("ti,dra72"))
 #endif

 /* Various silicon revisions for omap2 */

@@ -17,12 +17,6 @@
 */
	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_V6
-   sub	r1, sp, #4			@ Get unused stack location
-   strex	r0, r1, [r1]			@ Clear the exclusive monitor
-#elif defined(CONFIG_CPU_32v6K)
-   clrex
-#endif
    mrc	p15, 0, r1, c5, c0, 0		@ get FSR
    mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*

@@ -13,12 +13,6 @@
 */
	.align	5
 ENTRY(v7_early_abort)
-   /*
-    * The effect of data aborts on on the exclusive access monitor are
-    * UNPREDICTABLE. Do a CLREX to clear the state
-    */
-   clrex
-
    mrc	p15, 0, r1, c5, c0, 0		@ get FSR
    mrc	p15, 0, r0, c6, c0, 0		@ get FAR

@@ -40,6 +40,7 @@
 * This code is not portable to processors with late data abort handling.
 */
 #define CODING_BITS(i)	(i & 0x0e000000)
+#define COND_BITS(i)	(i & 0xf0000000)

 #define LDST_I_BIT(i)	(i & (1 << 26))		/* Immediate constant */
 #define LDST_P_BIT(i)	(i & (1 << 24))		/* Preindex */
@@ -817,6 +818,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
	break;

    case 0x04000000:	/* ldr or str immediate */
+	if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
+	    goto bad;
	offset.un = OFFSET_BITS(instr);
	handler = do_alignment_ldrstr;
	break;

@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg,
 */
 #define ARM_MAX_BRP		16
 #define ARM_MAX_WRP		16
-#define ARM_MAX_HBP_SLOTS	(ARM_MAX_BRP + ARM_MAX_WRP)

 /* Virtual debug register bases. */
 #define AARCH64_DBG_REG_BVR	0

@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
    if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
	return false;

-   if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+   if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+	affinity = cpu_online_mask;
	ret = true;
+   }

-   /*
-    * when using forced irq_set_affinity we must ensure that the cpu
-    * being offlined is not present in the affinity mask, it may be
-    * selected as the target CPU otherwise
-    */
-   affinity = cpu_online_mask;
    c = irq_data_get_irq_chip(d);
    if (!c->irq_set_affinity)
	pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-   else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+   else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
	cpumask_copy(d->affinity, affinity);

    return ret;

@@ -188,9 +188,27 @@ void exit_thread(void)
 {
 }

+static void tls_thread_flush(void)
+{
+   asm ("msr tpidr_el0, xzr");
+
+   if (is_compat_task()) {
+	current->thread.tp_value = 0;
+
+	/*
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 */
+	barrier();
+	asm ("msr tpidrro_el0, xzr");
+   }
+}
+
 void flush_thread(void)
 {
    fpsimd_flush_thread();
+   tls_thread_flush();
    flush_ptrace_hw_breakpoint(current);
 }

@@ -85,7 +85,8 @@ static void ptrace_hbptriggered(struct perf_event *bp,
	    break;
	}
    }
-   for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
+
+   for (i = 0; i < ARM_MAX_WRP; ++i) {
	if (current->thread.debug.hbp_watch[i] == bp) {
	    info.si_errno = -((i << 1) + 1);
	    break;

@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)

    case __ARM_NR_compat_set_tls:
	current->thread.tp_value = regs->regs[0];
+
+	/*
+	 * Protect against register corruption from context switch.
+	 * See comment in tls_thread_flush.
+	 */
+	barrier();
	asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
	return 0;

@@ -62,6 +62,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
    else
	kvm_vcpu_block(vcpu);

+   kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
    return 1;
 }

@@ -74,6 +74,10 @@ __do_hyp_init:
    msr	mair_el2, x4
    isb

+   /* Invalidate the stale TLBs from Bootloader */
+   tlbi	alle2
+   dsb	sy
+
    mrs	x4, sctlr_el2
    and	x4, x4, #SCTLR_EL2_EE	// preserve endianness of EL2
    ldr	x5, =SCTLR_EL2_FLAGS

@@ -13,6 +13,7 @@

 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <linux/string.h>

 #include <asm/addrspace.h>

@@ -123,7 +123,11 @@ NESTED(_mcount, PT_SIZE, ra)
	 nop
 #endif
	b	ftrace_stub
+#ifdef CONFIG_32BIT
+	 addiu sp, sp, 8
+#else
	 nop
+#endif

 static_trace:
	MCOUNT_SAVE_REGS
@@ -133,6 +137,9 @@ static_trace:
	 move	a1, AT		/* arg2: parent's return address */

	MCOUNT_RESTORE_REGS
+#ifdef CONFIG_32BIT
+	addiu sp, sp, 8
+#endif
	.globl ftrace_stub
 ftrace_stub:
	RETURN_BACK
@@ -177,6 +184,11 @@ NESTED(ftrace_graph_caller, PT_SIZE, ra)
	jal	prepare_ftrace_return
	 nop
	MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+	addiu sp, sp, 8
+#endif
+#endif
	RETURN_BACK
	END(ftrace_graph_caller)

@@ -48,7 +48,12 @@ cflags-y	:= -pipe

 # These flags should be implied by an hppa-linux configuration, but they
 # are not in gcc 3.2.
-cflags-y	+= -mno-space-regs -mfast-indirect-calls
+cflags-y	+= -mno-space-regs

+# -mfast-indirect-calls is only relevant for 32-bit kernels.
+ifndef CONFIG_64BIT
+cflags-y	+= -mfast-indirect-calls
+endif

 # Currently we save and restore fpregs on all kernel entry/interruption paths.
 # If that gets optimized, we might need to disable the use of fpregs in the

@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page)
	/* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
	/* Light-weight-syscall entry must always be located at 0xb0 */
	/* WARNING: Keep this number updated with table size changes */
-#define __NR_lws_entries (2)
+#define __NR_lws_entries (3)

 lws_entry:
	gate	lws_start, %r0		/* increase privilege */
@@ -502,7 +502,7 @@ lws_exit:


	/***************************************************
-		Implementing CAS as an atomic operation:
+		Implementing 32bit CAS as an atomic operation:

		%r26 - Address to examine
		%r25 - Old value to check (old)
@@ -659,6 +659,230 @@ cas_action:
	ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)


+	/***************************************************
+		New CAS implementation which uses pointers and variable size
+		information. The value pointed by old and new MUST NOT change
+		while performing CAS. The lock only protect the value at %r26.
+
+		%r26 - Address to examine
+		%r25 - Pointer to the value to check (old)
+		%r24 - Pointer to the value to set (new)
+		%r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit)
+		%r28 - Return non-zero on failure
+		%r21 - Kernel error code
+
+		%r21 has the following meanings:
+
+		EAGAIN - CAS is busy, ldcw failed, try again.
+		EFAULT - Read or write failed.
+
+		Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only)
+
+	****************************************************/
+
+	/* ELF32 Process entry path */
+lws_compare_and_swap_2:
+#ifdef CONFIG_64BIT
+	/* Clip the input registers */
+	depdi	0, 31, 32, %r26
+	depdi	0, 31, 32, %r25
+	depdi	0, 31, 32, %r24
+	depdi	0, 31, 32, %r23
+#endif
+
+	/* Check the validity of the size pointer */
+	subi,>>= 4, %r23, %r0
+	b,n	lws_exit_nosys
+
+	/* Jump to the functions which will load the old and new values into
+	   registers depending on the their size */
+	shlw	%r23, 2, %r29
+	blr	%r29, %r0
+	nop
+
+	/* 8bit load */
+4:	ldb	0(%sr3,%r25), %r25
+	b	cas2_lock_start
+5:	ldb	0(%sr3,%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 16bit load */
+6:	ldh	0(%sr3,%r25), %r25
+	b	cas2_lock_start
+7:	ldh	0(%sr3,%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 32bit load */
+8:	ldw	0(%sr3,%r25), %r25
+	b	cas2_lock_start
+9:	ldw	0(%sr3,%r24), %r24
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* 64bit load */
+#ifdef CONFIG_64BIT
+10:	ldd	0(%sr3,%r25), %r25
+11:	ldd	0(%sr3,%r24), %r24
+#else
+	/* Load new value into r22/r23 - high/low */
+10:	ldw	0(%sr3,%r25), %r22
+11:	ldw	4(%sr3,%r25), %r23
+	/* Load new value into fr4 for atomic store later */
+12:	flddx	0(%sr3,%r24), %fr4
+#endif
+
+cas2_lock_start:
+	/* Load start of lock table */
+	ldil	L%lws_lock_start, %r20
+	ldo	R%lws_lock_start(%r20), %r28
+
+	/* Extract four bits from r26 and hash lock (Bits 4-7) */
+	extru	%r26, 27, 4, %r20
+
+	/* Find lock to use, the hash is either one of 0 to
+	   15, multiplied by 16 (keep it 16-byte aligned)
+	   and add to the lock table offset. */
+	shlw	%r20, 4, %r20
+	add	%r20, %r28, %r20
+
+	rsm	PSW_SM_I, %r0			/* Disable interrupts */
+	/* COW breaks can cause contention on UP systems */
+	LDCW	0(%sr2,%r20), %r28		/* Try to acquire the lock */
+	cmpb,<>,n	%r0, %r28, cas2_action	/* Did we get it? */
+cas2_wouldblock:
+	ldo	2(%r0), %r28			/* 2nd case */
+	ssm	PSW_SM_I, %r0
+	b	lws_exit			/* Contended... */
+	ldo	-EAGAIN(%r0), %r21		/* Spin in userspace */
+
+	/*
+		prev = *addr;
+		if ( prev == old )
+		  *addr = new;
+		return prev;
+	*/
+
+	/* NOTES:
+		This all works becuse intr_do_signal
+		and schedule both check the return iasq
+		and see that we are on the kernel page
+		so this process is never scheduled off
+		or is ever sent any signal of any sort,
+		thus it is wholly atomic from usrspaces
+		perspective
+	*/
+cas2_action:
+	/* Jump to the correct function */
+	blr	%r29, %r0
+	/* Set %r28 as non-zero for now */
+	ldo	1(%r0),%r28
+
+	/* 8bit CAS */
+13:	ldb,ma	0(%sr3,%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+14:	stb,ma	%r24, 0(%sr3,%r26)
+	b	cas2_end
+	copy	%r0, %r28
+	nop
+	nop
+
+	/* 16bit CAS */
+15:	ldh,ma	0(%sr3,%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+16:	sth,ma	%r24, 0(%sr3,%r26)
+	b	cas2_end
+	copy	%r0, %r28
+	nop
+	nop
+
+	/* 32bit CAS */
+17:	ldw,ma	0(%sr3,%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+18:	stw,ma	%r24, 0(%sr3,%r26)
+	b	cas2_end
+	copy	%r0, %r28
+	nop
+	nop
+
+	/* 64bit CAS */
+#ifdef CONFIG_64BIT
+19:	ldd,ma	0(%sr3,%r26), %r29
+	sub,=	%r29, %r25, %r0
+	b,n	cas2_end
+20:	std,ma	%r24, 0(%sr3,%r26)
+	copy	%r0, %r28
+#else
+	/* Compare first word */
+19:	ldw,ma	0(%sr3,%r26), %r29
+	sub,=	%r29, %r22, %r0
+	b,n	cas2_end
+	/* Compare second word */
+20:	ldw,ma	4(%sr3,%r26), %r29
+	sub,=	%r29, %r23, %r0
+	b,n	cas2_end
+	/* Perform the store */
+21:	fstdx	%fr4, 0(%sr3,%r26)
+	copy	%r0, %r28
+#endif
+
+cas2_end:
+	/* Free lock */
+	stw,ma	%r20, 0(%sr2,%r20)
+	/* Enable interrupts */
+	ssm	PSW_SM_I, %r0
+	/* Return to userspace, set no error */
+	b	lws_exit
+	copy	%r0, %r21
+
+22:
+	/* Error occurred on load or store */
+	/* Free lock */
+	stw	%r20, 0(%sr2,%r20)
+	ssm	PSW_SM_I, %r0
+	ldo	1(%r0),%r28
+	b	lws_exit
+	ldo	-EFAULT(%r0),%r21	/* set errno */
+	nop
+	nop
+	nop
+
+	/* Exception table entries, for the load and store, return EFAULT.
+	   Each of the entries must be relocated. */
+	ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page)
+#ifndef CONFIG_64BIT
+	ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page)
+	ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page)
+#endif
+
	/* Make sure nothing else is placed on this page */
	.align PAGE_SIZE
	END(linux_gateway_page)
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page)
	/* Light-weight-syscall table */
	/* Start of lws table. */
	ENTRY(lws_table)
-	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic compare and swap */
-	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic compare and swap */
+	LWS_ENTRY(compare_and_swap32)	/* 0 - ELF32 Atomic 32bit CAS */
+	LWS_ENTRY(compare_and_swap64)	/* 1 - ELF64 Atomic 32bit CAS */
+	LWS_ENTRY(compare_and_swap_2)	/* 2 - ELF32 Atomic 64bit CAS */
	END(lws_table)
	/* End of lws table */

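For context, the register contract documented in the block above suggests how
a C library might reach this new entry point. The sketch below is an
illustration under those assumptions (entry number 2 at gateway offset 0xb0,
arguments in %r26-%r23, status in %r28/%r21); the wrapper name, clobber list,
and error handling are ours, not part of the patch:

	/* Hypothetical userspace wrapper for LWS entry 2 (variable-size CAS). */
	static long lws_cas2(void *addr, void *old, void *new_val, long size_idx)
	{
	    register unsigned long r26 __asm__("r26") = (unsigned long)addr;
	    register unsigned long r25 __asm__("r25") = (unsigned long)old;
	    register unsigned long r24 __asm__("r24") = (unsigned long)new_val;
	    register unsigned long r23 __asm__("r23") = size_idx; /* 0/1/2/3 */
	    register long err __asm__("r21");
	    register long ret __asm__("r28");

	    __asm__ __volatile__(
		"ble	0xb0(%%sr2, %%r0)\n\t"	/* enter the gateway page */
		"ldi	2, %%r20\n\t"		/* delay slot: LWS index 2 */
		: "=r" (ret), "=r" (err),
		  "+r" (r26), "+r" (r25), "+r" (r24), "+r" (r23)
		:
		: "r1", "r20", "r22", "r29", "r31", "memory");

	    /* %r21 carries -EAGAIN (retry) or -EFAULT; %r28 non-zero means
	       the compare failed. */
	    return err ? err : ret;
	}
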
@@ -47,6 +47,12 @@
				 STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
 #define STACK_FRAME_MARKER	12

+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE	32
+#else
+#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD
+#endif
+
 /* Size of dummy stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE	128
 #define __SIGNAL_FRAMESIZE32	64
@@ -60,6 +66,7 @@
 #define STACK_FRAME_REGS_MARKER	ASM_CONST(0x72656773)
 #define STACK_INT_FRAME_SIZE	(sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
 #define STACK_FRAME_MARKER	2
+#define STACK_FRAME_MIN_SIZE	STACK_FRAME_OVERHEAD

 /* Size of stack frame allocated when calling signal handler. */
 #define __SIGNAL_FRAMESIZE	64

@@ -61,6 +61,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)

 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+   smp_mb();
    return !arch_spin_value_unlocked(*lock);
 }

@@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)

 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
+   smp_mb();
+
    while (lock->slock) {
	HMT_low();
	if (SHARED_PROCESSOR)
	    __spin_yield(lock);
    }
    HMT_medium();
+
+   smp_mb();
 }

 EXPORT_SYMBOL(arch_spin_unlock_wait);

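A reduced sketch of what the added barriers guarantee around the busy-wait;
READ_ONCE() and cpu_relax() stand in for the powerpc HMT_low()/__spin_yield()
details and are not part of the patch:

	smp_mb();                       /* order the caller's prior accesses
	                                   before sampling the lock word */
	while (READ_ONCE(lock->slock))
	    cpu_relax();                /* spin until the holder releases */
	smp_mb();                       /* keep subsequent accesses after the
	                                   point the lock was seen free */
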
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
	return 0;		/* must be 16-byte aligned */
    if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
	return 0;
-   if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
+   if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
	return 1;
    /*
     * sp could decrease when we jump off an interrupt stack

@@ -810,11 +810,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
    pte_t *ptep;

    down_read(&mm->mmap_sem);
+retry:
    ptep = get_locked_pte(current->mm, addr, &ptl);
    if (unlikely(!ptep)) {
	up_read(&mm->mmap_sem);
	return -EFAULT;
    }
+   if (!(pte_val(*ptep) & _PAGE_INVALID) &&
+	(pte_val(*ptep) & _PAGE_PROTECT)) {
+	    pte_unmap_unlock(*ptep, ptl);
+	    if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	    }
+	    goto retry;
+	}

    new = old = pgste_get_lock(ptep);
    pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |

@@ -183,12 +183,27 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
 static bool mem_avoid_overlap(struct mem_vector *img)
 {
    int i;
+   struct setup_data *ptr;

    for (i = 0; i < MEM_AVOID_MAX; i++) {
	if (mem_overlaps(img, &mem_avoid[i]))
	    return true;
    }

+   /* Avoid all entries in the setup_data linked list. */
+   ptr = (struct setup_data *)(unsigned long)real_mode->hdr.setup_data;
+   while (ptr) {
+	struct mem_vector avoid;
+
+	avoid.start = (u64)ptr;
+	avoid.size = sizeof(*ptr) + ptr->len;
+
+	if (mem_overlaps(img, &avoid))
+	    return true;
+
+	ptr = (struct setup_data *)(unsigned long)ptr->next;
+   }
+
    return false;
 }

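mem_overlaps() itself is not shown in this hunk; from its use above it is
presumably the standard half-open interval test, along the lines of the
following sketch (the helper name and parameters are ours):

	/* Two [start, start + size) ranges overlap iff each one starts
	 * before the other one ends. */
	static bool ranges_overlap(u64 a_start, u64 a_size,
				   u64 b_start, u64 b_size)
	{
	    return a_start < b_start + b_size &&
		   b_start < a_start + a_size;
	}
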
@@ -123,14 +123,14 @@ enum fixed_addresses {
	__end_of_permanent_fixed_addresses,

	/*
-	 * 256 temporary boot-time mappings, used by early_ioremap(),
+	 * 512 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
-	 * If necessary we round it up to the next 256 pages boundary so
+	 * If necessary we round it up to the next 512 pages boundary so
	 * that we can have a single pgd entry and a single pte table:
	 */
 #define NR_FIX_BTMAPS		64
-#define FIX_BTMAPS_SLOTS	4
+#define FIX_BTMAPS_SLOTS	8
 #define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^

@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
 extern pgd_t init_level4_pgt[];

 #define swapper_pg_dir init_level4_pgt

@@ -511,6 +511,7 @@ static int rapl_cpu_prepare(int cpu)
    struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
    int phys_id = topology_physical_package_id(cpu);
    u64 ms;
+   u64 msr_rapl_power_unit_bits;

    if (pmu)
	return 0;
@@ -518,6 +519,9 @@ static int rapl_cpu_prepare(int cpu)
    if (phys_id < 0)
	return -1;

+   if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+	return -1;
+
    pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
    if (!pmu)
	return -1;
@@ -531,8 +535,7 @@ static int rapl_cpu_prepare(int cpu)
     *
     * we cache in local PMU instance
     */
-   rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-   pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+   pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
    pmu->pmu = &rapl_pmu_class;

    /*
@@ -649,7 +652,9 @@ static int __init rapl_pmu_init(void)
    get_online_cpus();

    for_each_online_cpu(cpu) {
-	rapl_cpu_prepare(cpu);
+	ret = rapl_cpu_prepare(cpu);
+	if (ret)
+	    goto out;
	rapl_cpu_init(cpu);
    }

@@ -672,6 +677,7 @@ static int __init rapl_pmu_init(void)
	hweight32(rapl_cntr_mask),
	ktime_to_ms(pmu->timer_interval));

+out:
    put_online_cpus();

    return 0;

@@ -1287,6 +1287,9 @@ static void remove_siblinginfo(int cpu)

    for_each_cpu(sibling, cpu_sibling_mask(cpu))
	cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+   for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
+	cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
+   cpumask_clear(cpu_llc_shared_mask(cpu));
    cpumask_clear(cpu_sibling_mask(cpu));
    cpumask_clear(cpu_core_mask(cpu));
    c->phys_proc_id = 0;

@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- * NOTE: for PVH, the page tables are native.
+ * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
+ * kernel has a physical mapping to start with - but that's enough to
+ * get __va working.  We need to fill in the rest of the physical
+ * mapping once some sort of allocator has been set up.  NOTE: for
+ * PVH, the page tables are native.
 */
 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
	/* L3_i[0] -> level2_ident_pgt */
	convert_pfn_mfn(level3_ident_pgt);
	/* L3_k[510] -> level2_kernel_pgt
-	 * L3_i[511] -> level2_fixmap_pgt */
+	 * L3_k[511] -> level2_fixmap_pgt */
	convert_pfn_mfn(level3_kernel_pgt);
+
+	/* L3_k[511][506] -> level1_fixmap_pgt */
+	convert_pfn_mfn(level2_fixmap_pgt);
 }
 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
    addr[1] = (unsigned long)l3;
    addr[2] = (unsigned long)l2;
    /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-    * Both L4[272][0] and L4[511][511] have entries that point to the same
+    * Both L4[272][0] and L4[511][510] have entries that point to the same
     * L2 (PMD) tables. Meaning that if you modify it in __va space
     * it will be also modified in the __ka space!  (But if you just
     * modify the PMD table to point to other PTE's or none, then you
     * are OK - which is what cleanup_highmap does) */
    copy_page(level2_ident_pgt, l2);

-   /* Graft it onto L4[511][511] */
+   /* Graft it onto L4[511][510] */
    copy_page(level2_kernel_pgt, l2);

-   /* Get [511][510] and graft that in level2_fixmap_pgt */
-   l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-   l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-   copy_page(level2_fixmap_pgt, l2);
-
-   /* Note that we don't do anything with level1_fixmap_pgt which
-    * we don't need. */
    if (!xen_feature(XENFEAT_auto_translated_physmap)) {
	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,

@@ -67,7 +67,12 @@
 #define VMALLOC_START		0xC0000000
 #define VMALLOC_END		0xC7FEFFFF
 #define TLBTEMP_BASE_1		0xC7FF0000
-#define TLBTEMP_BASE_2		0xC7FF8000
+#define TLBTEMP_BASE_2		(TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
+#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
+#define TLBTEMP_SIZE		(2 * DCACHE_WAY_SIZE)
+#else
+#define TLBTEMP_SIZE		ICACHE_WAY_SIZE
+#endif

 /*
 * For the Xtensa architecture, the PTE layout is as follows:

@@ -52,7 +52,12 @@
 */
	.macro	get_fs	ad, sp
	GET_CURRENT(\ad,\sp)
+#if THREAD_CURRENT_DS > 1020
+	addi	\ad, \ad, TASK_THREAD
+	l32i	\ad, \ad, THREAD_CURRENT_DS - TASK_THREAD
+#else
	l32i	\ad, \ad, THREAD_CURRENT_DS
+#endif
	.endm

 /*

@@ -28,17 +28,17 @@
 #define TCSETSW		0x5403
 #define TCSETSF		0x5404

-#define TCGETA		_IOR('t', 23, struct termio)
-#define TCSETA		_IOW('t', 24, struct termio)
-#define TCSETAW		_IOW('t', 25, struct termio)
-#define TCSETAF		_IOW('t', 28, struct termio)
+#define TCGETA		0x80127417	/* _IOR('t', 23, struct termio) */
+#define TCSETA		0x40127418	/* _IOW('t', 24, struct termio) */
+#define TCSETAW		0x40127419	/* _IOW('t', 25, struct termio) */
+#define TCSETAF		0x4012741C	/* _IOW('t', 28, struct termio) */

 #define TCSBRK		_IO('t', 29)
 #define TCXONC		_IO('t', 30)
 #define TCFLSH		_IO('t', 31)

-#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
-#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
+#define TIOCSWINSZ	0x40087467	/* _IOW('t', 103, struct winsize) */
+#define TIOCGWINSZ	0x80087468	/* _IOR('t', 104, struct winsize) */
 #define TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
 #define TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
 #define TIOCOUTQ	_IOR('t', 115, int)	/* output queue size */
@@ -88,7 +88,6 @@
 #define TIOCSETD	_IOW('T', 35, int)
 #define TIOCGETD	_IOR('T', 36, int)
 #define TCSBRKP		_IOW('T', 37, int)   /* Needed for POSIX tcsendbreak()*/
-#define TIOCTTYGSTRUCT	_IOR('T', 38, struct tty_struct) /* For debugging only*/
 #define TIOCSBRK	_IO('T', 39)	     /* BSD compatibility */
 #define TIOCCBRK	_IO('T', 40)	     /* BSD compatibility */
 #define TIOCGSID	_IOR('T', 41, pid_t) /* Return the session ID of FD*/
@@ -114,8 +113,10 @@
 #define TIOCSERGETLSR   _IOR('T', 89, unsigned int) /* Get line status reg. */
   /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
 # define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
-#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config  */
-#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
+#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config  */
+			/* _IOR('T', 90, struct serial_multiport_struct) */
+#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */
+			/* _IOW('T', 91, struct serial_multiport_struct) */

 #define TIOCMIWAIT	_IO('T', 92) /* wait for a change on serial input line(s) */
 #define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */

@@ -1001,9 +1001,8 @@ ENTRY(fast_syscall_xtensa)
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

-	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
-	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
-	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
+	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
+	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */
@@ -1015,27 +1014,26 @@ TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
-	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

 1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
-	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

 .Lnswp:	/* Atomic set, add, and exg_add. */

 TRY	l32i	a7, a3, 0		# orig
+	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
+	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
 TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
-	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

 CATCH
@@ -1044,7 +1042,7 @@ CATCH
	movi	a2, -EFAULT
	rfe

-.Lill:	l32i	a7, a2, PT_AREG0	# restore a7
+.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe
@@ -1565,7 +1563,7 @@ ENTRY(fast_second_level_miss)
	rsr	a0, excvaddr
	bltu	a0, a3, 2f

-	addi	a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT))
+	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, 2f

	/* Check if we have to restore an ITLB mapping. */
@@ -1820,7 +1818,6 @@ ENTRY(_switch_to)

	entry	a1, 16

-	mov	a10, a2			# preserve 'prev' (a2)
	mov	a11, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
@@ -1828,8 +1825,14 @@ ENTRY(_switch_to)

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

-	s32i	a0, a10, THREAD_RA	# save return address
-	s32i	a1, a10, THREAD_SP	# save stack pointer
+#if THREAD_RA > 1020 || THREAD_SP > 1020
+	addi	a10, a2, TASK_THREAD
+	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
+	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
+#else
+	s32i	a0, a2, THREAD_RA	# save return address
+	s32i	a1, a2, THREAD_SP	# save stack pointer
+#endif

	/* Disable ints while we manipulate the stack pointer. */
@@ -1870,7 +1873,6 @@ ENTRY(_switch_to)
	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
-	mov	a2, a10			# return 'prev'
	rsync

	retw

@@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)

	/* We currently don't support coherent memory outside KSEG */

-	if (ret < XCHAL_KSEG_CACHED_VADDR
-	    || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
-		BUG();
+	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
+	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);


	if (ret != 0) {
@@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *hwdev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
 {
-	long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
+	unsigned long addr = (unsigned long)vaddr +
+		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;

-	if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
-		BUG();
+	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
+	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	free_pages(addr, get_order(size));
 }

@@ -1275,12 +1275,16 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 static void
 cfq_update_group_weight(struct cfq_group *cfqg)
 {
-   BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
    if (cfqg->new_weight) {
	cfqg->weight = cfqg->new_weight;
	cfqg->new_weight = 0;
    }
+}
+
+static void
+cfq_update_group_leaf_weight(struct cfq_group *cfqg)
+{
+   BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

    if (cfqg->new_leaf_weight) {
	cfqg->leaf_weight = cfqg->new_leaf_weight;
@@ -1299,7 +1303,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
    /* add to the service tree */
    BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

-   cfq_update_group_weight(cfqg);
+   cfq_update_group_leaf_weight(cfqg);
    __cfq_group_service_tree_add(st, cfqg);

    /*
@@ -1323,6 +1327,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
     */
    while ((parent = cfqg_parent(pos))) {
	if (propagate) {
+	    cfq_update_group_weight(pos);
	    propagate = !parent->nr_active++;
	    parent->children_weight += pos->weight;
	}

@@ -28,10 +28,10 @@ struct kobject *block_depr;
 /* for extended dynamic devt allocation, currently only one major is used */
 #define NR_EXT_DEVT		(1 << MINORBITS)

-/* For extended devt allocation.  ext_devt_mutex prevents look up
+/* For extended devt allocation.  ext_devt_lock prevents look up
 * results from going away underneath its user.
 */
-static DEFINE_MUTEX(ext_devt_mutex);
+static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);

 static struct device_type disk_type;
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
    }

    /* allocate ext devt */
-   mutex_lock(&ext_devt_mutex);
-   idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
-   mutex_unlock(&ext_devt_mutex);
+   idr_preload(GFP_KERNEL);
+
+   spin_lock(&ext_devt_lock);
+   idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
+   spin_unlock(&ext_devt_lock);
+
+   idr_preload_end();
    if (idx < 0)
	return idx == -ENOSPC ? -EBUSY : idx;

@@ -441,15 +445,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 */
 void blk_free_devt(dev_t devt)
 {
-   might_sleep();
-
    if (devt == MKDEV(0, 0))
	return;

    if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-	mutex_lock(&ext_devt_mutex);
+	spin_lock(&ext_devt_lock);
	idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-	mutex_unlock(&ext_devt_mutex);
+	spin_unlock(&ext_devt_lock);
    }
 }

@@ -665,7 +667,6 @@ void del_gendisk(struct gendisk *disk)
    sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
    pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
    device_del(disk_to_dev(disk));
-   blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);

@@ -690,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
    } else {
	struct hd_struct *part;

-	mutex_lock(&ext_devt_mutex);
+	spin_lock(&ext_devt_lock);
	part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
	if (part && get_disk(part_to_disk(part))) {
	    *partno = part->partno;
	    disk = part_to_disk(part);
	}
-	mutex_unlock(&ext_devt_mutex);
+	spin_unlock(&ext_devt_lock);
    }

    return disk;
@@ -1098,6 +1099,7 @@ static void disk_release(struct device *dev)
 {
    struct gendisk *disk = dev_to_disk(dev);

+   blk_free_devt(dev->devt);
    disk_release_events(disk);
    kfree(disk->random);
    disk_replace_part_tbl(disk, NULL);

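The allocation change above follows a standard idiom once the mutex becomes a
spinlock: preallocate with a sleeping allocation outside the lock, then do the
actual idr_alloc() atomically under it. A minimal generic sketch of that idiom
(the helper name and parameters are ours, not from the patch):

	/* Preallocate outside the lock, allocate atomically under it. */
	int alloc_id(struct idr *idr, spinlock_t *lock, void *ptr, int max)
	{
	    int id;

	    idr_preload(GFP_KERNEL);    /* may sleep: fills a per-cpu cache */
	    spin_lock(lock);
	    id = idr_alloc(idr, ptr, 0, max, GFP_NOWAIT); /* uses the cache */
	    spin_unlock(lock);
	    idr_preload_end();

	    return id;  /* >= 0 on success, -ENOSPC/-ENOMEM on failure */
	}
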
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = {
 static void part_release(struct device *dev)
 {
    struct hd_struct *p = dev_to_part(dev);
+   blk_free_devt(dev->devt);
    free_part_stats(p);
    free_part_info(p);
    kfree(p);
@@ -264,7 +265,6 @@ void delete_partition(struct gendisk *disk, int partno)
    rcu_assign_pointer(ptbl->last_lookup, NULL);
    kobject_put(part->holder_dir);
    device_del(part_to_dev(part));
-   blk_free_devt(part_devt(part));

    hd_struct_put(part);
 }

@@ -253,7 +253,7 @@ int aix_partition(struct parsed_partitions *state)
		continue;
	    }
	    lv_ix = be16_to_cpu(p->lv_ix) - 1;
-	    if (lv_ix > state->limit) {
+	    if (lv_ix >= state->limit) {
		cur_lv_ix = -1;
		continue;
	    }

@@ -35,7 +35,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
		      void *handler_context, void *region_context)
 {
    int i;
-   u8 *value = (u8 *)&value64;
+   u8 *value = (u8 *)value64;

    if (address > 0xff || !value64)
	return AE_BAD_PARAMETER;

@@ -254,6 +254,7 @@ struct acpi_create_field_info {
    u32 field_bit_position;
    u32 field_bit_length;
    u16 resource_length;
+   u16 pin_number_index;
    u8 field_flags;
    u8 attribute;
    u8 field_type;
@@ -263,6 +263,7 @@ struct acpi_object_region_field {
    ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
    union acpi_operand_object *region_obj;	/* Containing op_region object */
    u8 *resource_buffer;	/* resource_template for serial regions/fields */
+   u16 pin_number_index;	/* Index relative to previous Connection/Template */
 };

 struct acpi_object_bank_field {

@@ -360,6 +360,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
     */
    info->resource_buffer = NULL;
    info->connection_node = NULL;
+   info->pin_number_index = 0;

    /*
     * A Connection() is either an actual resource descriptor (buffer)
@@ -437,6 +438,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
	}

	info->field_bit_position += info->field_bit_length;
+	info->pin_number_index++;	/* Index relative to previous Connection() */
	break;

    default:

@@ -142,6 +142,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
    union acpi_operand_object *region_obj2;
    void *region_context = NULL;
    struct acpi_connection_info *context;
+   acpi_physical_address address;

    ACPI_FUNCTION_TRACE(ev_address_space_dispatch);

@@ -231,25 +232,23 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
    /* We have everything we need, we can invoke the address space handler */

    handler = handler_desc->address_space.handler;
-
-   ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
-		      "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
-		      &region_obj->region.handler->address_space, handler,
-		      ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
-					      region_offset),
-		      acpi_ut_get_region_name(region_obj->region.
-					      space_id)));
+   address = (region_obj->region.address + region_offset);

    /*
     * Special handling for generic_serial_bus and general_purpose_io:
     * There are three extra parameters that must be passed to the
     * handler via the context:
-    *   1) Connection buffer, a resource template from Connection() op.
-    *   2) Length of the above buffer.
-    *   3) Actual access length from the access_as() op.
+    *   1) Connection buffer, a resource template from Connection() op
+    *   2) Length of the above buffer
+    *   3) Actual access length from the access_as() op
+    *
+    * In addition, for general_purpose_io, the Address and bit_width fields
+    * are defined as follows:
+    *   1) Address is the pin number index of the field (bit offset from
+    *      the previous Connection)
+    *   2) bit_width is the actual bit length of the field (number of pins)
     */
-   if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
-	 (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+   if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
	context && field_obj) {

	/* Get the Connection (resource_template) buffer */
@@ -258,6 +257,24 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
	context->length = field_obj->field.resource_length;
	context->access_length = field_obj->field.access_length;
    }
+   if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+	context && field_obj) {
+
+	/* Get the Connection (resource_template) buffer */
+
+	context->connection = field_obj->field.resource_buffer;
+	context->length = field_obj->field.resource_length;
+	context->access_length = field_obj->field.access_length;
+	address = field_obj->field.pin_number_index;
+	bit_width = field_obj->field.bit_length;
+   }
+
+   ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+		      "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+		      &region_obj->region.handler->address_space, handler,
+		      ACPI_FORMAT_NATIVE_UINT(address),
+		      acpi_ut_get_region_name(region_obj->region.
+					      space_id)));

    if (!(handler_desc->address_space.handler_flags &
	  ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
@@ -271,9 +288,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,

    /* Call the handler */

-   status = handler(function,
-		     (region_obj->region.address + region_offset),
-		     bit_width, value, context,
+   status = handler(function, address, bit_width, value, context,
		     region_obj2->extra.region_context);

    if (ACPI_FAILURE(status)) {

@@ -178,6 +178,37 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
	buffer = &buffer_desc->integer.value;
    }

+   if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+	(obj_desc->field.region_obj->region.space_id ==
+	 ACPI_ADR_SPACE_GPIO)) {
+	/*
+	 * For GPIO (general_purpose_io), the Address will be the bit offset
+	 * from the previous Connection() operator, making it effectively a
+	 * pin number index. The bit_length is the length of the field, which
+	 * is thus the number of pins.
+	 */
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "GPIO FieldRead [FROM]:  Pin %u Bits %u\n",
+			  obj_desc->field.pin_number_index,
+			  obj_desc->field.bit_length));
+
+	/* Lock entire transaction if requested */
+
+	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+	/* Perform the write */
+
+	status = acpi_ex_access_region(obj_desc, 0,
+				       (u64 *)buffer, ACPI_READ);
+	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+	if (ACPI_FAILURE(status)) {
+	    acpi_ut_remove_reference(buffer_desc);
+	} else {
+	    *ret_buffer_desc = buffer_desc;
+	}
+	return_ACPI_STATUS(status);
+   }
+
    ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
		      "FieldRead [TO]:   Obj %p, Type %X, Buf %p, ByteLen %X\n",
		      obj_desc, obj_desc->common.type, buffer,
@@ -325,6 +356,42 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,

	*result_desc = buffer_desc;
	return_ACPI_STATUS(status);
+   } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) &&
+		(obj_desc->field.region_obj->region.space_id ==
+		 ACPI_ADR_SPACE_GPIO)) {
+	/*
+	 * For GPIO (general_purpose_io), we will bypass the entire field
+	 * mechanism and handoff the bit address and bit width directly to
+	 * the handler. The Address will be the bit offset
+	 * from the previous Connection() operator, making it effectively a
+	 * pin number index. The bit_length is the length of the field, which
+	 * is thus the number of pins.
+	 */
+	if (source_desc->common.type != ACPI_TYPE_INTEGER) {
+	    return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
+			  "GPIO FieldWrite [FROM]: (%s:%X), Val %.8X  [TO]:  Pin %u Bits %u\n",
+			  acpi_ut_get_type_name(source_desc->common.
+						type),
+			  source_desc->common.type,
+			  (u32)source_desc->integer.value,
+			  obj_desc->field.pin_number_index,
+			  obj_desc->field.bit_length));
+
+	buffer = &source_desc->integer.value;
+
+	/* Lock entire transaction if requested */
+
+	acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+
+	/* Perform the write */
+
+	status = acpi_ex_access_region(obj_desc, 0,
+				       (u64 *)buffer, ACPI_WRITE);
+	acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
+	return_ACPI_STATUS(status);
+   }

    /* Get a pointer to the data to be written */

@@ -484,6 +484,8 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
|
||||
obj_desc->field.resource_length = info->resource_length;
|
||||
}
|
||||
|
||||
obj_desc->field.pin_number_index = info->pin_number_index;
|
||||
|
||||
/* Allow full data read from EC address space */
|
||||
|
||||
if ((obj_desc->field.region_obj->region.space_id ==
|
||||
|
||||
@@ -96,6 +96,13 @@ static void container_device_detach(struct acpi_device *adev)
|
||||
device_unregister(dev);
|
||||
}
|
||||
|
||||
static void container_device_online(struct acpi_device *adev)
|
||||
{
|
||||
struct device *dev = acpi_driver_data(adev);
|
||||
|
||||
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
|
||||
}
|
||||
|
||||
static struct acpi_scan_handler container_handler = {
|
||||
.ids = container_device_ids,
|
||||
.attach = container_device_attach,
|
||||
@@ -103,6 +110,7 @@ static struct acpi_scan_handler container_handler = {
|
||||
.hotplug = {
|
||||
.enabled = true,
|
||||
.demand_offline = true,
|
||||
.notify_online = container_device_online,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -106,7 +106,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
|
||||
list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
|
||||
count = snprintf(&modalias[len], size, "%s:", id->id);
|
||||
if (count < 0)
|
||||
return EINVAL;
|
||||
return -EINVAL;
|
||||
if (count >= size)
|
||||
return -ENOMEM;
|
||||
len += count;
|
||||
@@ -2068,6 +2068,9 @@ static void acpi_bus_attach(struct acpi_device *device)
|
||||
ok:
|
||||
list_for_each_entry(child, &device->children, node)
|
||||
acpi_bus_attach(child);
|
||||
|
||||
if (device->handler && device->handler->hotplug.notify_online)
|
||||
device->handler->hotplug.notify_online(device);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -306,6 +306,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
{ PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */

/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -443,6 +451,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
.driver_data = board_ahci_yes_fbs }, /* 88se9182 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */

@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
/* SATA Controller IDE (Coleto Creek) */
{ 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (9 Series) */
{ 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },

{ } /* terminate list */
};

@@ -4227,7 +4227,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
{ "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },

/*
* Some WD SATA-I drives spin up and down erratically when the link

@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
* Note: Original code is ata_bus_softreset().
*/

static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);

scc_wait_after_reset(&ap->link, devmask, deadline);

return 0;
return scc_wait_after_reset(&ap->link, devmask, deadline);
}

/**
@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
{
struct ata_port *ap = link->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
unsigned int devmask = 0, err_mask;
unsigned int devmask = 0;
int rc;
u8 err;

DPRINTK("ENTER\n");
@@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,

/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
err_mask = scc_bus_softreset(ap, devmask, deadline);
if (err_mask) {
ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
rc = scc_bus_softreset(ap, devmask, deadline);
if (rc) {
ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
return -EIO;
}

@@ -701,7 +701,7 @@ int regcache_sync_block(struct regmap *map, void *block,
unsigned int block_base, unsigned int start,
unsigned int end)
{
if (regmap_can_raw_write(map))
if (regmap_can_raw_write(map) && !map->use_single_rw)
return regcache_sync_block_raw(map, block, cache_present,
block_base, start, end);
else

@@ -105,7 +105,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
if (!regmap_readable(map, reg))
if (!map->format.format_write && !regmap_readable(map, reg))
return false;

if (map->volatile_reg)

@@ -1487,6 +1487,7 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
static void clk_change_rate(struct clk *clk)
{
struct clk *child;
struct hlist_node *tmp;
unsigned long old_rate;
unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
@@ -1525,7 +1526,11 @@ static void clk_change_rate(struct clk *clk)
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

hlist_for_each_entry(child, &clk->children, child_node) {
/*
* Use safe iteration, as change_rate can actually swap parents
* for certain clock types.
*/
hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != clk)
continue;

@@ -37,6 +37,8 @@
#define P_PLL2 2
#define P_PLL3 3

#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }

static u8 mmcc_pxo_pll8_pll2_map[] = {
[P_PXO] = 0,
[P_PLL8] = 2,
@@ -58,8 +60,8 @@ static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {

static const char *mmcc_pxo_pll8_pll2_pll3[] = {
"pxo",
"pll2",
"pll8_vote",
"pll2",
"pll3",
};

@@ -709,18 +711,18 @@ static struct clk_branch csiphy2_timer_clk = {
};

static struct freq_tbl clk_tbl_gfx2d[] = {
{ 27000000, P_PXO, 1, 0 },
{ 48000000, P_PLL8, 1, 8 },
{ 54857000, P_PLL8, 1, 7 },
{ 64000000, P_PLL8, 1, 6 },
{ 76800000, P_PLL8, 1, 5 },
{ 96000000, P_PLL8, 1, 4 },
{ 128000000, P_PLL8, 1, 3 },
{ 145455000, P_PLL2, 2, 11 },
{ 160000000, P_PLL2, 1, 5 },
{ 177778000, P_PLL2, 2, 9 },
{ 200000000, P_PLL2, 1, 4 },
{ 228571000, P_PLL2, 2, 7 },
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
F_MN( 64000000, P_PLL8, 1, 6),
F_MN( 76800000, P_PLL8, 1, 5),
F_MN( 96000000, P_PLL8, 1, 4),
F_MN(128000000, P_PLL8, 1, 3),
F_MN(145455000, P_PLL2, 2, 11),
F_MN(160000000, P_PLL2, 1, 5),
F_MN(177778000, P_PLL2, 2, 9),
F_MN(200000000, P_PLL2, 1, 4),
F_MN(228571000, P_PLL2, 2, 7),
{ }
};

@@ -841,22 +843,22 @@ static struct clk_branch gfx2d1_clk = {
};

static struct freq_tbl clk_tbl_gfx3d[] = {
{ 27000000, P_PXO, 1, 0 },
{ 48000000, P_PLL8, 1, 8 },
{ 54857000, P_PLL8, 1, 7 },
{ 64000000, P_PLL8, 1, 6 },
{ 76800000, P_PLL8, 1, 5 },
{ 96000000, P_PLL8, 1, 4 },
{ 128000000, P_PLL8, 1, 3 },
{ 145455000, P_PLL2, 2, 11 },
{ 160000000, P_PLL2, 1, 5 },
{ 177778000, P_PLL2, 2, 9 },
{ 200000000, P_PLL2, 1, 4 },
{ 228571000, P_PLL2, 2, 7 },
{ 266667000, P_PLL2, 1, 3 },
{ 300000000, P_PLL3, 1, 4 },
{ 320000000, P_PLL2, 2, 5 },
{ 400000000, P_PLL2, 1, 2 },
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54857000, P_PLL8, 1, 7),
F_MN( 64000000, P_PLL8, 1, 6),
F_MN( 76800000, P_PLL8, 1, 5),
F_MN( 96000000, P_PLL8, 1, 4),
F_MN(128000000, P_PLL8, 1, 3),
F_MN(145455000, P_PLL2, 2, 11),
F_MN(160000000, P_PLL2, 1, 5),
F_MN(177778000, P_PLL2, 2, 9),
F_MN(200000000, P_PLL2, 1, 4),
F_MN(228571000, P_PLL2, 2, 7),
F_MN(266667000, P_PLL2, 1, 3),
F_MN(300000000, P_PLL3, 1, 4),
F_MN(320000000, P_PLL2, 2, 5),
F_MN(400000000, P_PLL2, 1, 2),
{ }
};

@@ -896,7 +898,7 @@ static struct clk_dyn_rcg gfx3d_src = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_src",
.parent_names = mmcc_pxo_pll8_pll2_pll3,
.num_parents = 3,
.num_parents = 4,
.ops = &clk_dyn_rcg_ops,
},
},
@@ -994,7 +996,7 @@ static struct clk_rcg jpegd_src = {
.ns_reg = 0x00ac,
.p = {
.pre_div_shift = 12,
.pre_div_width = 2,
.pre_div_width = 4,
},
.s = {
.src_sel_shift = 0,
@@ -1114,7 +1116,7 @@ static struct clk_branch mdp_lut_clk = {
.enable_reg = 0x016c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.parent_names = (const char *[]){ "mdp_clk" },
.parent_names = (const char *[]){ "mdp_src" },
.num_parents = 1,
.name = "mdp_lut_clk",
.ops = &clk_branch_ops,
@@ -1341,15 +1343,15 @@ static struct clk_branch hdmi_app_clk = {
};

static struct freq_tbl clk_tbl_vcodec[] = {
{ 27000000, P_PXO, 1, 0 },
{ 32000000, P_PLL8, 1, 12 },
{ 48000000, P_PLL8, 1, 8 },
{ 54860000, P_PLL8, 1, 7 },
{ 96000000, P_PLL8, 1, 4 },
{ 133330000, P_PLL2, 1, 6 },
{ 200000000, P_PLL2, 1, 4 },
{ 228570000, P_PLL2, 2, 7 },
{ 266670000, P_PLL2, 1, 3 },
F_MN( 27000000, P_PXO, 1, 0),
F_MN( 32000000, P_PLL8, 1, 12),
F_MN( 48000000, P_PLL8, 1, 8),
F_MN( 54860000, P_PLL8, 1, 7),
F_MN( 96000000, P_PLL8, 1, 4),
F_MN(133330000, P_PLL2, 1, 6),
F_MN(200000000, P_PLL2, 1, 4),
F_MN(228570000, P_PLL2, 2, 7),
F_MN(266670000, P_PLL2, 1, 3),
{ }
};

@@ -1225,6 +1225,8 @@ err_get_freq:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);

up_write(&policy->rwsem);

if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:

@@ -7,7 +7,6 @@ TODO for slave dma
- imx-dma
- imx-sdma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.

@@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
if (list_empty(&dwc->queue))
return;

list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}

/*----------------------------------------------------------------------*/

static void
@@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
* the completed ones.
*/
list_splice_init(&dwc->active_list, &list);
if (!list_empty(&dwc->queue)) {
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
dwc_dostart_first_queued(dwc);

spin_unlock_irqrestore(&dwc->lock, flags);

@@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);

if (!list_empty(&dwc->queue)) {
list_move(dwc->queue.next, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
}
dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}

@@ -677,17 +680,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
* possible, perhaps even appending to those already submitted
* for DMA. But this is hard to do in a race-free manner.
*/
if (list_empty(&dwc->active_list)) {
dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
} else {
dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
desc->txd.cookie);

list_add_tail(&desc->desc_node, &dwc->queue);
}
dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->queue);

spin_unlock_irqrestore(&dwc->lock, flags);

@@ -1092,9 +1087,12 @@ dwc_tx_status(struct dma_chan *chan,
static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
unsigned long flags;

if (!list_empty(&dwc->queue))
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
spin_lock_irqsave(&dwc->lock, flags);
if (list_empty(&dwc->active_list))
dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)

@@ -100,7 +100,7 @@ static int ast_detect_chip(struct drm_device *dev)
}
ast->vga2_clone = false;
} else {
ast->chip = 2000;
ast->chip = AST2000;
DRM_INFO("AST 2000 detected\n");
}
}

@@ -1426,10 +1426,13 @@ unlock:
out:
switch (ret) {
case -EIO:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
if (i915_terminally_wedged(&dev_priv->gpu_error)) {
/*
* We eat errors when the gpu is terminally wedged to avoid
* userspace unduly crashing (gl has no provisions for mmaps to
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
ret = VM_FAULT_SIGBUS;
break;
}

@@ -839,7 +839,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
}
}

static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Falling back to manually reading VBT from "
"VBIOS ROM for %s\n",

@@ -750,7 +750,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};

static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;

@@ -544,7 +544,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_encoder_destroy,
};

static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;

@@ -475,6 +475,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}

/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(ring);

/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring

@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(encoder->base.dev,
to_intel_crtc(encoder->base.crtc)->pipe);

I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
}

@@ -100,7 +100,16 @@ void
nouveau_vga_fini(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
bool runtime = false;

if (nouveau_runtime_pm == 1)
runtime = true;
if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
runtime = true;

vga_switcheroo_unregister_client(dev->pdev);
if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}

@@ -851,6 +851,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

rdev->pm.dpm.thermal.min_temp = low_temp;
rdev->pm.dpm.thermal.max_temp = high_temp;

return 0;
}

@@ -922,7 +925,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
pi->vddc_leakage.count = 0;
pi->vddci_leakage.count = 0;

if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
continue;
if (vddc != 0 && vddc != virtual_voltage_id) {
pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
pi->vddc_leakage.count++;
}
}
} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,

@@ -4392,7 +4392,7 @@ struct bonaire_mqd
*/
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
int r, i, idx;
int r, i, j, idx;
u32 tmp;
bool use_doorbell = true;
u64 hqd_gpu_addr;
@@ -4511,7 +4511,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->queue_state.cp_hqd_pq_wptr= 0;
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
for (i = 0; i < rdev->usec_timeout; i++) {
for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
@@ -5545,12 +5545,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);

if (vm == NULL)
return;

radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
@@ -5600,7 +5601,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 1 << vm->id);

/* compute doesn't have PFP */
if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);

@@ -461,13 +461,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
{
int r;

/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
RREG32(SRBM_SOFT_RESET);

r = cik_sdma_load_microcode(rdev);
if (r)
return r;

@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
u32 reg_offset, wb_offset;
int i, r;

/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);

for (i = 0; i < 2; i++) {
if (i == 0) {
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];

@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;

/* Reset dma */
if (rdev->family >= CHIP_RV770)
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
else
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);

WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

@@ -294,6 +294,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
u16 virtual_voltage_id,
u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,

@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}

/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
if ((dev->pdev->device == 0x9805) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA)
return false;
}

return true;
}
@@ -1955,7 +1962,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
"lm64",
"lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@@ -1966,7 +1973,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
"lm64",
"lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@@ -2273,19 +2280,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
DRM_INFO("Special thermal controller config\n");
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
DRM_INFO("External GPIO thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
} else if (controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
@@ -3228,6 +3247,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
return 0;
}

union get_voltage_info {
struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};

int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
u16 virtual_voltage_id,
u16 *voltage)
{
int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
u32 entry_id;
u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
union get_voltage_info args;

for (entry_id = 0; entry_id < count; entry_id++) {
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
virtual_voltage_id)
break;
}

if (entry_id >= count)
return -EINVAL;

args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);

return 0;
}

int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)

@@ -1314,7 +1314,7 @@ int radeon_device_init(struct radeon_device *rdev,

r = radeon_init(rdev);
if (r)
return r;
goto failed;

r = radeon_ib_ring_tests(rdev);
if (r)
@@ -1334,7 +1334,7 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
r = radeon_init(rdev);
if (r)
return r;
goto failed;
}

if ((radeon_testing & 1)) {
@@ -1356,6 +1356,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;

failed:
if (runtime)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
return r;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1376,6 +1381,8 @@ void radeon_device_fini(struct radeon_device *rdev)
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
vga_switcheroo_unregister_client(rdev->pdev);
if (rdev->flags & RADEON_IS_PX)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
@@ -1600,7 +1607,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_pm_suspend(rdev);
radeon_suspend(rdev);

for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1646,9 +1652,24 @@ retry:
}
}

radeon_pm_resume(rdev);
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
} else {
/* resume old pm late */
radeon_pm_resume(rdev);
}

drm_helper_resume_force_mode(rdev->ddev);

/* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);

ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */

@@ -254,7 +254,14 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
break;
case RADEON_INFO_ACCEL_WORKING2:
*value = rdev->accel_working;
if (rdev->family == CHIP_HAWAII) {
if (rdev->accel_working)
*value = 2;
else
*value = 0;
} else {
*value = rdev->accel_working;
}
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)

@@ -458,10 +458,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;

if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");

return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -475,11 +471,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;

/* Can't set dpm state when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;

mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -493,7 +484,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
goto fail;
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);

/* Can't set dpm state when the card is off */
if (!(rdev->flags & RADEON_IS_PX) ||
(ddev->switch_power_state == DRM_SWITCH_POWER_ON))
radeon_pm_compute_clocks(rdev);

fail:
return count;
}
@@ -1276,10 +1272,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1303,6 +1295,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:

@@ -34,7 +34,7 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
uint32_t *cpu_addr;
uint64_t *cpu_addr;
int i, r;

*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);

@@ -4810,7 +4810,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)

/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));

if (vm->id < 8) {

@@ -1877,7 +1877,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;

pi->enable_bapm = false;
/* There are stability issues reported on with
* bapm enabled when switching between AC and battery
* power. At the same time, some MSI boards hang
* if it's not enabled and dpm is enabled. Just enable
* it for MSI boards right now.
*/
if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
else
pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;

@@ -122,6 +122,7 @@ static int tilcdc_unload(struct drm_device *dev)
struct tilcdc_drm_private *priv = dev->dev_private;
struct tilcdc_module *mod, *cur;

drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -628,10 +629,10 @@ static int __init tilcdc_drm_init(void)
static void __exit tilcdc_drm_fini(void)
{
DBG("fini");
tilcdc_tfp410_fini();
tilcdc_slave_fini();
tilcdc_panel_fini();
platform_driver_unregister(&tilcdc_platform_driver);
tilcdc_panel_fini();
tilcdc_slave_fini();
tilcdc_tfp410_fini();
}

late_initcall(tilcdc_drm_init);

@@ -151,6 +151,7 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(panel_connector);
}
@@ -285,10 +286,8 @@ static void panel_destroy(struct tilcdc_module *mod)
{
struct panel_module *panel_mod = to_panel_module(mod);

if (panel_mod->timings) {
if (panel_mod->timings)
display_timings_release(panel_mod->timings);
kfree(panel_mod->timings);
}

tilcdc_module_cleanup(mod);
kfree(panel_mod->info);

@@ -166,6 +166,7 @@ struct slave_connector {
static void slave_connector_destroy(struct drm_connector *connector)
{
struct slave_connector *slave_connector = to_slave_connector(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(slave_connector);
}

@@ -167,6 +167,7 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(tfp410_connector);
}

@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
* @gfp: GFP flags.
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
gfp_t gfp)
{
unsigned long irq_flags;
struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;

pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -382,32 +383,35 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
* this can deadlock when called a sc->gfp_mask that is not equal to
* GFP_KERNEL.
* We need to pass sc->gfp_mask to ttm_page_pool_free().
*
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
static DEFINE_MUTEX(lock);
static unsigned start_pool;
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;

pool_offset = pool_offset % NUM_POOLS;
if (!mutex_trylock(&lock))
return SHRINK_STOP;
pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
shrink_pages = ttm_page_pool_free(pool, nr_free,
sc->gfp_mask);
freed += nr_free - shrink_pages;
}
mutex_unlock(&lock);
return freed;
}

@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
ttm_page_pool_free(pool, npages);
ttm_page_pool_free(pool, npages, GFP_KERNEL);
}

/*
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
ttm_pool_mm_shrink_fini(_manager);

for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
GFP_KERNEL);

kobject_put(&_manager->kobj);
_manager = NULL;

@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
* @gfp: GFP flags.
**/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
gfp_t gfp)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
npages_to_free, nr_free);
}
#endif
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);

if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)

/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
ttm_dma_page_pool_free(pool, npages);
ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
* needs to be paid to sc->gfp_mask to determine if this can be done or not.
* GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
* bad.
* We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
*
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
static unsigned start_pool;
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned pool_offset;
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&_manager->pools))
return SHRINK_STOP;

mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
if (!mutex_trylock(&_manager->lock))
return SHRINK_STOP;
if (!_manager->npools)
goto out;
pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;

@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
sc->gfp_mask);
freed += nr_free - shrink_pages;

pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
out:
mutex_unlock(&_manager->lock);
return freed;
}
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct device_pools *p;
unsigned long count = 0;

mutex_lock(&_manager->lock);
if (!mutex_trylock(&_manager->lock))
return 0;
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);

@@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

mutex_lock(&dev_priv->hw_mutex);

vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
;

dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
}
EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);

void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
{
dev->pm_domain = NULL;
}
EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);

static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);

@@ -687,7 +687,6 @@ static int logi_dj_raw_event(struct hid_device *hdev,
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_report *dj_report = (struct dj_report *) data;
unsigned long flags;
bool report_processed = false;

dbg_hid("%s, size:%d\n", __func__, size);

@@ -714,34 +713,42 @@ static int logi_dj_raw_event(struct hid_device *hdev,
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/

/* case 1) */
if (data[0] != REPORT_ID_DJ_SHORT)
return false;

if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
(dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
dev_err(&hdev->dev, "%s: invalid device index:%d\n",
/*
* Device index is wrong, bail out.
* This driver can ignore safely the receiver notifications,
* so ignore those reports too.
*/
if (dj_report->device_index != DJ_RECEIVER_INDEX)
dev_err(&hdev->dev, "%s: invalid device index:%d\n",
__func__, dj_report->device_index);
return false;
}

spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
switch (dj_report->report_type) {
case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
logi_dj_recv_queue_notification(djrcv_dev, dj_report);
break;
case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
STATUS_LINKLOSS) {
logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
}
break;
default:
logi_dj_recv_forward_report(djrcv_dev, dj_report);
switch (dj_report->report_type) {
case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
logi_dj_recv_queue_notification(djrcv_dev, dj_report);
break;
case REPORT_TYPE_NOTIF_CONNECTION_STATUS:
if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] ==
STATUS_LINKLOSS) {
logi_dj_recv_forward_null_report(djrcv_dev, dj_report);
}
report_processed = true;
break;
default:
logi_dj_recv_forward_report(djrcv_dev, dj_report);
}
spin_unlock_irqrestore(&djrcv_dev->lock, flags);

return report_processed;
return true;
}

static int logi_dj_probe(struct hid_device *hdev,

@@ -27,6 +27,7 @@

#define DJ_MAX_PAIRED_DEVICES 6
#define DJ_MAX_NUMBER_NOTIFICATIONS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6

@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
npoints = (size - 4) / 9;
if (npoints > 15) {
hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
size);
return 0;
}
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
if (size < 6 || ((size - 6) % 8) != 0)
return 0;
npoints = (size - 6) / 8;
if (npoints > 15) {
hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
size);
return 0;
}
msc->ntouches = 0;
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);