mirror of https://github.com/hardkernel/linux.git
Merge branch 'linux-linaro-lsk-v4.4-android' of git://git.linaro.org/kernel/linux-linaro-stable.git

* linux-linaro-lsk-v4.4-android: (434 commits)
  Linux 4.4.52
  kvm: vmx: ensure VMCS is current while enabling PML
  Revert "usb: chipidea: imx: enable CI_HDRC_SET_NON_ZERO_TTHA"
  rtlwifi: rtl_usb: Fix for URB leaking when doing ifconfig up/down
  block: fix double-free in the failure path of cgwb_bdi_init()
  goldfish: Sanitize the broken interrupt handler
  x86/platform/goldfish: Prevent unconditional loading
  USB: serial: ark3116: fix register-accessor error handling
  USB: serial: opticon: fix CTS retrieval at open
  USB: serial: spcp8x5: fix modem-status handling
  USB: serial: ftdi_sio: fix line-status over-reporting
  USB: serial: ftdi_sio: fix extreme low-latency setting
  USB: serial: ftdi_sio: fix modem-status error handling
  USB: serial: cp210x: add new IDs for GE Bx50v3 boards
  USB: serial: mos7840: fix another NULL-deref at open
  tty: serial: msm: Fix module autoload
  net: socket: fix recvmmsg not returning error from sock_error
  ip: fix IP_CHECKSUM handling
  irda: Fix lockdep annotations in hashbin_delete().
  dccp: fix freeing skb too early for IPV6_RECVPKTINFO
  ...

Conflicts:
  drivers/mmc/core/mmc.c
  drivers/usb/dwc3/ep0.c
  drivers/usb/host/xhci.h

Change-Id: Icf331a68162ab686d01996a3f43fa2e97543f62e
@@ -77,7 +77,7 @@ Examples:
 clks: ccm@53f80000{
 compatible = "fsl,imx31-ccm";
 reg = <0x53f80000 0x4000>;
-interrupts = <0 31 0x04 0 53 0x04>;
+interrupts = <31>, <53>;
 #clock-cells = <1>;
 };
@@ -1261,6 +1261,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 When zero, profiling data is discarded and associated
 debugfs files are removed at module unload time.

+goldfish [X86] Enable the goldfish android emulator platform.
+Don't use this when you are not running on the
+android emulator
+
 gpt [EFI] Forces disk with valid GPT signature but
 invalid Protective MBR to be treated as GPT. If the
 primary GPT is corrupted, it enables the backup/alternate
Makefile (2 lines changed)

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 41
+SUBLEVEL = 52
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -8,6 +8,7 @@
 # CONFIG_VT is not set
 CONFIG_ANDROID_TIMED_GPIO=y
 CONFIG_ARM_KERNMEM_PERMS=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
 " lp 1f \n"
 " nop \n"
 "1: \n"
-: : "r"(loops));
+:
+: "r"(loops)
+: "lp_count");
 }

 extern void __bad_udelay(void);
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 if (state.fault)
 goto fault;

+/* clear any remanants of delay slot */
 if (delay_mode(regs)) {
-regs->ret = regs->bta;
+regs->ret = regs->bta & ~1U;
 regs->status32 &= ~STATUS_DE_MASK;
 } else {
 regs->ret += state.instr_len;
@@ -85,6 +85,7 @@
 #size-cells = <1>;
 compatible = "m25p64";
 spi-max-frequency = <30000000>;
+m25p,fast-read;
 reg = <0>;
 partition@0 {
 label = "U-Boot-SPL";
@@ -30,11 +30,11 @@
 };
 };

-avic: avic-interrupt-controller@60000000 {
+avic: interrupt-controller@68000000 {
 compatible = "fsl,imx31-avic", "fsl,avic";
 interrupt-controller;
 #interrupt-cells = <1>;
-reg = <0x60000000 0x100000>;
+reg = <0x68000000 0x100000>;
 };

 soc {
@@ -110,13 +110,6 @@
 interrupts = <19>;
 clocks = <&clks 25>;
 };
-
-clks: ccm@53f80000{
-compatible = "fsl,imx31-ccm";
-reg = <0x53f80000 0x4000>;
-interrupts = <0 31 0x04 0 53 0x04>;
-#clock-cells = <1>;
-};
 };

 aips@53f00000 { /* AIPS2 */
@@ -126,6 +119,13 @@
 reg = <0x53f00000 0x100000>;
 ranges;

+clks: ccm@53f80000{
+compatible = "fsl,imx31-ccm";
+reg = <0x53f80000 0x4000>;
+interrupts = <31>, <53>;
+#clock-cells = <1>;
+};
+
 gpt: timer@53f90000 {
 compatible = "fsl,imx31-gpt";
 reg = <0x53f90000 0x4000>;
@@ -319,8 +319,6 @@
 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
 "fsl,imx-audio-sgtl5000";
 model = "imx6q-nitrogen6_max-sgtl5000";
-pinctrl-names = "default";
-pinctrl-0 = <&pinctrl_sgtl5000>;
 ssi-controller = <&ssi1>;
 audio-codec = <&codec>;
 audio-routing =
@@ -401,6 +399,8 @@

 codec: sgtl5000@0a {
 compatible = "fsl,sgtl5000";
+pinctrl-names = "default";
+pinctrl-0 = <&pinctrl_sgtl5000>;
 reg = <0x0a>;
 clocks = <&clks 201>;
 VDDA-supply = <&reg_2p5v>;
@@ -1023,7 +1023,7 @@
 mstp7_clks: mstp7_clks@e615014c {
 compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
 reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
-clocks = <&mp_clk>, <&mp_clk>,
+clocks = <&mp_clk>, <&hp_clk>,
 <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
 <&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>;
 #clock-cells = <1>;
@@ -87,8 +87,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 u32 *rki = ctx->key_enc + (i * kwords);
 u32 *rko = rki + kwords;

+#ifndef CONFIG_CPU_BIG_ENDIAN
 rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
 rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+#else
+rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
+rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
+#endif
 rko[1] = rko[0] ^ rki[1];
 rko[2] = rko[1] ^ rki[2];
 rko[3] = rko[2] ^ rki[3];
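Editor's note: the hunk above (and the matching arm64 one later in this merge) makes the AES key-schedule RotWord step endian-aware. A minimal standalone C sketch of why the rotation direction and the rcon shift flip with byte order (illustrative only; ror32/rol32 here are local stand-ins for the kernel helpers, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t w, unsigned s) { return (w >> s) | (w << (32 - s)); }
static uint32_t rol32(uint32_t w, unsigned s) { return (w << s) | (w >> (32 - s)); }

int main(void)
{
    uint32_t le_word = 0x04030201;  /* key bytes 01 02 03 04 packed little-endian */
    uint32_t be_word = 0x01020304;  /* same bytes packed big-endian */
    uint32_t rcon = 0x01;

    /* RotWord moves byte 0 to the end: 02 03 04 01. On LE that is a
     * right-rotate of the u32 and rcon lands in the low byte; on BE it
     * is a left-rotate and rcon must be shifted into the top byte. */
    printf("LE: %08x\n", ror32(le_word, 8) ^ rcon);
    printf("BE: %08x\n", rol32(be_word, 8) ^ (rcon << 24));
    return 0;
}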
@@ -81,6 +81,9 @@
 #define ARM_CPU_XSCALE_ARCH_V2 0x4000
 #define ARM_CPU_XSCALE_ARCH_V3 0x6000

+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
 extern unsigned int processor_id;

 #ifdef CONFIG_CPU_CP15
@@ -1066,6 +1066,22 @@ static int __init arch_hw_breakpoint_init(void)
 return 0;
 }

+/*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+return 0;
+}
+
 has_ossr = core_has_os_save_restore();

 /* Determine how many BRPs/WRPs are available. */
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct pt_regs newregs;
+struct pt_regs newregs = *task_pt_regs(target);

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 &newregs,
@@ -9,6 +9,7 @@
 */
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>

 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
 static inline void ipi_flush_tlb_page(void *arg)
 {
 struct tlb_args *ta = (struct tlb_args *)arg;
+unsigned int __ua_flags = uaccess_save_and_enable();

 local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+uaccess_restore(__ua_flags);
 }

 static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
 static inline void ipi_flush_tlb_range(void *arg)
 {
 struct tlb_args *ta = (struct tlb_args *)arg;
+unsigned int __ua_flags = uaccess_save_and_enable();

 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+uaccess_restore(__ua_flags);
 }

 static inline void ipi_flush_tlb_kernel_range(void *arg)
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)

 ENTRY(__get_user_8)
-check_uaccess r0, 8, r1, r2, __get_user_bad
+check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr) r2, [r0]
 6: TUSER(ldr) r3, [r0, #4]
@@ -298,6 +298,16 @@ static struct clk emac_clk = {
 .gpsc = 1,
 };

+/*
+ * In order to avoid adding the emac_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * mdio inheriting the rate from emac_clk.
+ */
+static struct clk mdio_clk = {
+.name = "mdio",
+.parent = &emac_clk,
+};
+
 static struct clk mcasp_clk = {
 .name = "mcasp",
 .parent = &pll0_sysclk2,
@@ -462,7 +472,7 @@ static struct clk_lookup da850_clks[] = {
 CLK(NULL, "arm", &arm_clk),
 CLK(NULL, "rmii", &rmii_clk),
 CLK("davinci_emac.1", NULL, &emac_clk),
-CLK("davinci_mdio.0", "fck", &emac_clk),
+CLK("davinci_mdio.0", "fck", &mdio_clk),
 CLK("davinci-mcasp.0", NULL, &mcasp_clk),
 CLK("da8xx_lcdc.0", "fck", &lcdc_clk),
 CLK("da830-mmc.0", NULL, &mmcsd0_clk),
@@ -243,10 +243,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 save_state = 1;
 break;
 case PWRDM_POWER_RET:
-if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) {
+if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
 save_state = 0;
-break;
-}
 break;
 default:
 /*
 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
 */
 bool prcmu_is_cpu_in_wfi(int cpu)
 {
-return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
-PRCM_ARM_WFI_STANDBY_WFI0;
+return readl(PRCM_ARM_WFI_STANDBY) &
+(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
 }

 /*
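Editor's note: the fix above is a classic operator-precedence bug — '&' binds tighter than '?:', so the old code evaluated (readl(...) & cpu) as the condition and returned a constant mask. A minimal standalone C sketch of the two parses (toy values only, not the real register semantics):

#include <stdio.h>

#define WFI0 0x1u
#define WFI1 0x2u

/* Toy stand-in for the PRCM_ARM_WFI_STANDBY read: CPU1 is in WFI. */
static unsigned int read_standby(void) { return WFI1; }

int main(void)
{
    int cpu = 1;

    /* Old parse: tests (read_standby() & cpu), then yields a mask. */
    unsigned int buggy = read_standby() & cpu ? WFI1 : WFI0;

    /* Fixed parse: pick the mask for this CPU, then test it. */
    unsigned int fixed = read_standby() & (cpu ? WFI1 : WFI0);

    printf("buggy=%#x fixed=%#x\n", buggy, fixed);  /* buggy=0x1 fixed=0x2 */
    return 0;
}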
@@ -59,7 +59,7 @@ void __iomem *zynq_scu_base;
 static void __init zynq_memory_init(void)
 {
 if (!__pa(PAGE_OFFSET))
-memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+memblock_reserve(__pa(PAGE_OFFSET), 0x80000);
 }

 static struct platform_device zynq_cpuidle_device = {
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,

 void __init early_abt_enable(void)
 {
-fsr_info[22].fn = early_abort_handler;
+fsr_info[FSR_FS_AEA].fn = early_abort_handler;
 local_abt_enable();
-fsr_info[22].fn = do_bad;
+fsr_info[FSR_FS_AEA].fn = do_bad;
 }

 #ifndef CONFIG_ARM_LPAE
@@ -11,11 +11,15 @@
 #define FSR_FS5_0 (0x3f)

 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA 17
+
 static inline int fsr_fs(unsigned int fsr)
 {
 return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA 22
+
 static inline int fsr_fs(unsigned int fsr)
 {
 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
@@ -720,7 +720,7 @@ config SETEND_EMULATION
 endif

 config ARM64_SW_TTBR0_PAN
-bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
+bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 help
 Enabling this option prevents the kernel from accessing
 user-space memory directly by pointing TTBR0_EL1 to a reserved
@@ -9,6 +9,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 .text
 .arch armv8-a+crypto
@@ -19,7 +20,7 @@
 */
 ENTRY(ce_aes_ccm_auth_data)
 ldr w8, [x3] /* leftover from prev round? */
-ld1 {v0.2d}, [x0] /* load mac */
+ld1 {v0.16b}, [x0] /* load mac */
 cbz w8, 1f
 sub w8, w8, #16
 eor v1.16b, v1.16b, v1.16b
@@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data)
 beq 8f /* out of input? */
 cbnz w8, 0b
 eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.2d}, [x4] /* load first round key */
+1: ld1 {v3.16b}, [x4] /* load first round key */
 prfm pldl1strm, [x1]
 cmp w5, #12 /* which key size? */
 add x6, x4, #16
@@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data)
 mov v5.16b, v3.16b
 b 4f
 2: mov v4.16b, v3.16b
-ld1 {v5.2d}, [x6], #16 /* load 2nd round key */
+ld1 {v5.16b}, [x6], #16 /* load 2nd round key */
 3: aese v0.16b, v4.16b
 aesmc v0.16b, v0.16b
-4: ld1 {v3.2d}, [x6], #16 /* load next round key */
+4: ld1 {v3.16b}, [x6], #16 /* load next round key */
 aese v0.16b, v5.16b
 aesmc v0.16b, v0.16b
-5: ld1 {v4.2d}, [x6], #16 /* load next round key */
+5: ld1 {v4.16b}, [x6], #16 /* load next round key */
 subs w7, w7, #3
 aese v0.16b, v3.16b
 aesmc v0.16b, v0.16b
-ld1 {v5.2d}, [x6], #16 /* load next round key */
+ld1 {v5.16b}, [x6], #16 /* load next round key */
 bpl 3b
 aese v0.16b, v4.16b
 subs w2, w2, #16 /* last data? */
@@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data)
 ld1 {v1.16b}, [x1], #16 /* load next input block */
 eor v0.16b, v0.16b, v1.16b /* xor with mac */
 bne 1b
-6: st1 {v0.2d}, [x0] /* store mac */
+6: st1 {v0.16b}, [x0] /* store mac */
 beq 10f
 adds w2, w2, #16
 beq 10f
@@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data)
 adds w7, w7, #1
 bne 9b
 eor v0.16b, v0.16b, v1.16b
-st1 {v0.2d}, [x0]
+st1 {v0.16b}, [x0]
 10: str w8, [x3]
 ret
 ENDPROC(ce_aes_ccm_auth_data)
@@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data)
 * u32 rounds);
 */
 ENTRY(ce_aes_ccm_final)
-ld1 {v3.2d}, [x2], #16 /* load first round key */
-ld1 {v0.2d}, [x0] /* load mac */
+ld1 {v3.16b}, [x2], #16 /* load first round key */
+ld1 {v0.16b}, [x0] /* load mac */
 cmp w3, #12 /* which key size? */
 sub w3, w3, #2 /* modified # of rounds */
-ld1 {v1.2d}, [x1] /* load 1st ctriv */
+ld1 {v1.16b}, [x1] /* load 1st ctriv */
 bmi 0f
 bne 3f
 mov v5.16b, v3.16b
 b 2f
 0: mov v4.16b, v3.16b
-1: ld1 {v5.2d}, [x2], #16 /* load next round key */
+1: ld1 {v5.16b}, [x2], #16 /* load next round key */
 aese v0.16b, v4.16b
 aesmc v0.16b, v0.16b
 aese v1.16b, v4.16b
 aesmc v1.16b, v1.16b
-2: ld1 {v3.2d}, [x2], #16 /* load next round key */
+2: ld1 {v3.16b}, [x2], #16 /* load next round key */
 aese v0.16b, v5.16b
 aesmc v0.16b, v0.16b
 aese v1.16b, v5.16b
 aesmc v1.16b, v1.16b
-3: ld1 {v4.2d}, [x2], #16 /* load next round key */
+3: ld1 {v4.16b}, [x2], #16 /* load next round key */
 subs w3, w3, #3
 aese v0.16b, v3.16b
 aesmc v0.16b, v0.16b
@@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final)
 aese v1.16b, v4.16b
 /* final round key cancels out */
 eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */
-st1 {v0.2d}, [x0] /* store result */
+st1 {v0.16b}, [x0] /* store result */
 ret
 ENDPROC(ce_aes_ccm_final)

 .macro aes_ccm_do_crypt,enc
 ldr x8, [x6, #8] /* load lower ctr */
-ld1 {v0.2d}, [x5] /* load mac */
-rev x8, x8 /* keep swabbed ctr in reg */
+ld1 {v0.16b}, [x5] /* load mac */
+CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
 0: /* outer loop */
-ld1 {v1.1d}, [x6] /* load upper ctr */
+ld1 {v1.8b}, [x6] /* load upper ctr */
 prfm pldl1strm, [x1]
 add x8, x8, #1
 rev x9, x8
 cmp w4, #12 /* which key size? */
 sub w7, w4, #2 /* get modified # of rounds */
 ins v1.d[1], x9 /* no carry in lower ctr */
-ld1 {v3.2d}, [x3] /* load first round key */
+ld1 {v3.16b}, [x3] /* load first round key */
 add x10, x3, #16
 bmi 1f
 bne 4f
 mov v5.16b, v3.16b
 b 3f
 1: mov v4.16b, v3.16b
-ld1 {v5.2d}, [x10], #16 /* load 2nd round key */
+ld1 {v5.16b}, [x10], #16 /* load 2nd round key */
 2: /* inner loop: 3 rounds, 2x interleaved */
 aese v0.16b, v4.16b
 aesmc v0.16b, v0.16b
 aese v1.16b, v4.16b
 aesmc v1.16b, v1.16b
-3: ld1 {v3.2d}, [x10], #16 /* load next round key */
+3: ld1 {v3.16b}, [x10], #16 /* load next round key */
 aese v0.16b, v5.16b
 aesmc v0.16b, v0.16b
 aese v1.16b, v5.16b
 aesmc v1.16b, v1.16b
-4: ld1 {v4.2d}, [x10], #16 /* load next round key */
+4: ld1 {v4.16b}, [x10], #16 /* load next round key */
 subs w7, w7, #3
 aese v0.16b, v3.16b
 aesmc v0.16b, v0.16b
 aese v1.16b, v3.16b
 aesmc v1.16b, v1.16b
-ld1 {v5.2d}, [x10], #16 /* load next round key */
+ld1 {v5.16b}, [x10], #16 /* load next round key */
 bpl 2b
 aese v0.16b, v4.16b
 aese v1.16b, v4.16b
@@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final)
 eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
 st1 {v1.16b}, [x0], #16 /* write output block */
 bne 0b
-rev x8, x8
-st1 {v0.2d}, [x5] /* store mac */
+CPU_LE( rev x8, x8 )
+st1 {v0.16b}, [x5] /* store mac */
 str x8, [x6, #8] /* store lsb end of ctr (BE) */
 5: ret

 6: eor v0.16b, v0.16b, v5.16b /* final round mac */
 eor v1.16b, v1.16b, v5.16b /* final round enc */
-st1 {v0.2d}, [x5] /* store mac */
+st1 {v0.16b}, [x5] /* store mac */
 add w2, w2, #16 /* process partial tail block */
 7: ldrb w9, [x1], #1 /* get 1 byte of input */
 umov w6, v1.b[0] /* get top crypted ctr byte */
@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 kernel_neon_begin_partial(4);

 __asm__(" ld1 {v0.16b}, %[in] ;"
-" ld1 {v1.2d}, [%[key]], #16 ;"
+" ld1 {v1.16b}, [%[key]], #16 ;"
 " cmp %w[rounds], #10 ;"
 " bmi 0f ;"
 " bne 3f ;"
 " mov v3.16b, v1.16b ;"
 " b 2f ;"
 "0: mov v2.16b, v1.16b ;"
-" ld1 {v3.2d}, [%[key]], #16 ;"
+" ld1 {v3.16b}, [%[key]], #16 ;"
 "1: aese v0.16b, v2.16b ;"
 " aesmc v0.16b, v0.16b ;"
-"2: ld1 {v1.2d}, [%[key]], #16 ;"
+"2: ld1 {v1.16b}, [%[key]], #16 ;"
 " aese v0.16b, v3.16b ;"
 " aesmc v0.16b, v0.16b ;"
-"3: ld1 {v2.2d}, [%[key]], #16 ;"
+"3: ld1 {v2.16b}, [%[key]], #16 ;"
 " subs %w[rounds], %w[rounds], #3 ;"
 " aese v0.16b, v1.16b ;"
 " aesmc v0.16b, v0.16b ;"
-" ld1 {v3.2d}, [%[key]], #16 ;"
+" ld1 {v3.16b}, [%[key]], #16 ;"
 " bpl 1b ;"
 " aese v0.16b, v2.16b ;"
 " eor v0.16b, v0.16b, v3.16b ;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 kernel_neon_begin_partial(4);

 __asm__(" ld1 {v0.16b}, %[in] ;"
-" ld1 {v1.2d}, [%[key]], #16 ;"
+" ld1 {v1.16b}, [%[key]], #16 ;"
 " cmp %w[rounds], #10 ;"
 " bmi 0f ;"
 " bne 3f ;"
 " mov v3.16b, v1.16b ;"
 " b 2f ;"
 "0: mov v2.16b, v1.16b ;"
-" ld1 {v3.2d}, [%[key]], #16 ;"
+" ld1 {v3.16b}, [%[key]], #16 ;"
 "1: aesd v0.16b, v2.16b ;"
 " aesimc v0.16b, v0.16b ;"
-"2: ld1 {v1.2d}, [%[key]], #16 ;"
+"2: ld1 {v1.16b}, [%[key]], #16 ;"
 " aesd v0.16b, v3.16b ;"
 " aesimc v0.16b, v0.16b ;"
-"3: ld1 {v2.2d}, [%[key]], #16 ;"
+"3: ld1 {v2.16b}, [%[key]], #16 ;"
 " subs %w[rounds], %w[rounds], #3 ;"
 " aesd v0.16b, v1.16b ;"
 " aesimc v0.16b, v0.16b ;"
-" ld1 {v3.2d}, [%[key]], #16 ;"
+" ld1 {v3.16b}, [%[key]], #16 ;"
 " bpl 1b ;"
 " aesd v0.16b, v2.16b ;"
 " eor v0.16b, v0.16b, v3.16b ;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 u32 *rki = ctx->key_enc + (i * kwords);
 u32 *rko = rki + kwords;

+#ifndef CONFIG_CPU_BIG_ENDIAN
 rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+#else
+rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
+rki[0];
+#endif
 rko[1] = rko[0] ^ rki[1];
 rko[2] = rko[1] ^ rki[2];
 rko[3] = rko[2] ^ rki[3];
@@ -10,6 +10,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 #define AES_ENTRY(func) ENTRY(ce_ ## func)
 #define AES_ENDPROC(func) ENDPROC(ce_ ## func)
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 cbz w6, .Lcbcencloop

 ld1 {v0.16b}, [x5] /* get iv */
-enc_prepare w3, x2, x5
+enc_prepare w3, x2, x6

 .Lcbcencloop:
 ld1 {v1.16b}, [x1], #16 /* get next pt block */
 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
-encrypt_block v0, w3, x2, x5, w6
+encrypt_block v0, w3, x2, x6, w7
 st1 {v0.16b}, [x0], #16
 subs w4, w4, #1
 bne .Lcbcencloop
+st1 {v0.16b}, [x5] /* return iv */
 ret
 AES_ENDPROC(aes_cbc_encrypt)
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 cbz w6, .LcbcdecloopNx

 ld1 {v7.16b}, [x5] /* get iv */
-dec_prepare w3, x2, x5
+dec_prepare w3, x2, x6

 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 ld1 {v1.16b}, [x1], #16 /* get next ct block */
 mov v0.16b, v1.16b /* ...and copy to v0 */
-decrypt_block v0, w3, x2, x5, w6
+decrypt_block v0, w3, x2, x6, w7
 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
 mov v7.16b, v1.16b /* ct is next iv */
 st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 bne .Lcbcdecloop
 .Lcbcdecout:
 FRAME_POP
+st1 {v7.16b}, [x5] /* return iv */
 ret
 AES_ENDPROC(aes_cbc_decrypt)
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

 AES_ENTRY(aes_ctr_encrypt)
 FRAME_PUSH
-cbnz w6, .Lctrfirst /* 1st time around? */
-umov x5, v4.d[1] /* keep swabbed ctr in reg */
-rev x5, x5
-#if INTERLEAVE >= 2
-cmn w5, w4 /* 32 bit overflow? */
-bcs .Lctrinc
-add x5, x5, #1 /* increment BE ctr */
-b .LctrincNx
-#else
-b .Lctrinc
-#endif
-.Lctrfirst:
+cbz w6, .Lctrnotfirst /* 1st time around? */
 enc_prepare w3, x2, x6
 ld1 {v4.16b}, [x5]
-umov x5, v4.d[1] /* keep swabbed ctr in reg */
-rev x5, x5

+.Lctrnotfirst:
+umov x8, v4.d[1] /* keep swabbed ctr in reg */
+rev x8, x8
 #if INTERLEAVE >= 2
-cmn w5, w4 /* 32 bit overflow? */
+cmn w8, w4 /* 32 bit overflow? */
 bcs .Lctrloop
 .LctrloopNx:
 subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 mov v0.8b, v4.8b
 mov v1.8b, v4.8b
-rev x7, x5
-add x5, x5, #1
+rev x7, x8
+add x8, x8, #1
 ins v0.d[1], x7
-rev x7, x5
-add x5, x5, #1
+rev x7, x8
+add x8, x8, #1
 ins v1.d[1], x7
 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
 do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
 st1 {v0.16b-v1.16b}, [x0], #32
 #else
 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
-dup v7.4s, w5
+dup v7.4s, w8
 mov v0.16b, v4.16b
 add v7.4s, v7.4s, v8.4s
 mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
 eor v2.16b, v7.16b, v2.16b
 eor v3.16b, v5.16b, v3.16b
 st1 {v0.16b-v3.16b}, [x0], #64
-add x5, x5, #INTERLEAVE
+add x8, x8, #INTERLEAVE
 #endif
-cbz w4, .LctroutNx
-.LctrincNx:
-rev x7, x5
+rev x7, x8
 ins v4.d[1], x7
+cbz w4, .Lctrout
 b .LctrloopNx
-.LctroutNx:
-sub x5, x5, #1
-rev x7, x5
-ins v4.d[1], x7
-b .Lctrout
 .Lctr1x:
 adds w4, w4, #INTERLEAVE
 beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
 mov v0.16b, v4.16b
 encrypt_block v0, w3, x2, x6, w7
+
+adds x8, x8, #1 /* increment BE ctr */
+rev x7, x8
+ins v4.d[1], x7
+bcs .Lctrcarry /* overflow? */
+
+.Lctrcarrydone:
 subs w4, w4, #1
 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
 ld1 {v3.16b}, [x1], #16
 eor v3.16b, v0.16b, v3.16b
 st1 {v3.16b}, [x0], #16
-beq .Lctrout
-.Lctrinc:
-adds x5, x5, #1 /* increment BE ctr */
-rev x7, x5
-ins v4.d[1], x7
-bcc .Lctrloop /* no overflow? */
+bne .Lctrloop

 .Lctrout:
+st1 {v4.16b}, [x5] /* return next CTR value */
 FRAME_POP
 ret

+.Lctrhalfblock:
+ld1 {v3.8b}, [x1]
+eor v3.8b, v0.8b, v3.8b
+st1 {v3.8b}, [x0]
+FRAME_POP
+ret
+
+.Lctrcarry:
 umov x7, v4.d[0] /* load upper word of ctr */
 rev x7, x7 /* ... to handle the carry */
 add x7, x7, #1
 rev x7, x7
 ins v4.d[0], x7
-b .Lctrloop
-.Lctrhalfblock:
-ld1 {v3.8b}, [x1]
-eor v3.8b, v0.8b, v3.8b
-st1 {v3.8b}, [x0]
-.Lctrout:
-FRAME_POP
-ret
+b .Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
 .ltorg
@@ -386,7 +382,8 @@ AES_ENDPROC(aes_ctr_encrypt)
 .endm

 .Lxts_mul_x:
-.word 1, 0, 0x87, 0
+CPU_LE( .quad 1, 0x87 )
+CPU_BE( .quad 0x87, 1 )

 AES_ENTRY(aes_xts_encrypt)
 FRAME_PUSH
@@ -9,6 +9,7 @@
 */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 #define AES_ENTRY(func) ENTRY(neon_ ## func)
 #define AES_ENDPROC(func) ENDPROC(neon_ ## func)
@@ -83,13 +84,13 @@
 .endm

 .macro do_block, enc, in, rounds, rk, rkp, i
-ld1 {v15.16b}, [\rk]
+ld1 {v15.4s}, [\rk]
 add \rkp, \rk, #16
 mov \i, \rounds
 1111: eor \in\().16b, \in\().16b, v15.16b /* ^round key */
 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */
 sub_bytes \in
-ld1 {v15.16b}, [\rkp], #16
+ld1 {v15.4s}, [\rkp], #16
 subs \i, \i, #1
 beq 2222f
 .if \enc == 1
@@ -229,7 +230,7 @@
 .endm

 .macro do_block_2x, enc, in0, in1 rounds, rk, rkp, i
-ld1 {v15.16b}, [\rk]
+ld1 {v15.4s}, [\rk]
 add \rkp, \rk, #16
 mov \i, \rounds
 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */

@@ -237,7 +238,7 @@
 sub_bytes_2x \in0, \in1
 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */
 tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
-ld1 {v15.16b}, [\rkp], #16
+ld1 {v15.4s}, [\rkp], #16
 subs \i, \i, #1
 beq 2222f
 .if \enc == 1
@@ -254,7 +255,7 @@
 .endm

 .macro do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
-ld1 {v15.16b}, [\rk]
+ld1 {v15.4s}, [\rk]
 add \rkp, \rk, #16
 mov \i, \rounds
 1111: eor \in0\().16b, \in0\().16b, v15.16b /* ^round key */

@@ -266,7 +267,7 @@
 tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */
 tbl \in2\().16b, {\in2\().16b}, v13.16b /* ShiftRows */
 tbl \in3\().16b, {\in3\().16b}, v13.16b /* ShiftRows */
-ld1 {v15.16b}, [\rkp], #16
+ld1 {v15.4s}, [\rkp], #16
 subs \i, \i, #1
 beq 2222f
 .if \enc == 1
@@ -306,12 +307,16 @@
 .text
 .align 4
 .LForward_ShiftRows:
-.byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
-.byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+CPU_LE( .byte 0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3 )
+CPU_LE( .byte 0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb )
+CPU_BE( .byte 0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8 )
+CPU_BE( .byte 0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0 )

 .LReverse_ShiftRows:
-.byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
-.byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+CPU_LE( .byte 0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb )
+CPU_LE( .byte 0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3 )
+CPU_BE( .byte 0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8 )
+CPU_BE( .byte 0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0 )

 .LForward_Sbox:
 .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
@@ -29,8 +29,8 @@
 * struct ghash_key const *k, const char *head)
 */
 ENTRY(pmull_ghash_update)
-ld1 {SHASH.16b}, [x3]
-ld1 {XL.16b}, [x1]
+ld1 {SHASH.2d}, [x3]
+ld1 {XL.2d}, [x1]
 movi MASK.16b, #0xe1
 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
 shl MASK.2d, MASK.2d, #57

@@ -74,6 +74,6 @@ CPU_LE( rev64 T1.16b, T1.16b )

 cbnz w0, 0b

-st1 {XL.16b}, [x1]
+st1 {XL.2d}, [x1]
 ret
 ENDPROC(pmull_ghash_update)
@@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform)
 ld1r {k3.4s}, [x6]

 /* load state */
-ldr dga, [x0]
+ld1 {dgav.4s}, [x0]
 ldr dgb, [x0, #16]

 /* load sha1_ce_state::finalize */

@@ -144,7 +144,7 @@ CPU_LE( rev32 v11.16b, v11.16b )
 b 1b

 /* store new state */
-3: str dga, [x0]
+3: st1 {dgav.4s}, [x0]
 str dgb, [x0, #16]
 ret
 ENDPROC(sha1_ce_transform)
@@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform)
 ld1 {v12.4s-v15.4s}, [x8]

 /* load state */
-ldp dga, dgb, [x0]
+ld1 {dgav.4s, dgbv.4s}, [x0]

 /* load sha256_ce_state::finalize */
 ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]

@@ -148,6 +148,6 @@ CPU_LE( rev32 v19.16b, v19.16b )
 b 1b

 /* store new state */
-3: stp dga, dgb, [x0]
+3: st1 {dgav.4s, dgbv.4s}, [x0]
 ret
 ENDPROC(sha2_ce_transform)
@@ -2,6 +2,7 @@
 #define __ASM_ALTERNATIVE_H

 #include <asm/cpufeature.h>
+#include <asm/insn.h>

 #ifndef __ASSEMBLY__
@@ -90,24 +91,15 @@ void apply_alternatives(void *start, size_t length);
 .endm

 /*
- * Begin an alternative code sequence.
+ * Alternative sequences
 *
- * The code that follows this macro will be assembled and linked as
- * normal. There are no restrictions on this code.
- */
-.macro alternative_if_not cap
-.pushsection .altinstructions, "a"
-altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
-.popsection
-661:
-.endm
-
-/*
- * Provide the alternative code sequence.
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
 *
- * The code that follows this macro is assembled into a special
- * section to be used for dynamic patching. Code that follows this
- * macro must:
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
 *
 * 1. Be exactly the same length (in bytes) as the default code
 * sequence.
@@ -116,8 +108,38 @@ void apply_alternatives(void *start, size_t length);
 * alternative sequence it is defined in (branches into an
 * alternative sequence are not fixed up).
 */
+
+/*
+ * Begin an alternative code sequence.
+ */
+.macro alternative_if_not cap
+.set .Lasm_alt_mode, 0
+.pushsection .altinstructions, "a"
+altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+.popsection
+661:
+.endm
+
+.macro alternative_if cap
+.set .Lasm_alt_mode, 1
+.pushsection .altinstructions, "a"
+altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+.popsection
+.pushsection .altinstr_replacement, "ax"
+.align 2 /* So GAS knows label 661 is suitably aligned */
+661:
+.endm
+
+/*
+ * Provide the other half of the alternative code sequence.
+ */
 .macro alternative_else
-662: .pushsection .altinstr_replacement, "ax"
+662:
+.if .Lasm_alt_mode==0
+.pushsection .altinstr_replacement, "ax"
+.else
+.popsection
+.endif
 663:
 .endm

@@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length);
 * Complete an alternative code sequence.
 */
 .macro alternative_endif
-664: .popsection
+664:
+.if .Lasm_alt_mode==0
+.popsection
+.endif
-.org . - (664b-663b) + (662b-661b)
-.org . - (662b-661b) + (664b-663b)
+.org . - (664b-663b) + (662b-661b)
+.org . - (662b-661b) + (664b-663b)
 .endm

+/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+nops (662b-661b) / AARCH64_INSN_SIZE
+alternative_endif
+.endm
+
 #define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
 alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
@@ -107,6 +107,15 @@
 dmb \opt
 .endm

+/*
+ * NOP sequence
+ */
+.macro nops, num
+.rept \num
+nop
+.endr
+.endm
+
 /*
 * Emit an entry into the exception table
 */
@@ -383,15 +392,11 @@ alternative_endif
 */
 .macro post_ttbr0_update_workaround
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
-nop
-nop
-nop
-alternative_else
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
 ic iallu
 dsb nsh
 isb
-alternative_endif
+alternative_else_nop_endif
 #endif
 .endm
@@ -20,6 +20,9 @@

 #ifndef __ASSEMBLY__

+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) asm volatile(__nops(n))
+
 #define sev() asm volatile("sev" : : : "memory")
 #define wfe() asm volatile("wfe" : : : "memory")
 #define wfi() asm volatile("wfi" : : : "memory")
@@ -21,10 +21,7 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>

-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
 #include <asm/errno.h>
-#include <asm/sysreg.h>

 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
 do { \
@@ -218,9 +218,11 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
 * value may have not been initialised yet (activate_mm caller) or the
 * ASID has changed since the last run (following the context switch
- * of another thread of the same process).
+ * of another thread of the same process). Avoid setting the reserved
+ * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
 */
-update_saved_ttbr0(tsk, next);
+if (next != &init_mm)
+update_saved_ttbr0(tsk, next);
 }

 #define deactivate_mm(tsk,mm) do { } while (0)
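Editor's note: the guard above exists because idle_task_exit() switches to init_mm, and recording the kernel's swapper_pg_dir as a task's "user" TTBR0 would later be reloaded for uaccess. A toy C sketch of the shape of the fix (names and structs here are stand-ins, not the kernel's):

#include <stdio.h>

struct mm { const char *name; };

static struct mm init_mm = { "init_mm" };
static const char *saved_ttbr0 = "user_pgd";

static void update_saved_ttbr0(struct mm *next) { saved_ttbr0 = next->name; }

static void switch_mm(struct mm *next)
{
    if (next != &init_mm)          /* the added guard */
        update_saved_ttbr0(next);
}

int main(void)
{
    switch_mm(&init_mm);
    printf("saved TTBR0 still: %s\n", saved_ttbr0);  /* stays "user_pgd" */
    return 0;
}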
@@ -21,8 +21,6 @@

 #include <uapi/asm/ptrace.h>

-#define _PSR_PAN_BIT 22
-
 /* Current Exception Level values, as contained in CurrentEL */
 #define CurrentEL_EL1 (1 << 2)
 #define CurrentEL_EL2 (2 << 2)
@@ -47,10 +47,10 @@ typedef unsigned long mm_segment_t;
 struct thread_info {
 unsigned long flags; /* low level flags */
 mm_segment_t addr_limit; /* address limit */
-struct task_struct *task; /* main task structure */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 u64 ttbr0; /* saved TTBR0_EL1 */
 #endif
+struct task_struct *task; /* main task structure */
 int preempt_count; /* 0 => preemptable, <0 => bug */
 int cpu; /* cpu */
 };
@@ -18,6 +18,10 @@
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H

+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
 #ifndef __ASSEMBLY__

 /*
@@ -26,11 +30,8 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>

-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/kernel-pgtable.h>
 #include <asm/ptrace.h>
-#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -130,7 +131,7 @@ static inline void set_fs(mm_segment_t fs)
 * User access enabling/disabling.
 */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
-static inline void uaccess_ttbr0_disable(void)
+static inline void __uaccess_ttbr0_disable(void)
 {
 unsigned long ttbr;

@@ -140,7 +141,7 @@ static inline void uaccess_ttbr0_disable(void)
 isb();
 }

-static inline void uaccess_ttbr0_enable(void)
+static inline void __uaccess_ttbr0_enable(void)
 {
 unsigned long flags;
|
||||
isb();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
#else
|
||||
static inline void uaccess_ttbr0_disable(void)
|
||||
|
||||
static inline bool uaccess_ttbr0_disable(void)
|
||||
{
|
||||
if (!system_uses_ttbr0_pan())
|
||||
return false;
|
||||
__uaccess_ttbr0_disable();
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void uaccess_ttbr0_enable(void)
|
||||
static inline bool uaccess_ttbr0_enable(void)
|
||||
{
|
||||
if (!system_uses_ttbr0_pan())
|
||||
return false;
|
||||
__uaccess_ttbr0_enable();
|
||||
return true;
|
||||
}
|
||||
#else
|
||||
static inline bool uaccess_ttbr0_disable(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool uaccess_ttbr0_enable(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define __uaccess_disable(alt) \
|
||||
do { \
|
||||
if (system_uses_ttbr0_pan()) \
|
||||
uaccess_ttbr0_disable(); \
|
||||
else \
|
||||
if (!uaccess_ttbr0_disable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
|
||||
#define __uaccess_enable(alt) \
|
||||
do { \
|
||||
if (system_uses_ttbr0_pan()) \
|
||||
uaccess_ttbr0_enable(); \
|
||||
else \
|
||||
if (!uaccess_ttbr0_enable()) \
|
||||
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
|
||||
CONFIG_ARM64_PAN)); \
|
||||
} while (0)
|
||||
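Editor's note: the refactor above changes uaccess_ttbr0_enable()/disable() to return bool so callers can fall back to the PSTATE.PAN alternative only when the TTBR0 path did not run. A minimal C sketch of that "handled?/fallback" pattern (illustrative only; the flag and the puts() stand in for the capability check and the ALTERNATIVE asm):

#include <stdbool.h>
#include <stdio.h>

static bool have_ttbr0_pan;  /* stand-in for system_uses_ttbr0_pan() */

static bool uaccess_ttbr0_enable(void)
{
    if (!have_ttbr0_pan)
        return false;        /* not handled here */
    /* ... would switch TTBR0_EL1 to the saved user table ... */
    return true;
}

static void uaccess_enable(void)
{
    if (!uaccess_ttbr0_enable())
        puts("fallback: clear PSTATE.PAN via the patched alternative");
}

int main(void) { uaccess_enable(); return 0; }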
@@ -407,69 +422,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);

 #else /* __ASSEMBLY__ */

-#include <asm/alternative.h>
 #include <asm/assembler.h>
-#include <asm/kernel-pgtable.h>

 /*
 * User access enabling/disabling macros.
 */
-.macro uaccess_ttbr0_disable, tmp1
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+.macro __uaccess_ttbr0_disable, tmp1
 mrs \tmp1, ttbr1_el1 // swapper_pg_dir
 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
 isb
 .endm

-.macro uaccess_ttbr0_enable, tmp1
+.macro __uaccess_ttbr0_enable, tmp1
 get_thread_info \tmp1
-ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1
+ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
 isb
 .endm

+.macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+__uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+.endm
+
+.macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+save_and_disable_irq \tmp2 // avoid preemption
+__uaccess_ttbr0_enable \tmp1
+restore_irq \tmp2
+alternative_else_nop_endif
+.endm
+#else
+.macro uaccess_ttbr0_disable, tmp1
+.endm
+
+.macro uaccess_ttbr0_enable, tmp1, tmp2
+.endm
+#endif

 /*
 * These macros are no-ops when UAO is present.
 */
 .macro uaccess_disable_not_uao, tmp1
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
 uaccess_ttbr0_disable \tmp1
-alternative_else
-nop
-nop
-nop
-nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
-nop
-alternative_else
+alternative_if ARM64_ALT_PAN_NOT_UAO
 SET_PSTATE_PAN(1)
-alternative_endif
+alternative_else_nop_endif
 .endm

 .macro uaccess_enable_not_uao, tmp1, tmp2
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
-save_and_disable_irq \tmp2 // avoid preemption
-uaccess_ttbr0_enable \tmp1
-restore_irq \tmp2
-alternative_else
-nop
-nop
-nop
-nop
-nop
-nop
-nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
-nop
-alternative_else
+uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
 SET_PSTATE_PAN(0)
-alternative_endif
+alternative_else_nop_endif
 .endm

 #endif /* __ASSEMBLY__ */
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
 __uint128_t vregs[32];
 __u32 fpsr;
 __u32 fpcr;
+__u32 __reserved[2];
 };

 struct user_hwdebug_state {
@@ -14,7 +14,6 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>

-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
@@ -38,11 +38,11 @@ int main(void)
 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
 DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
-#endif
 DEFINE(TI_TASK, offsetof(struct thread_info, task));
 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
+#endif
 BLANK();
 DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
 BLANK();
@@ -120,11 +120,9 @@
 * feature as all TTBR0_EL1 accesses are disabled, not just those to
 * user mappings.
 */
-alternative_if_not ARM64_HAS_PAN
-nop
-alternative_else
+alternative_if ARM64_HAS_PAN
 b 1f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif

 .if \el != 0
 mrs x21, ttbr0_el1
@@ -134,7 +132,7 @@ alternative_endif
 and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
 .endif

-uaccess_ttbr0_disable x21
+__uaccess_ttbr0_disable x21
 1:
 #endif
@@ -181,17 +179,15 @@ alternative_endif
 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 * PAN bit checking.
 */
-alternative_if_not ARM64_HAS_PAN
-nop
-alternative_else
+alternative_if ARM64_HAS_PAN
 b 2f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif

 .if \el != 0
-tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled
+tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 .endif

-uaccess_ttbr0_enable x0
+__uaccess_ttbr0_enable x0

 .if \el == 0
 /*
@@ -681,7 +677,7 @@ el0_inv:
 mov x0, sp
 mov x1, #BAD_SYNC
 mov x2, x25
-bl bad_mode
+bl bad_el0_sync
 b ret_to_user
 ENDPROC(el0_sync)
@@ -550,6 +550,8 @@ static int hw_break_set(struct task_struct *target,
 /* (address, ctrl) registers */
 limit = regset->n * regset->size;
 while (count && offset < limit) {
+if (count < PTRACE_HBP_ADDR_SZ)
+return -EINVAL;
 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
 offset, offset + PTRACE_HBP_ADDR_SZ);
 if (ret)

@@ -559,6 +561,8 @@ static int hw_break_set(struct task_struct *target,
 return ret;
 offset += PTRACE_HBP_ADDR_SZ;

+if (!count)
+break;
 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
 offset, offset + PTRACE_HBP_CTRL_SZ);
 if (ret)
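Editor's note: the two added checks above bound a copy-in loop that consumes alternating (address, ctrl) fields from a user buffer. A standalone C sketch of the same loop shape, showing why both the short-buffer check and the clean-exit check are needed (toy sizes and a fake copyin; not kernel code):

#include <stddef.h>
#include <stdio.h>

#define ADDR_SZ sizeof(unsigned long)
#define CTRL_SZ sizeof(unsigned int)

/* Toy copyin: consumes 'want' bytes from a remaining byte budget. */
static int copyin(size_t *count, size_t want)
{
    if (*count < want)
        return -1;
    *count -= want;
    return 0;
}

int main(void)
{
    size_t count = ADDR_SZ + CTRL_SZ + ADDR_SZ;  /* one full pair plus a lone addr */
    size_t offset = 0, limit = 4 * (ADDR_SZ + CTRL_SZ);

    while (count && offset < limit) {
        if (count < ADDR_SZ)          /* short-buffer check */
            return -22;                /* -EINVAL */
        copyin(&count, ADDR_SZ);
        offset += ADDR_SZ;
        if (!count)                    /* clean exit between fields */
            break;
        if (copyin(&count, CTRL_SZ))
            return -22;
        offset += CTRL_SZ;
    }
    printf("consumed up to offset %zu\n", offset);
    return 0;
}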
@@ -595,7 +599,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct user_pt_regs newregs;
+struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
 if (ret)
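Editor's note: this hunk (and the fpr/tls/system_call/compat_tls ones that follow) seeds the regset staging buffer from the task's current state, so a partial write leaves the untouched tail intact instead of stack garbage. A standalone C sketch of the idea (toy 4-register struct, fake copyin via memcpy; not kernel code):

#include <stdio.h>
#include <string.h>

struct user_pt_regs { unsigned long regs[4]; };

static struct user_pt_regs task_regs = { { 1, 2, 3, 4 } };

static void gpr_set(const void *buf, size_t len)
{
    /* The fix: start from the task's current registers. */
    struct user_pt_regs newregs = task_regs;

    memcpy(&newregs, buf, len);   /* short write only touches a prefix */
    task_regs = newregs;
}

int main(void)
{
    unsigned long first = 42;
    gpr_set(&first, sizeof(first));
    printf("%lu %lu %lu %lu\n", task_regs.regs[0], task_regs.regs[1],
           task_regs.regs[2], task_regs.regs[3]);
    /* prints "42 2 3 4"; with an uninitialised newregs the tail would
     * have been whatever happened to be on the stack */
    return 0;
}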
@@ -625,7 +629,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-struct user_fpsimd_state newstate;
+struct user_fpsimd_state newstate =
+target->thread.fpsimd_state.user_fpsimd;

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
 if (ret)
@@ -649,7 +654,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
 const void *kbuf, const void __user *ubuf)
 {
 int ret;
-unsigned long tls;
+unsigned long tls = target->thread.tp_value;

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 if (ret)
@@ -675,7 +680,8 @@ static int system_call_set(struct task_struct *target,
 unsigned int pos, unsigned int count,
 const void *kbuf, const void __user *ubuf)
 {
-int syscallno, ret;
+int syscallno = task_pt_regs(target)->syscallno;
+int ret;

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
 if (ret)
@@ -947,7 +953,7 @@ static int compat_tls_set(struct task_struct *target,
 const void __user *ubuf)
 {
 int ret;
-compat_ulong_t tls;
+compat_ulong_t tls = target->thread.tp_value;

 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
 if (ret)
@@ -469,16 +469,33 @@ const char *esr_get_class_string(u32 esr)
 }

 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
 */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
 console_verbose();

 pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
 handler[reason], esr, esr_get_class_string(esr));

+die("Oops - bad mode", regs, 0);
+local_irq_disable();
+panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
 siginfo_t info;
 void __user *pc = (void __user *)instruction_pointer(regs);
 console_verbose();

-pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-handler[reason], esr, esr_get_class_string(esr));
+pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+smp_processor_id(), esr, esr_get_class_string(esr));
 __show_regs(regs);

 info.si_signo = SIGILL;

@@ -486,7 +503,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 info.si_code = ILL_ILLOPC;
 info.si_addr = pc;

-arm64_notify_die("Oops - bad mode", regs, &info, 0);
+current->thread.fault_address = 0;
+current->thread.fault_code = 0;
+
+force_sig_info(info.si_signo, &info, current);
 }

 void __pte_error(const char *file, int line, unsigned long val)
@@ -17,9 +17,6 @@
 */
 #include <linux/linkage.h>

-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>

 .text
@@ -16,10 +16,7 @@

 #include <linux/linkage.h>

-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>

 /*
@@ -18,10 +18,7 @@

 #include <linux/linkage.h>

-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>

 /*
@@ -16,10 +16,7 @@

 #include <linux/linkage.h>

-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>

 /*
@@ -23,6 +23,7 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/uaccess.h>

 /*
 * flush_icache_range(start,end)

@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
 * - end - virtual end address of region
 */
 ENTRY(__flush_cache_user_range)
+uaccess_ttbr0_enable x2, x3
 dcache_line_size x2, x3
 sub x3, x2, #1
 bic x4, x0, x3

@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
 dsb ish
 isb
 mov x0, #0
+1:
+uaccess_ttbr0_disable x1
 ret
 9:
 mov x0, #-EFAULT
-ret
+b 1b
 ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
@@ -510,10 +510,10 @@ static const struct fault_info {
 { do_bad, SIGBUS, 0, "unknown 17" },
 { do_bad, SIGBUS, 0, "unknown 18" },
 { do_bad, SIGBUS, 0, "unknown 19" },
-{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
-{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
-{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
-{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
 { do_bad, SIGBUS, 0, "synchronous parity error" },
 { do_bad, SIGBUS, 0, "unknown 25" },
 { do_bad, SIGBUS, 0, "unknown 26" },
@@ -90,7 +90,6 @@ ENTRY(privcmd_call)
 mov x2, x3
 mov x3, x4
 mov x4, x5
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 /*
 * Privcmd calls are issued by the userspace. The kernel needs to
 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1

@@ -99,15 +98,12 @@ ENTRY(privcmd_call)
 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
 * is enabled (it implies that hardware UAO and PAN disabled).
 */
-uaccess_enable_not_uao x6, x7
-#endif
+uaccess_ttbr0_enable x6, x7
 hvc XEN_IMM

-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 /*
 * Disable userspace access from kernel once the hyp call completed.
 */
-uaccess_disable_not_uao x6
-#endif
+uaccess_ttbr0_disable x6
 ret
 ENDPROC(privcmd_call);
@@ -10,6 +10,9 @@

 asflags-y += $(LINUXINCLUDE)
 ccflags-y += -O2 $(LINUXINCLUDE)
+
+ifdef CONFIG_ETRAX_AXISFLASHMAP
+
 arch-$(CONFIG_ETRAX_ARCH_V10) = v10
 arch-$(CONFIG_ETRAX_ARCH_V32) = v32

@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
 $(call if_changed,objcopy)
 cp -p $(obj)/rescue.bin $(objtree)

+else
+$(obj)/rescue.bin:
+
+endif
+
 $(obj)/testrescue.bin: $(obj)/testrescue.o
 $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
 # Pad it to 784 bytes
@@ -324,8 +324,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 #endif

 /* Invalidate the icache for these ranges */
-local_flush_icache_range((unsigned long)gebase,
-(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+flush_icache_range((unsigned long)gebase,
+(unsigned long)gebase + ALIGN(size, PAGE_SIZE));

 /*
 * Allocate comm page for guest kernel, a TLB will be reserved for
@@ -6,7 +6,7 @@
 #endif

 #include <linux/compiler.h>
-#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
+#include <asm/types.h>
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>

@@ -17,6 +17,12 @@
 * to include/asm-i386/bitops.h or kerneldoc
 */

+#if __BITS_PER_LONG == 64
+#define SHIFT_PER_LONG 6
+#else
+#define SHIFT_PER_LONG 5
+#endif
+
 #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
@@ -3,10 +3,8 @@

 #if defined(__LP64__)
 #define __BITS_PER_LONG 64
-#define SHIFT_PER_LONG 6
 #else
 #define __BITS_PER_LONG 32
-#define SHIFT_PER_LONG 5
 #endif

 #include <asm-generic/bitsperlong.h>
@@ -1,6 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H

+#include <asm/bitsperlong.h>
 #include <linux/types.h>
 #include <linux/compiler.h>

@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 }
 #define __arch_swab32 __arch_swab32

-#if BITS_PER_LONG > 32
+#if __BITS_PER_LONG > 32
 /*
 ** From "PA-RISC 2.0 Architecture", HP Professional Books.
 ** See Appendix I page 8 , "Endian Byte Swapping".

@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 return x;
 }
 #define __arch_swab64 __arch_swab64
-#endif /* BITS_PER_LONG > 32 */
+#endif /* __BITS_PER_LONG > 32 */

 #endif /* _PARISC_SWAB_H */
@@ -485,7 +485,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
 struct eeh_pe *pe = (struct eeh_pe *)data;
-bool *clear_sw_state = flag;
+bool clear_sw_state = *(bool *)flag;
 int i, rc = 1;

 for (i = 0; rc && i < 3; i++)
@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
struct device *dev;
int ret = 0;

root = of_find_node_by_path("/");
@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
if (!of_match_node(matches, child))
continue;

if (bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node))
dev = bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node);
if (dev) {
put_device(dev);
continue;
}

ret = ibmebus_create_device(child);
if (ret) {
@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
char *path;
ssize_t rc = 0;

@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
if (!path)
return -ENOMEM;

if (bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path)) {
dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path);
if (dev) {
put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
@@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
put_device(dev);

kfree(path);
return count;
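All three hunks above switch boolean uses of bus_find_device() to a held pointer so the reference the lookup takes can be dropped with put_device(). A minimal sketch of that pattern, assuming the 4.4-era bus_find_device() signature; my_bus_has_device() is a made-up helper, not the ibmebus code:

/* bus_find_device() returns its match with a reference held, so every
 * hit must be balanced by put_device() or the device leaks. */
static bool my_bus_has_device(struct bus_type *bus, void *data,
			      int (*match)(struct device *dev, void *data))
{
	struct device *dev = bus_find_device(bus, NULL, data, match);

	if (!dev)
		return false;
	put_device(dev);	/* drop the reference the lookup took */
	return true;
}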
@@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache)
lis r3, KERNELBASE@h
iccci 0,r3
#endif
#elif CONFIG_FSL_BOOKE
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
mfspr r3,SPRN_L1CSR0
ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
@@ -2664,6 +2664,9 @@ static void __init prom_find_boot_cpu(void)

cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

if (!PHANDLE_VALID(cpu_pkg))
return;

prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);
@@ -565,8 +565,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;

if (copy_to_user(ubuf, prng_data->buf, chunk))
return -EFAULT;
if (copy_to_user(ubuf, prng_data->buf, chunk)) {
ret = -EFAULT;
break;
}

nbytes -= chunk;
ret += chunk;
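The fix above fails by breaking out of the copy loop rather than returning from the middle of it, so the function's common exit path (unlocking, accounting) still runs. A hedged sketch of that shape, with the mutex and the 64-byte chunk size as assumed stand-ins rather than the real s390 prng state:

/* assumes <linux/uaccess.h>, <linux/mutex.h>, <linux/kernel.h> */
static DEFINE_MUTEX(lock);

static ssize_t read_loop(char __user *ubuf, const u8 *buf, size_t nbytes)
{
	ssize_t ret = 0;

	mutex_lock(&lock);
	while (nbytes) {
		size_t chunk = min_t(size_t, nbytes, 64);

		if (copy_to_user(ubuf + ret, buf, chunk)) {
			ret = -EFAULT;
			break;		/* fall through to the unlock */
		}
		ret += chunk;
		nbytes -= chunk;
	}
	mutex_unlock(&lock);
	return ret;
}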
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs regs;
struct pt_regs regs = *task_pt_regs(target);

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
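Seeding the local pt_regs from task_pt_regs() means a short user write can no longer leave uninitialized stack bytes in the registers it does not cover. A hypothetical regset-style sketch of the idiom (invented helper, not the tile code):

static int gpr_set(struct task_struct *target, unsigned int pos,
		   unsigned int count, const void __user *ubuf)
{
	/* start from current values: no uninitialized holes */
	struct pt_regs regs = *task_pt_regs(target);

	if (pos >= sizeof(regs))
		return -EINVAL;
	if (copy_from_user((char *)&regs + pos, ubuf,
			   min_t(unsigned int, count, sizeof(regs) - pos)))
		return -EFAULT;
	*task_pt_regs(target) = regs;
	return 0;
}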
@@ -2115,6 +2115,7 @@ static inline void __init check_timer(void)
if (idx != -1 && irq_trigger(idx))
unmask_ioapic_irq(irq_get_chip_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
@@ -2136,6 +2137,7 @@ static inline void __init check_timer(void)
 * legacy devices should be connected to IO APIC #0
 */
replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
legacy_pic->unmask(0);
if (timer_irq_works()) {
@@ -1129,7 +1129,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;

if (get_option(&arg, &bit) && bit < NCAPINTS*32)
if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
@@ -351,6 +351,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
} else {
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
disable_irq(hdev->irq);
irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));

@@ -180,7 +180,8 @@ GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
retq
END(ftrace_caller)
@@ -172,6 +172,7 @@
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define Aligned16 ((u64)1 << 55) /* Aligned to 16 byte boundary (e.g. FXSAVE) */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

@@ -434,6 +435,26 @@ FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 * Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
int _fault = 0; \
\
asm volatile("1:" insn "\n" \
"2:\n" \
".pushsection .fixup, \"ax\"\n" \
"3: movl $1, %[_fault]\n" \
" jmp 2b\n" \
".popsection\n" \
_ASM_EXTABLE(1b, 3b) \
: [_fault] "+qm"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
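asm_safe() turns a faulting instruction into an error code: if the instruction at label 1 traps, the exception table sends execution to the .fixup stub at label 3, which sets _fault and jumps back to label 2, so the macro evaluates to X86EMUL_UNHANDLEABLE instead of oopsing. A sketch of a caller, mirroring the fxsave use later in this diff; try_fxsave() is an invented name:

static int try_fxsave(struct fxregs_state *fx)
{
	/* returns X86EMUL_CONTINUE, or X86EMUL_UNHANDLEABLE on a fault */
	return asm_safe("fxsave %[fx]", , [fx] "+m"(*fx));
}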
@@ -620,21 +641,24 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
return 1;

if (ctxt->d & Aligned)
return true;
return size;
else if (ctxt->d & Unaligned)
return false;
return 1;
else if (ctxt->d & Avx)
return false;
return 1;
else if (ctxt->d & Aligned16)
return 16;
else
return true;
return size;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -692,7 +716,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
}
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
if (la & (insn_alignment(ctxt, size) - 1))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
bad:
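Returning an alignment of 1 for the unaligned cases makes the subsequent mask test vacuous, since (la & 0) is always zero. A standalone, runnable illustration of the mask test the new __linearize() check relies on:

#include <assert.h>
#include <stdint.h>

/* (la & (align - 1)) != 0 iff la is misaligned, for power-of-two align */
static int misaligned(uint64_t la, unsigned int align)
{
	return (la & (align - 1)) != 0;
}

int main(void)
{
	assert(!misaligned(0x1008, 1));		/* alignment 1: never trips */
	assert(!misaligned(0x1000, 16));	/* 16-byte aligned address */
	assert(misaligned(0x1008, 16));		/* FXSAVE-style Aligned16 check */
	return 0;
}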
@@ -779,6 +803,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned int size)
{
int rc;
ulong linear;

rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
@@ -1532,7 +1570,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1569,20 +1606,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,

rpl = selector & 3;

/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;

/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;

if (null_selector) /* for NULL selector skip all following checks */
/* NULL selector is not valid for TR, CS and (except for long mode) SS */
if (null_selector) {
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
goto exception;

if (seg == VCPU_SREG_SS) {
if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
goto exception;

/*
 * ctxt->ops->set_segment expects the CPL to be in
 * SS.DPL, so fake an expand-up 32-bit data segment.
 */
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = cpl;
seg_desc.d = 1;
seg_desc.g = 1;
}

/* Skip all following checks */
goto load;
}

ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@@ -1698,6 +1749,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);

/*
 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
 * they can load it at CPL<3 (Intel's manual says only LSS can,
 * but it's wrong).
 *
 * However, the Intel manual says that putting IST=1/DPL=3 in
 * an interrupt gate will result in SS=3 (the AMD manual instead
 * says it doesn't), so allow SS=3 in __load_segment_descriptor
 * and only forbid it here.
 */
if (seg == VCPU_SREG_SS && selector == 3 &&
ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_exception(ctxt, GP_VECTOR, 0, true);

return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@@ -3646,8 +3712,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
return segmented_write_std(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3830,6 +3896,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
u32 eax = 1, ebx, ecx = 0, edx;

ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(edx & FFL(FXSR)))
return emulate_ud(ctxt);

if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);

/*
 * Don't emulate a case that should never be hit, instead of working
 * around a lack of fxsave64/fxrstor64 on old compilers.
 */
if (ctxt->mode >= X86EMUL_MODE_PROT64)
return X86EMUL_UNHANDLEABLE;

return X86EMUL_CONTINUE;
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 * 1) 16 bit mode
 * 2) 32 bit mode
 *    - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *      preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *      save and restore
 * 3) 64-bit mode with REX.W prefix
 *    - like (2), but XMM 8-15 are being saved and restored
 * 4) 64-bit mode without REX.W prefix
 *    - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
size_t size;
int rc;

rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;

ctxt->ops->get_fpu(ctxt);

rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

ctxt->ops->put_fpu(ctxt);

if (rc != X86EMUL_CONTINUE)
return rc;

if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
else
size = offsetof(struct fxregs_state, xmm_space[0]);

return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}

static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
struct fxregs_state *new)
{
int rc = X86EMUL_CONTINUE;
struct fxregs_state old;

rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
if (rc != X86EMUL_CONTINUE)
return rc;

/*
 * 64 bit host will restore XMM 8-15, which is not correct on non-64
 * bit guests.  Load the current values in order to preserve 64 bit
 * XMMs after fxrstor.
 */
#ifdef CONFIG_X86_64
/* XXX: accessing XMM 8-15 very awkwardly */
memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
#endif

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
 * does save and restore MXCSR.
 */
if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
memcpy(new->xmm_space, old.xmm_space, 8 * 16);

return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;

rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;

rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;

if (fx_state.mxcsr >> 16)
return emulate_gp(ctxt, 0);

ctxt->ops->get_fpu(ctxt);

if (ctxt->mode < X86EMUL_MODE_PROT64)
rc = fxrstor_fixup(ctxt, &fx_state);

if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

ctxt->ops->put_fpu(ctxt);

return rc;
}

static bool valid_cr(int nr)
{
switch (nr) {
@@ -4182,7 +4373,9 @@ static const struct gprefix pfx_0f_ae_7 = {
};

static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
I(ModRM | Aligned16, em_fxsave),
I(ModRM | Aligned16, em_fxrstor),
N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
@@ -5054,21 +5247,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
int rc;

ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
rc = asm_safe("fwait");
ctxt->ops->put_fpu(ctxt);

if (unlikely(fault))
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);

return X86EMUL_CONTINUE;
@@ -2187,3 +2187,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
static_key_deferred_flush(&apic_hw_disabled);
static_key_deferred_flush(&apic_sw_disabled);
}

@@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
void kvm_lapic_exit(void);

static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
{
@@ -4867,6 +4867,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
if (vmx_xsaves_supported())
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

if (enable_pml) {
ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}

return 0;
}

@@ -7839,22 +7845,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}

static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
{
struct page *pml_pg;

pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pml_pg)
return -ENOMEM;

vmx->pml_pg = pml_pg;

vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);

return 0;
}

static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
if (vmx->pml_pg) {
@@ -8789,14 +8779,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;

err = -ENOMEM;

/*
 * If PML is turned on, failure on enabling PML just results in failure
 * of creating the vcpu, therefore we can simplify PML logic (by
 * avoiding dealing with cases, such as enabling PML partially on vcpus
 * for the guest, etc.
 */
if (enable_pml) {
vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vmx->pml_pg)
goto uninit_vcpu;
}

vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
> PAGE_SIZE);

err = -ENOMEM;
if (!vmx->guest_msrs) {
goto uninit_vcpu;
}
if (!vmx->guest_msrs)
goto free_pml;

vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8840,18 +8842,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;

/*
 * If PML is turned on, failure on enabling PML just results in failure
 * of creating the vcpu, therefore we can simplify PML logic (by
 * avoiding dealing with cases, such as enabling PML partially on vcpus
 * for the guest, etc.
 */
if (enable_pml) {
err = vmx_create_pml_buffer(vmx);
if (err)
goto free_vmcs;
}

return &vmx->vcpu;

free_vmcs:
@@ -8859,6 +8849,8 @@ free_vmcs:
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
free_pml:
vmx_destroy_pml_buffer(vmx);
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
@@ -2949,6 +2949,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);

static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
@@ -2981,10 +2983,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;

if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
u32 hflags = vcpu->arch.hflags;
if (events->smi.smm)
vcpu->arch.hflags |= HF_SMM_MASK;
hflags |= HF_SMM_MASK;
else
vcpu->arch.hflags &= ~HF_SMM_MASK;
hflags &= ~HF_SMM_MASK;
kvm_set_hflags(vcpu, hflags);

vcpu->arch.smi_pending = events->smi.pending;
if (events->smi.smm_inside_nmi)
vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
@@ -3052,6 +3057,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
memcpy(dest, xsave, XSAVE_HDR_OFFSET);

/* Set XSTATE_BV */
xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

/*
@@ -5837,6 +5843,7 @@ out:

void kvm_arch_exit(void)
{
kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
{
.callback = set_nouse_crs,
.ident = "Supermicro X8DTH",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
},
},

/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
}
};

static bool goldfish_enable __initdata;

static int __init goldfish_setup(char *str)
{
goldfish_enable = true;
return 0;
}
__setup("goldfish", goldfish_setup);

static int __init goldfish_init(void)
{
if (!goldfish_enable)
return -ENODEV;

platform_device_register_simple("goldfish_pdev_bus", -1,
goldfish_pdev_bus_resources, 2);
return 0;
}
device_initcall(goldfish_init);
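The same boot-parameter gating pattern, sketched for a made-up "myplat" driver; the name is hypothetical, but __setup(), device_initcall() and platform_device_register_simple() are the real 4.4 interfaces:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static bool myplat_enable __initdata;

/* a bare "myplat" token on the kernel command line flips the flag */
static int __init myplat_setup(char *str)
{
	myplat_enable = true;
	return 0;
}
__setup("myplat", myplat_setup);

static int __init myplat_init(void)
{
	if (!myplat_enable)
		return -ENODEV;	/* not requested, stay unregistered */

	return PTR_ERR_OR_ZERO(
		platform_device_register_simple("myplat", -1, NULL, 0));
}
device_initcall(myplat_init);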
@@ -842,7 +842,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
return WORK_CPU_UNBOUND;

if (--hctx->next_cpu_batch <= 0) {
int cpu = hctx->next_cpu, next_cpu;
int next_cpu;

next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
if (next_cpu >= nr_cpu_ids)
@@ -850,8 +850,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)

hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

return cpu;
}

return hctx->next_cpu;
@@ -1261,12 +1259,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

blk_queue_split(q, &bio, q->bio_split);

if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count,
&same_queue_rq))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);
if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
return BLK_QC_T_NONE;

rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@@ -1357,9 +1352,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)

blk_queue_split(q, &bio, q->bio_split);

if (!is_flush_fua && !blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q)) {
if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
return BLK_QC_T_NONE;
} else
request_count = blk_plug_queued_count(q);

rq = blk_mq_map_request(q, bio, &data);
if (unlikely(!rq))
@@ -1572,7 +1572,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
struct cfq_group_data *cgd;

cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
cgd = kzalloc(sizeof(*cgd), gfp);
if (!cgd)
return NULL;
return &cgd->cpd;
@@ -357,6 +357,7 @@ int crypto_register_alg(struct crypto_alg *alg)
struct crypto_larval *larval;
int err;

alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
return err;
@@ -847,6 +847,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
} else {
ret = NMI_HANDLED;
}

sev = ghes_severity(ghes->estatus->error_severity);
@@ -858,12 +860,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)

__process_error(ghes);
ghes_clear_estatus(ghes);

ret = NMI_HANDLED;
}

#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
irq_work_queue(&ghes_proc_irq_work);
if (ret == NMI_HANDLED)
irq_work_queue(&ghes_proc_irq_work);
#endif
atomic_dec(&ghes_in_nmi);
return ret;
@@ -2048,7 +2048,7 @@ static void binder_transaction(struct binder_proc *proc,
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
proc->pid, thread->pid,
extra_buffers_size);
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
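The cast matters because %lld consumes a 64-bit argument while extra_buffers_size may be narrower on 32-bit targets. A standalone userspace illustration of the same format/argument mismatch, using long long in place of the kernel's u64:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t extra = 24;	/* 32 bits wide on many binder targets */

	/* printf("%lld\n", extra);  -- undefined behavior on 32-bit */
	printf("%lld\n", (long long)extra);	/* cast matches the format */
	return 0;
}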
@@ -4139,10 +4139,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },

/*
 * Device times out with higher max sects.
 * These devices time out with higher max sects.
 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
 */
{ "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },

/* Devices we expect to fail diagnostics */
@@ -4121,6 +4121,9 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!hpriv->base)
return -ENOMEM;

hpriv->base -= SATAHC0_REG_BASE;

hpriv->clk = clk_get(&pdev->dev, NULL);
@@ -388,30 +388,29 @@ static ssize_t show_valid_zones(struct device *dev,
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn, end_pfn;
unsigned long valid_start, valid_end;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
struct zone *zone;

start_pfn = section_nr_to_pfn(mem->start_section_nr);
end_pfn = start_pfn + nr_pages;
first_page = pfn_to_page(start_pfn);

/* The block contains more than one zone can not be offlined. */
if (!test_pages_in_a_zone(start_pfn, end_pfn))
if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
return sprintf(buf, "none\n");

zone = page_zone(first_page);
zone = page_zone(pfn_to_page(valid_start));

if (zone_idx(zone) == ZONE_MOVABLE - 1) {
/*The mem block is the last memoryblock of this zone.*/
if (end_pfn == zone_end_pfn(zone))
if (valid_end == zone_end_pfn(zone))
return sprintf(buf, "%s %s\n",
zone->name, (zone + 1)->name);
}

if (zone_idx(zone) == ZONE_MOVABLE) {
/*The mem block is the first memoryblock of ZONE_MOVABLE.*/
if (start_pfn == zone->zone_start_pfn)
if (valid_start == zone->zone_start_pfn)
return sprintf(buf, "%s %s\n",
zone->name, (zone - 1)->name);
}
@@ -20,14 +20,22 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);

#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
WAKE_IRQ_DEDICATED_MANAGED)

struct wake_irq {
struct device *dev;
unsigned int status;
int irq;
bool dedicated_irq:1;
};

extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
extern void dev_pm_disable_wake_irq_check(struct device *dev);

#ifdef CONFIG_PM_SLEEP

@@ -102,6 +110,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}

static inline void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status)
{
}

static inline void dev_pm_disable_wake_irq_check(struct device *dev)
{
}

#endif

#ifdef CONFIG_PM_SLEEP
@@ -515,7 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)

callback = RPM_GET_CALLBACK(dev, runtime_suspend);

dev_pm_enable_wake_irq(dev);
dev_pm_enable_wake_irq_check(dev, true);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@@ -554,7 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;

 fail:
dev_pm_disable_wake_irq(dev);
dev_pm_disable_wake_irq_check(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -737,12 +737,12 @@ static int rpm_resume(struct device *dev, int rpmflags)

callback = RPM_GET_CALLBACK(dev, runtime_resume);

dev_pm_disable_wake_irq(dev);
dev_pm_disable_wake_irq_check(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
dev_pm_enable_wake_irq(dev);
dev_pm_enable_wake_irq_check(dev, false);
} else {
 no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);

if (wirq->dedicated_irq)
if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
free_irq(wirq->irq, wirq);
wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
}
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)

wirq->dev = dev;
wirq->irq = irq;
wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN);

/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;

wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

return err;

err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
 * Called from the bus code or the device driver for
 * runtime_suspend() to enable the wake-up interrupt while
 * the device is running.
 * Optionally called from the bus code or the device driver for
 * runtime_resume() to override the PM runtime core managed wake-up
 * interrupt handling to enable the wake-up interrupt.
 *
 * Note that for runtime_suspend()) the wake-up interrupts
 * should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;

if (wirq && wirq->dedicated_irq)
if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,19 +234,72 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
 * Called from the bus code or the device driver for
 * runtime_resume() to disable the wake-up interrupt while
 * the device is running.
 * Optionally called from the bus code or the device driver for
 * runtime_suspend() to override the PM runtime core managed wake-up
 * interrupt handling to disable the wake-up interrupt.
 */
void dev_pm_disable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;

if (wirq && wirq->dedicated_irq)
if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
 * otherwise try to disable already disabled wakeirq. The wake-up interrupt
 * starts disabled with IRQ_NOAUTOEN set.
 *
 * Should be only called from rpm_suspend() and rpm_resume() path.
 * Caller must hold &dev->power.lock to change wirq->status
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status)
{
struct wake_irq *wirq = dev->power.wakeirq;

if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
return;

if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
goto enable;
} else if (can_change_status) {
wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
goto enable;
}

return;

enable:
enable_irq(wirq->irq);
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should be only called from rpm_suspend() and rpm_resume() path.
 */
void dev_pm_disable_wake_irq_check(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;

if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
return;

if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
disable_irq_nosync(wirq->irq);
}

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
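A toy userspace model of the two-flag state machine introduced above - ALLOCATED is set when the dedicated IRQ is requested (left off via IRQ_NOAUTOEN), MANAGED is set on the first rpm_suspend() - showing why the very first disable is a no-op. Illustrative only, not kernel code:

#include <assert.h>
#include <stdbool.h>

#define WAKE_IRQ_DEDICATED_ALLOCATED 0x1u
#define WAKE_IRQ_DEDICATED_MANAGED   0x2u

static unsigned int status;
static int irq_enabled;

static void enable_check(bool can_change_status)	/* enable_wake_irq_check */
{
	if (!(status & (WAKE_IRQ_DEDICATED_ALLOCATED |
			WAKE_IRQ_DEDICATED_MANAGED)))
		return;
	if ((status & WAKE_IRQ_DEDICATED_MANAGED) || can_change_status) {
		status |= WAKE_IRQ_DEDICATED_MANAGED;
		irq_enabled = 1;
	}
}

static void disable_check(void)		/* disable_wake_irq_check */
{
	if (status & WAKE_IRQ_DEDICATED_MANAGED)
		irq_enabled = 0;
}

int main(void)
{
	status = WAKE_IRQ_DEDICATED_ALLOCATED;	/* IRQ starts off */

	disable_check();	/* first pm_runtime_get(): harmless no-op */
	assert(!irq_enabled);
	enable_check(true);	/* first rpm_suspend(): core takes over */
	assert(irq_enabled);
	disable_check();	/* rpm_resume(): now really disables */
	assert(!irq_enabled);
	return 0;
}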
@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
{
struct device_node *bridge;
struct device *parent;
int ret;

bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
if (!bridge)
@@ -181,7 +182,11 @@ static int vexpress_config_populate(struct device_node *node)
if (WARN_ON(!parent))
return -ENODEV;

return of_platform_populate(node, NULL, NULL, parent);
ret = of_platform_populate(node, NULL, NULL, parent);

put_device(parent);

return ret;
}

static int __init vexpress_config_init(void)
@@ -247,7 +247,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
if (ret < 0) {
dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
ret);
return true;
return false;
}

return (ret & WM831X_CLKOUT_ENA) != 0;
@@ -157,10 +157,8 @@ static void __init _mx31_clocks_init(unsigned long fref)
}
}

int __init mx31_clocks_init(void)
int __init mx31_clocks_init(unsigned long fref)
{
u32 fref = 26000000; /* default */

_mx31_clocks_init(fref);

clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
@@ -482,6 +482,7 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
@@ -373,8 +373,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;

if (!throttled)
if (!throttled) {
/* we don't want to be preempted while
 * checking if the CPU frequency has been throttled
 */
preempt_disable();
powernv_cpufreq_throttle_check(NULL);
preempt_enable();
}

freq_data.pstate_id = powernv_freqs[new_index].driver_data;
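The preempt_disable() pair exists because the throttle check is only meaningful against the CPU the task is currently running on. A hedged sketch of the general shape, with check() standing in for powernv_cpufreq_throttle_check() and <linux/preempt.h> assumed:

static void run_on_stable_cpu(void (*check)(void *))
{
	preempt_disable();	/* smp_processor_id() is now stable */
	check(NULL);		/* per-CPU reads can't migrate mid-check */
	preempt_enable();
}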
@@ -449,6 +449,9 @@ struct dma_pl330_chan {

/* for cyclic capability */
bool cyclic;

/* for runtime pm tracking */
bool active;
};

struct pl330_dmac {
@@ -2040,6 +2043,7 @@ static void pl330_tasklet(unsigned long data)
_stop(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = true;
pch->active = false;
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
@@ -2061,6 +2065,7 @@ static void pl330_tasklet(unsigned long data)
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
@@ -2175,6 +2180,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
bool power_down = false;

pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
@@ -2185,6 +2191,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
power_down = pch->active;
pch->active = false;

/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2202,6 +2210,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
if (power_down)
pm_runtime_put_autosuspend(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);

return 0;
@@ -2348,6 +2358,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
 * updated on work_list emptiness status.
 */
WARN_ON(list_empty(&pch->submitted_list));
pch->active = true;
pm_runtime_get_sync(pch->dmac->ddma.dev);
}
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
@@ -1812,7 +1812,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].num_slots = req_payload.num_slots;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}
@@ -1401,6 +1401,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;

mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
@@ -445,6 +445,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
struct edid *edid;
struct i2c_adapter *i2c;
bool ret = false;

BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);

@@ -461,17 +462,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 */
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
ret = true;
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}

DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}

kfree(edid);

return false;
return ret;
}

static enum drm_connector_status
@@ -3948,10 +3948,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);

wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);

trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);

queue_work(dev_priv->wq, &work->work);
}

void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t mpllP;

pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;

@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t clock;

pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
return clock;
return clock / 1000;
}

ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
);
}
for (i = 0; i < size; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8));
nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
@@ -146,6 +146,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
int xorigin = 0, yorigin = 0;
int w = radeon_crtc->cursor_width;

radeon_crtc->cursor_x = x;
radeon_crtc->cursor_y = y;

if (ASIC_IS_AVIVO(rdev)) {
/* avivo cursor are offset into the total surface */
x += crtc->x;
@@ -202,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
}

if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
y >= (crtc->y + crtc->mode.crtc_vdisplay))
x >= (crtc->x + crtc->mode.hdisplay) ||
y >= (crtc->y + crtc->mode.vdisplay))
goto out_of_bounds;

x += xorigin;
@@ -240,9 +243,6 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
yorigin * 256);
}

radeon_crtc->cursor_x = x;
radeon_crtc->cursor_y = y;

if (radeon_crtc->cursor_out_of_bounds) {
radeon_crtc->cursor_out_of_bounds = false;
if (radeon_crtc->cursor_bo)
@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
} else if (rdev->family == CHIP_VERDE) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
(rdev->pdev->revision == 0x87) ||
(rdev->pdev->device == 0x6820) ||
(rdev->pdev->device == 0x6821) ||
(rdev->pdev->device == 0x6822) ||
(rdev->pdev->device == 0x6823) ||
(rdev->pdev->device == 0x682A) ||
(rdev->pdev->device == 0x682B)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (rdev->family == CHIP_OLAND) {
if ((rdev->pdev->revision == 0xC7) ||
(rdev->pdev->revision == 0x80) ||