Merge tag 'lsk-v3.10-15.09-android'
LSK Android 15.09 v3.10
@@ -4,12 +4,15 @@ dm-crypt
Device-Mapper's "crypt" target provides transparent encryption of block devices
using the kernel crypto API.

For a more detailed description of supported parameters see:
http://code.google.com/p/cryptsetup/wiki/DMCrypt

Parameters: <cipher> <key> <iv_offset> <device path> \
<offset> [<#opt_params> <opt_params>]

<cipher>
Encryption cipher and an optional IV generation mode.
(In format cipher[:keycount]-chainmode-ivopts:ivmode).
(In format cipher[:keycount]-chainmode-ivmode[:ivopts]).
Examples:
des
aes-cbc-essiv:sha256
@@ -19,7 +22,11 @@ Parameters: <cipher> <key> <iv_offset> <device path> \

<key>
Key used for encryption. It is encoded as a hexadecimal number.
You can only use key sizes that are valid for the selected cipher.
You can only use key sizes that are valid for the selected cipher
in combination with the selected iv mode.
Note that for some iv modes the key string can contain additional
keys (for example IV seed) so the key contains more parts concatenated
into a single string.

<keycount>
Multi-key compatibility mode. You can define <keycount> keys and
@@ -44,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
Otherwise #opt_params is the number of following arguments.

Example of optional parameters section:
1 allow_discards
2 allow_discards same_cpu_crypt

allow_discards
Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -56,6 +63,11 @@ allow_discards
used space etc.) if the discarded blocks can be located easily on the
device later.

same_cpu_crypt
Perform encryption using the same cpu that IO was submitted on.
The default is to use an unbound workqueue so that encryption work
is automatically balanced between available CPUs.

Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
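As a rough sketch of the table format documented above (illustrative only, not part of this commit; the device path and key below are placeholders):

#!/bin/sh
# Map /dev/sdb as a crypt target named "crypt1" using aes-cbc-essiv:sha256.
# Table layout: <start> <length> crypt <cipher> <key> <iv_offset> <device> \
#               <offset> <#opt_params> <opt_params>
# The key is a dummy 128-bit value; the two optional parameters described
# above are passed via "2 allow_discards same_cpu_crypt".
dmsetup create crypt1 --table "0 $(blockdev --getsz /dev/sdb) crypt \
  aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 /dev/sdb 0 \
  2 allow_discards same_cpu_crypt"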
@@ -91,5 +91,5 @@ mpp61 61 gpo, dev(wen1), uart1(txd), audio(rclk)
mpp62 62 gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
audio(mclk), uart0(cts)
mpp63 63 gpo, spi0(sck), tclk
mpp64 64 gpio, spi0(miso), spi0-1(cs1)
mpp65 65 gpio, spi0(mosi), spi0-1(cs2)
mpp64 64 gpio, spi0(miso), spi0(cs1)
mpp65 65 gpio, spi0(mosi), spi0(cs2)

@@ -41,15 +41,15 @@ mpp20 20 gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
mpp21 21 gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
mpp22 22 gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
mpp23 23 gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
mpp24 24 gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
mpp25 25 gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
mpp26 26 gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
mpp24 24 gpio, lcd(hsync), sata1(prsnt), tdm(rst)
mpp25 25 gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
mpp26 26 gpio, lcd(clk), tdm(fsync)
mpp27 27 gpio, lcd(e), tdm(dtx), ptp(trig)
mpp28 28 gpio, lcd(pwm), tdm(drx), ptp(evreq)
mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
mpp29 29 gpio, lcd(ref-clk), tdm(int0), ptp(clk)
mpp30 30 gpio, tdm(int1), sd0(clk)
mpp31 31 gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
mpp32 32 gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
mpp31 31 gpio, tdm(int2), sd0(cmd)
mpp32 32 gpio, tdm(int3), sd0(d0)
mpp33 33 gpio, tdm(int4), sd0(d1), mem(bat)
mpp34 34 gpio, tdm(int5), sd0(d2), sata0(prsnt)
mpp35 35 gpio, tdm(int6), sd0(d3), sata1(prsnt)
@@ -57,21 +57,18 @@ mpp36 36 gpio, spi(mosi)
mpp37 37 gpio, spi(miso)
mpp38 38 gpio, spi(sck)
mpp39 39 gpio, spi(cs0)
mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
pcie(clkreq0)
mpp40 40 gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
mpp41 41 gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
pcie(clkreq1)
mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
vdd(cpu0-pd)
mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
vdd(cpu2-3-pd){1}
mpp42 42 gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
mpp43 43 gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
mpp44 44 gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
mem(bat)
mpp45 45 gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
mpp46 46 gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
mpp47 47 gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
ref(clkout)
mpp48 48 gpio, tclk, dev(burst/last)
mpp48 48 gpio, dev(clkout), dev(burst/last)

* Marvell Armada XP (mv78260 and mv78460 only)

@@ -83,9 +80,9 @@ mpp51 51 gpio, dev(ad16)
mpp52 52 gpio, dev(ad17)
mpp53 53 gpio, dev(ad18)
mpp54 54 gpio, dev(ad19)
mpp55 55 gpio, dev(ad20), vdd(cpu0-pd)
mpp56 56 gpio, dev(ad21), vdd(cpu1-pd)
mpp57 57 gpio, dev(ad22), vdd(cpu2-3-pd){1}
mpp55 55 gpio, dev(ad20)
mpp56 56 gpio, dev(ad21)
mpp57 57 gpio, dev(ad22)
mpp58 58 gpio, dev(ad23)
mpp59 59 gpio, dev(ad24)
mpp60 60 gpio, dev(ad25)
@@ -95,6 +92,3 @@ mpp63 63 gpio, dev(ad28)
mpp64 64 gpio, dev(ad29)
mpp65 65 gpio, dev(ad30)
mpp66 66 gpio, dev(ad31)

Notes:
* {1} vdd(cpu2-3-pd) only available on mv78460.

@@ -4,9 +4,9 @@ Required properties:
- compatible : "arm,pl022", "arm,primecell"
- reg : Offset and length of the register set for the device
- interrupts : Should contain SPI controller interrupt
- num-cs : total number of chipselects

Optional properties:
- num-cs : total number of chipselects
- cs-gpios : should specify GPIOs used for chipselects.
The gpios will be referred to as reg = <index> in the SPI child nodes.
If unspecified, a single SPI device without a chip select can be used.
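For illustration only (not part of this commit), a device-tree node satisfying the pl022 binding above might look like the following; the unit address, register window, interrupt number, and GPIO phandles are placeholder assumptions:

spi@e0100000 {
	compatible = "arm,pl022", "arm,primecell";
	reg = <0xe0100000 0x1000>;	/* register offset and length */
	interrupts = <31>;		/* SPI controller interrupt */
	num-cs = <2>;			/* total number of chipselects */
	cs-gpios = <&gpio0 13 0>, <&gpio0 14 0>; /* referred to as reg = <index> */
};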
Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 79
SUBLEVEL = 88
EXTRAVERSION =
NAME = TOSSUG Baby Fish

@@ -254,7 +254,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

HOSTCC = gcc
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
HOSTCXXFLAGS = -O2

# Decide whether to build built-in, modular, or both.

@@ -392,7 +392,9 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
-fno-delete-null-pointer-checks
-fno-delete-null-pointer-checks \
-std=gnu89

KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__

@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
" scond %3, [%1] \n"
" bnz 1b \n"
"2: \n"
: "=&r"(prev)
: "r"(ptr), "ir"(expected),
"r"(new) /* can't be "ir". scond can't take limm for "b" */
: "cc");
: "=&r"(prev) /* Early clobber, to prevent reg reuse */
: "r"(ptr), /* Not "m": llock only supports reg direct addr mode */
"ir"(expected),
"r"(new) /* can't be "ir". scond can't take LIMM for "b" */
: "cc", "memory"); /* so that gcc knows memory is being written here */

return prev;
}

@@ -83,7 +83,7 @@ struct callee_regs {
long r13;
};

#define instruction_pointer(regs) ((regs)->ret)
#define instruction_pointer(regs) (unsigned long)((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)

/* return 1 if user mode or 0 if kernel mode */

@@ -290,7 +290,7 @@

fec: ethernet@1002b000 {
compatible = "fsl,imx27-fec";
reg = <0x1002b000 0x4000>;
reg = <0x1002b000 0x1000>;
interrupts = <50>;
clocks = <&clks 48>, <&clks 67>, <&clks 0>;
clock-names = "ipg", "ahb", "ptp";

@@ -358,7 +358,8 @@ ENDPROC(__pabt_svc)
.endm

.macro kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
!defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else

@@ -32,7 +32,9 @@ ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq @ disable interrupts
ldr r1, [tsk, #TI_FLAGS]
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
tst r1, #_TIF_SYSCALL_WORK
bne __sys_trace_return
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
asm_trace_hardirqs_on

@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)

void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
void *base = (void *)0xffff0000;
#else
void *base = vectors_page;
#endif
unsigned offset = FIQ_OFFSET;

memcpy(base + offset, start, length);
if (!cache_is_vipt_nonaliasing())
flush_icache_range((unsigned long)base + offset, offset +
length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
flush_icache_range(offset, offset + length);
}

int claim_fiq(struct fiq_handler *f)

@@ -159,13 +159,9 @@ __kvm_vcpu_return:
@ Don't trap coprocessor accesses for host kernel
set_hstr vmexit
set_hdcr vmexit
set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
@ Save floating point registers we if let guest use them.
tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
bne after_vfp_restore

@ Switch VFP/NEON hardware state to the host's
add r7, vcpu, #VCPU_VFP_GUEST
store_vfp_state r7
@@ -177,6 +173,8 @@ after_vfp_restore:
@ Restore FPEXC_EN which we clobbered on entry
pop {r2}
VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

@ Reset Hyp-role
@@ -472,7 +470,7 @@ switch_to_guest_vfp:
push {r3-r7}

@ NEON/VFP used. Turn on VFP access.
set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

@ Switch VFP/NEON hardware state to the guest's
add r7, r0, #VCPU_VFP_HOST

@@ -592,8 +592,13 @@ ARM_BE8(rev r6, r6 )
.endm

/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
* (hardware reset value is 0). Keep previous value in r2. */
.macro set_hcptr operation, mask
* (hardware reset value is 0). Keep previous value in r2.
* An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
* VFP wasn't already enabled (always executed on vmtrap).
* If a label is specified with vmexit, it is branched to if VFP wasn't
* enabled.
*/
.macro set_hcptr operation, mask, label = none
mrc p15, 4, r2, c1, c1, 2
ldr r3, =\mask
.if \operation == vmentry
@@ -602,6 +607,17 @@ ARM_BE8(rev r6, r6 )
bic r3, r2, r3 @ Don't trap defined coproc-accesses
.endif
mcr p15, 4, r3, c1, c1, 2
.if \operation != vmentry
.if \operation == vmexit
tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
beq 1f
.endif
isb
.if \label != none
b \label
.endif
1:
.endif
.endm

/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return

@@ -226,7 +226,7 @@ void __init dove_init_early(void)
orion_time_set_base(TIMER_VIRT_BASE);
mvebu_mbus_init("marvell,dove-mbus",
BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ);
DOVE_MC_WINS_BASE, DOVE_MC_WINS_SZ, 0);
}

static int __init dove_find_tclk(void)

@@ -515,7 +515,7 @@ int __init mx6q_clocks_init(void)
clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
clk[sata] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);

@@ -530,7 +530,7 @@ void __init kirkwood_init_early(void)

mvebu_mbus_init("marvell,kirkwood-mbus",
BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ, 0);
}

int kirkwood_tclk;

@@ -337,11 +337,11 @@ void __init mv78xx0_init_early(void)
if (mv78xx0_core_index() == 0)
mvebu_mbus_init("marvell,mv78xx0-mbus",
BRIDGE_WINS_CPU0_BASE, BRIDGE_WINS_SZ,
DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ);
DDR_WINDOW_CPU0_BASE, DDR_WINDOW_CPU_SZ, 0);
else
mvebu_mbus_init("marvell,mv78xx0-mbus",
BRIDGE_WINS_CPU1_BASE, BRIDGE_WINS_SZ,
DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ, 0);
}

void __init_refok mv78xx0_timer_init(void)

@@ -66,7 +66,8 @@ void __init armada_370_xp_init_early(void)
ARMADA_370_XP_MBUS_WINS_BASE,
ARMADA_370_XP_MBUS_WINS_SIZE,
ARMADA_370_XP_SDRAM_WINS_BASE,
ARMADA_370_XP_SDRAM_WINS_SIZE);
ARMADA_370_XP_SDRAM_WINS_SIZE,
coherency_available());

#ifdef CONFIG_CACHE_L2X0
l2x0_of_init(0, ~0UL);

@@ -137,6 +137,20 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
.notifier_call = mvebu_hwcc_platform_notifier,
};

/*
* Keep track of whether we have IO hardware coherency enabled or not.
* On Armada 370's we will not be using it for example. We need to make
* that available [through coherency_available()] so the mbus controller
* doesn't enable the IO coherency bit in the attribute bits of the
* chip selects.
*/
static int coherency_enabled;

int coherency_available(void)
{
return coherency_enabled;
}

int __init coherency_init(void)
{
struct device_node *np;
@@ -170,6 +184,7 @@ int __init coherency_init(void)
coherency_base = of_iomap(np, 0);
coherency_cpu_base = of_iomap(np, 1);
set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
coherency_enabled = 1;
bus_register_notifier(&platform_bus_type,
&mvebu_hwcc_platform_nb);
}

@@ -19,6 +19,7 @@ int coherency_get_cpu_count(void);
#endif

int set_cpu_coherent(int cpu_id, int smp_group_id);
int coherency_available(void);
int coherency_init(void);

#endif /* __MACH_370_XP_COHERENCY_H */

@@ -213,7 +213,7 @@ void __init orion5x_init_early(void)
mbus_soc_name = NULL;
mvebu_mbus_init(mbus_soc_name, ORION5X_BRIDGE_WINS_BASE,
ORION5X_BRIDGE_WINS_SZ,
ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ);
ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ, 0);
}

void orion5x_setup_wins(void)

@@ -56,6 +56,8 @@
#define PAGE_OFFSET1 (PAGE_OFFSET + 0x10000000)
#define PAGE_OFFSET2 (PAGE_OFFSET + 0x30000000)

#define PHYS_OFFSET PLAT_PHYS_OFFSET

#define __phys_to_virt(phys) \
((phys) >= 0x80000000 ? (phys) - 0x80000000 + PAGE_OFFSET2 : \
(phys) >= 0x20000000 ? (phys) - 0x20000000 + PAGE_OFFSET1 : \

@@ -165,7 +165,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
* Other callers might not initialize the si_lsb field,
* so check explicitely for the right codes here.
*/
if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
if (from->si_signo == SIGBUS &&
(from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
break;
@@ -200,8 +201,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)

int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);

if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE))

@@ -317,16 +317,12 @@ static void __init parse_dt_cpu_power(void)
cpu_capacity(cpu) = capacity;
}

/* If min and max capacities are equal we bypass the update of the
* cpu_scale because all CPUs have the same capacity. Otherwise, we
* compute a middle_capacity factor that will ensure that the capacity
/* compute a middle_capacity factor that will ensure that the capacity
* of an 'average' CPU of the system will be as close as possible to
* SCHED_POWER_SCALE, which is the default value, but with the
* constraint explained near table_efficiency[].
*/
if (min_capacity == max_capacity)
return;
else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
middle_capacity = (min_capacity + max_capacity)
>> (SCHED_POWER_SHIFT+1);
else

@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)

# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
# down to collect2, resulting in silent corruption of the vDSO image.
ccflags-y += -Wl,-shared

obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

@@ -92,6 +92,14 @@ static void reset_context(void *info)
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = current->active_mm;

/*
* current->active_mm could be init_mm for the idle thread immediately
* after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
* the reserved value, so no need to reset any context.
*/
if (mm == &init_mm)
return;

smp_rmb();
asid = cpu_last_asid + cpu;

@@ -308,7 +308,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
prev_end = ALIGN(start + __phys_to_pfn(reg->size),
prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
MAX_ORDER_NR_PAGES);
}

@@ -47,22 +47,14 @@ static int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}

/*
* Since get_random_int() returns the same value within a 1 jiffy window, we
* will almost always get the same randomisation for the stack and mmap
* region. This will mean the relative distance between stack and mmap will be
* the same.
*
* To avoid this we can shift the randomness by 1 bit.
*/
static unsigned long mmap_rnd(void)
{
unsigned long rnd = 0;

if (current->flags & PF_RANDOMIZE)
rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);
rnd = (long)get_random_int() & STACK_RND_MASK;

return rnd << (PAGE_SHIFT + 1);
return rnd << PAGE_SHIFT;
}

static unsigned long mmap_base(void)

@@ -90,7 +90,11 @@
#endif

#ifndef FIXADDR_TOP
#ifdef CONFIG_KVM_GUEST
#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
#else
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
#endif

#endif /* __ASM_MACH_GENERIC_SPACES_H */

@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;

__asm__ __volatile__ (
" .set push\n"
" .set noreorder\n"
"1: " LL_INSN " %[tmp], %[buddy]\n"
" bnez %[tmp], 2f\n"
" or %[tmp], %[tmp], %[global]\n"
" " SC_INSN " %[tmp], %[buddy]\n"
" beqz %[tmp], 1b\n"
" nop\n"
"2:\n"
" .set pop"
: [buddy] "+m" (buddy->pte),
[tmp] "=&r" (tmp)
: [global] "r" (page_global));
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
}
#endif
}

@@ -110,7 +110,7 @@ void __init init_IRQ(void)
#endif
}

#ifdef DEBUG_STACKOVERFLOW
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
unsigned long sp;

@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
cpumask_t mask;
cpumask_t allowed, mask;
int retval;
struct task_struct *p;

@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;

cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
read_unlock(&tasklist_lock);

@@ -368,8 +368,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)

int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
memset(to, 0, sizeof *to);

if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))

@@ -1626,7 +1626,7 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (vcpu->mmio_needed == 2)
*gpr = *(int16_t *) run->mmio.data;
else
*gpr = *(int16_t *) run->mmio.data;
*gpr = *(uint16_t *)run->mmio.data;

break;
case 1:

@@ -949,8 +949,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
memset(to, 0, sizeof *to);

if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))

@@ -213,6 +213,7 @@ SECTIONS
*(.opd)
}

. = ALIGN(256);
.got : AT(ADDR(.got) - LOAD_OFFSET) {
__toc_start = .;
#ifndef CONFIG_RELOCATABLE

@@ -112,7 +112,16 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}

static bool regs_use_siar(struct pt_regs *regs)
{
return !!regs->result;
/*
* When we take a performance monitor exception the regs are setup
* using perf_read_regs() which overloads some fields, in particular
* regs->result to tell us whether to use SIAR.
*
* However if the regs are from another exception, eg. a syscall, then
* they have not been setup using perf_read_regs() and so regs->result
* is something random.
*/
return ((TRAP(regs) == 0xf00) && regs->result);
}

/*

@@ -16,11 +16,12 @@
#define GHASH_DIGEST_SIZE 16

struct ghash_ctx {
u8 icv[16];
u8 key[16];
u8 key[GHASH_BLOCK_SIZE];
};

struct ghash_desc_ctx {
u8 icv[GHASH_BLOCK_SIZE];
u8 key[GHASH_BLOCK_SIZE];
u8 buffer[GHASH_BLOCK_SIZE];
u32 bytes;
};
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

memset(dctx, 0, sizeof(*dctx));
memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);

return 0;
}
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
}

memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
memset(ctx->icv, 0, GHASH_BLOCK_SIZE);

return 0;
}
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
unsigned int n;
u8 *buf = dctx->buffer;
int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
src += n;

if (!dctx->bytes) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,

n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
if (ret != n)
return -EIO;
src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
return 0;
}

static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)

memset(pos, 0, dctx->bytes);

ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
if (ret != GHASH_BLOCK_SIZE)
return -EIO;

dctx->bytes = 0;
}

dctx->bytes = 0;
return 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
int ret;

ret = ghash_flush(ctx, dctx);
ret = ghash_flush(dctx);
if (!ret)
memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
return ret;
}

@@ -276,6 +276,8 @@ ENTRY(_sclp_print_early)
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
basr %r13,0
lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
.Lesa2:
#endif
lr %r10,%r2 # save string pointer
@@ -299,6 +301,8 @@ ENTRY(_sclp_print_early)
#endif
lm %r6,%r15,120(%r15) # restore registers
br %r14
.Lzeroes:
.fill 64,4,0

.LwritedataS4:
.long 0x00760005 # SCLP command for write data

@@ -28,18 +28,20 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */

#define VISEntryHalf \
VISEntry

#define VISExitHalf \
VISExit

#define VISEntryHalfFast(fail_label) \
rd %fprs, %o5; \
andcc %o5, FPRS_FEF, %g0; \
be,pt %icc, 297f; \
sethi %hi(298f), %g7; \
sethi %hi(VISenterhalf), %g1; \
jmpl %g1 + %lo(VISenterhalf), %g0; \
or %g7, %lo(298f), %g7; \
clr %o5; \
297: wr %o5, FPRS_FEF, %fprs; \
298:
nop; \
ba,a,pt %xcc, fail_label; \
297: wr %o5, FPRS_FEF, %fprs;

#define VISExitHalf \
#define VISExitHalfFast \
wr %o5, 0, %fprs;

#ifndef __ASSEMBLY__

@@ -2306,7 +2306,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
if (len & (8UL - 1))
return ERR_PTR(-EINVAL);

buf = kzalloc(len, GFP_KERNEL);
buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
return ERR_PTR(-ENOMEM);

@@ -41,6 +41,10 @@
#endif
#endif

#if !defined(EX_LD) && !defined(EX_ST)
#define NON_USER_COPY
#endif

#ifndef EX_LD
#define EX_LD(x) x
#endif
@@ -197,9 +201,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o3), %o0

.Llarge_src_unaligned:
#ifdef NON_USER_COPY
VISEntryHalfFast(.Lmedium_vis_entry_fail)
#else
VISEntryHalf
#endif
andn %o2, 0x3f, %o4
sub %o2, %o4, %o2
VISEntryHalf
alignaddr %o1, %g0, %g1
add %o1, %o4, %o1
EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
@@ -232,14 +240,21 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
add %o0, 0x40, %o0
bne,pt %icc, 1b
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
#ifdef NON_USER_COPY
VISExitHalfFast
#else
VISExitHalf

#endif
brz,pn %o2, .Lexit
cmp %o2, 19
ble,pn %icc, .Lsmall_unaligned
nop
ba,a,pt %icc, .Lmedium_unaligned

#ifdef NON_USER_COPY
.Lmedium_vis_entry_fail:
or %o0, %o1, %g2
#endif
.Lmedium:
LOAD(prefetch, %o1 + 0x40, #n_reads_strong)
andcc %g2, 0x7, %g0

@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3

stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
cmp %o5, FPRS_DU
be,pn %icc, 6f
sll %g1, 3, %g1
mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop

6: ldub [%g3 + TI_FPSAVED], %o5
or %o5, FPRS_DU, %o5
add %g6, TI_FPREGS+0x80, %g2
stb %o5, [%g3 + TI_FPSAVED]

sll %g1, 5, %g1
add %g6, TI_FPREGS+0xc0, %g3
wr %g0, FPRS_FEF, %fprs
membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
membar #Sync
ba,pt %xcc, 80f
nop

.align 32
80: jmpl %g7 + %g0, %g0
nop

.align 32
VISenterhalf:
ldub [%g6 + TI_FPDEPTH], %g1
brnz,a,pn %g1, 1f
cmp %g1, 1
stb %g0, [%g6 + TI_FPSAVED]
stx %fsr, [%g6 + TI_XFSR]
clr %o5
jmpl %g7 + %g0, %g0
wr %g0, FPRS_FEF, %fprs

1: bne,pn %icc, 2f
srl %g1, 1, %g1
ba,pt %xcc, vis1
sub %g7, 8, %g7
2: addcc %g6, %g1, %g3
sll %g1, 3, %g1
andn %o5, FPRS_DU, %g2
stb %g2, [%g3 + TI_FPSAVED]

rd %gsr, %g2
add %g6, %g1, %g3
stx %g2, [%g3 + TI_GSR]
add %g6, %g1, %g2
stx %fsr, [%g2 + TI_XFSR]
sll %g1, 5, %g1
3: andcc %o5, FPRS_DL, %g0
be,pn %icc, 4f
add %g6, TI_FPREGS, %g2

add %g6, TI_FPREGS+0x40, %g3
membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
membar #Sync
ba,pt %xcc, 4f
nop

.align 32
4: and %o5, FPRS_DU, %o5
jmpl %g7 + %g0, %g0
wr %o5, FPRS_FEF, %fprs

@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page);
void VISenter(void);
EXPORT_SYMBOL(VISenter);

/* CRYPTO code needs this */
void VISenterhalf(void);
EXPORT_SYMBOL(VISenterhalf);

extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);

@@ -1064,7 +1064,7 @@ static void __init load_hv_initrd(void)

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
free_bootmem(__pa(begin), end - begin);
free_bootmem_late(__pa(begin), end - begin);
}

#else

@@ -155,7 +155,7 @@ config SBUS

config NEED_DMA_MAP_STATE
def_bool y
depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG
depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG || SWIOTLB

config NEED_SG_DMA_LENGTH
def_bool y

@@ -54,7 +54,7 @@ ENTRY(efi_pe_entry)
call reloc
reloc:
popl %ecx
subl reloc, %ecx
subl $reloc, %ecx
movl %ecx, BP_code32_start(%eax)

sub $0x4, %esp

@@ -544,7 +544,7 @@ struct kvm_arch {
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
struct kvm_pit *vpit;
int vapics_in_nmi_mode;
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
struct kvm_apic_map *apic_map;

@@ -648,48 +648,48 @@ static __initconst const struct x86_pmu amd_pmu = {
.cpu_dead = amd_pmu_cpu_dead,
};

static int setup_event_constraints(void)
static int __init amd_core_pmu_init(void)
{
if (boot_cpu_data.x86 == 0x15)
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
return 0;
}
if (!cpu_has_perfctr_core)
return 0;

static int setup_perfctr_core(void)
{
if (!cpu_has_perfctr_core) {
WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
switch (boot_cpu_data.x86) {
case 0x15:
pr_cont("Fam15h ");
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
break;

default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;
}

WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
KERN_ERR "hw perf events core counters need constraints handler!");

/*
* If core performance counter extensions exists, we must use
* MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
* x86_pmu_addr_offset().
* amd_pmu_addr_offset().
*/
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;

printk(KERN_INFO "perf: AMD core performance counters detected\n");

pr_cont("core perfctr, ");
return 0;
}

__init int amd_pmu_init(void)
{
int ret;

/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
return -ENODEV;

x86_pmu = amd_pmu;

setup_event_constraints();
setup_perfctr_core();
ret = amd_core_pmu_init();
if (ret)
return ret;

/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,

@@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start,
unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
int i;

while (leftover) {
while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
mc_header = (struct microcode_header_intel *)ucode_ptr;

mc_size = get_totalsize(mc_header);

@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
* LVT0 to NMI delivery. Other PIC interrupts are just sent to
* VCPU0, and only if its LVT0 is in EXTINT mode.
*/
if (kvm->arch.vapics_in_nmi_mode > 0)
if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_apic_nmi_wd_deliver(vcpu);
}

@@ -1123,10 +1123,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
if (!nmi_wd_enabled) {
apic_debug("Receive NMI setting on APIC_LVT0 "
"for cpu %d\n", apic->vcpu->vcpu_id);
apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
}
} else if (nmi_wd_enabled)
apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
}

static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)

@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)

static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic->pending_events;
return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);

@@ -3975,7 +3975,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
mask.cr0_wp = mask.cr4_pae = mask.nxe = mask.smep_andnot_wp = 1;
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {

@@ -495,8 +495,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (svm->vmcb->control.next_rip != 0)
if (svm->vmcb->control.next_rip != 0) {
WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}

if (!svm->next_rip) {
if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
@@ -4229,7 +4231,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
break;
}

vmcb->control.next_rip = info->next_rip;
/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
if (static_cpu_has(X86_FEATURE_NRIPS))
vmcb->control.next_rip = info->next_rip;
vmcb->control.exit_code = icpt_info.exit_code;
vmexit = nested_svm_exit_handled(svm);

@@ -84,6 +84,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
},
},
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
{
.callback = set_use_crs,
.ident = "Foxconn K8M890-8237A",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
},
},

/* Now for the blacklist.. */

@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
pte_t pte;
unsigned long pfn;
struct page *page;
unsigned char dummy;

ptep = lookup_address((unsigned long)v, &level);
BUG_ON(ptep == NULL);
@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)

pte = pfn_pte(pfn, prot);

/*
* Careful: update_va_mapping() will fail if the virtual address
* we're poking isn't populated in the page tables. We don't
* need to worry about the direct map (that's always in the page
* tables), but we need to be careful about vmap space. In
* particular, the top level page table can lazily propagate
* entries between processes, so if we've switched mms since we
* vmapped the target in the first place, we might not have the
* top-level page table entry populated.
*
* We disable preemption because we want the same mm active when
* we probe the target and when we issue the hypercall. We'll
* have the same nominal mm, but if we're a kernel thread, lazy
* mm dropping could change our pgd.
*
* Out of an abundance of caution, this uses __get_user() to fault
* in the target address just in case there's some obscure case
* in which the target address isn't readable.
*/

preempt_disable();

pagefault_disable(); /* Avoid warnings due to being atomic. */
__get_user(dummy, (unsigned char __user __force *)v);
pagefault_enable();

if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
BUG();

@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
BUG();
} else
kmap_flush_unused();

preempt_enable();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
int i;

/*
* We need to mark the all aliases of the LDT pages RO. We
* don't need to call vm_flush_aliases(), though, since that's
* only responsible for flushing aliases out the TLBs, not the
* page tables, and Xen will flush the TLB for us if needed.
*
* To avoid confusing future readers: none of this is necessary
* to load the LDT. The hypervisor only checks this when the
* LDT is faulted in due to subsequent descriptor access.
*/

for(i = 0; i < entries; i += entries_per_page)
set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

@@ -720,8 +720,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
return -EINVAL;

disk = get_gendisk(MKDEV(major, minor), &part);
if (!disk || part)
if (!disk)
return -EINVAL;
if (part) {
put_disk(disk);
return -EINVAL;
}

rcu_read_lock();
spin_lock_irq(disk->queue->queue_lock);

@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
/* allocate ext devt */
idr_preload(GFP_KERNEL);

spin_lock(&ext_devt_lock);
spin_lock_bh(&ext_devt_lock);
idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
spin_unlock(&ext_devt_lock);
spin_unlock_bh(&ext_devt_lock);

idr_preload_end();
if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
return;

if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
spin_lock(&ext_devt_lock);
spin_lock_bh(&ext_devt_lock);
idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
spin_unlock(&ext_devt_lock);
spin_unlock_bh(&ext_devt_lock);
}
}

@@ -691,13 +691,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
} else {
struct hd_struct *part;

spin_lock(&ext_devt_lock);
spin_lock_bh(&ext_devt_lock);
part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
if (part && get_disk(part_to_disk(part))) {
*partno = part->partno;
disk = part_to_disk(part);
}
spin_unlock(&ext_devt_lock);
spin_unlock_bh(&ext_devt_lock);
}

return disk;

@@ -63,19 +63,15 @@
#define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val))

/*
* printf() format helpers
* printf() format helper. This macros is a workaround for the difficulties
* with emitting 64-bit integers and 64-bit pointers with the same code
* for both 32-bit and 64-bit hosts.
*/

/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */

#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)

#if ACPI_MACHINE_WIDTH == 64
#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
#else
#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
#endif

/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
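A minimal standalone sketch (assumptions: the ACPI_HIDWORD/ACPI_LODWORD helpers are reproduced here to match ACPICA's definitions, and plain printf stands in for ACPI_INFO/ACPI_DEBUG_PRINT) of why ACPI_FORMAT_UINT64 pairs with a "%8.8X%8.8X" format string, as the hunks below use it: the macro expands to two 32-bit arguments, high dword first, so one format string prints a 64-bit address identically on 32-bit and 64-bit hosts:

#include <stdio.h>
#include <stdint.h>

/* Reproduced ACPICA-style dword splitters (assumed to match actypes.h). */
#define ACPI_LODWORD(i) ((uint32_t)(uint64_t)(i))
#define ACPI_HIDWORD(i) ((uint32_t)(((uint64_t)(i)) >> 32))
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)

int main(void)
{
	uint64_t address = 0x00000000fed40000ULL; /* arbitrary example address */

	/* Expands to two u32 arguments; prints "Address 00000000FED40000". */
	printf("Address %8.8X%8.8X\n", ACPI_FORMAT_UINT64(address));
	return 0;
}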
@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
|
||||
obj_desc,
|
||||
ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
|
||||
ACPI_FORMAT_UINT64(obj_desc->region.address),
|
||||
obj_desc->region.length));
|
||||
|
||||
/* Now the address and length are valid for this opregion */
|
||||
@@ -544,7 +544,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
|
||||
obj_desc,
|
||||
ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
|
||||
ACPI_FORMAT_UINT64(obj_desc->region.address),
|
||||
obj_desc->region.length));
|
||||
|
||||
/* Now the address and length are valid for this opregion */
|
||||
|
||||
@@ -276,7 +276,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
|
||||
"Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
|
||||
®ion_obj->region.handler->address_space, handler,
|
||||
ACPI_FORMAT_NATIVE_UINT(address),
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
acpi_ut_get_region_name(region_obj->region.
|
||||
space_id)));
|
||||
|
||||
|
||||
@@ -621,8 +621,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
|
||||
acpi_os_printf("\n");
|
||||
} else {
|
||||
acpi_os_printf(" base %8.8X%8.8X Length %X\n",
|
||||
ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
|
||||
address),
|
||||
ACPI_FORMAT_UINT64(obj_desc->region.
|
||||
address),
|
||||
obj_desc->region.length);
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -269,17 +269,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
|
||||
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
|
||||
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
|
||||
acpi_ut_get_region_name(rgn_desc->region.
|
||||
space_id),
|
||||
rgn_desc->region.space_id,
|
||||
obj_desc->common_field.access_byte_width,
|
||||
obj_desc->common_field.base_byte_offset,
|
||||
field_datum_byte_offset, ACPI_CAST_PTR(void,
|
||||
(rgn_desc->
|
||||
region.
|
||||
address +
|
||||
region_offset))));
|
||||
field_datum_byte_offset,
|
||||
ACPI_FORMAT_UINT64(rgn_desc->region.address +
|
||||
region_offset)));
|
||||
|
||||
/* Invoke the appropriate address_space/op_region handler */
|
||||
|
||||
|
||||
@@ -176,7 +176,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
||||
if (!mem_info->mapped_logical_address) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not map memory at 0x%8.8X%8.8X, size %u",
|
||||
ACPI_FORMAT_NATIVE_UINT(address),
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
(u32) map_length));
|
||||
mem_info->mapped_length = 0;
|
||||
return_ACPI_STATUS(AE_NO_MEMORY);
|
||||
@@ -197,8 +197,7 @@ acpi_ex_system_memory_space_handler(u32 function,
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
|
||||
bit_width, function,
|
||||
ACPI_FORMAT_NATIVE_UINT(address)));
|
||||
bit_width, function, ACPI_FORMAT_UINT64(address)));
|
||||
|
||||
/*
|
||||
* Perform the memory read or write
|
||||
@@ -300,8 +299,7 @@ acpi_ex_system_io_space_handler(u32 function,
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
|
||||
bit_width, function,
|
||||
ACPI_FORMAT_NATIVE_UINT(address)));
|
||||
bit_width, function, ACPI_FORMAT_UINT64(address)));
|
||||
|
||||
/* Decode the function parameter */
|
||||
|
||||
|
||||
@@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
|
||||
byte_width = ACPI_DIV_8(bit_width);
|
||||
last_address = address + byte_width - 1;
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
|
||||
ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
|
||||
last_address),
|
||||
byte_width));
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
ACPI_FORMAT_UINT64(last_address), byte_width));
|
||||
|
||||
/* Maximum 16-bit address in I/O space */
|
||||
|
||||
if (last_address > ACPI_UINT16_MAX) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Illegal I/O port address/length above 64K: %p/0x%X",
|
||||
ACPI_CAST_PTR(void, address), byte_width));
|
||||
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
|
||||
ACPI_FORMAT_UINT64(address), byte_width));
|
||||
return_ACPI_STATUS(AE_LIMIT);
|
||||
}
|
||||
|
||||
@@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
|
||||
|
||||
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_IO,
|
||||
"Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
|
||||
ACPI_CAST_PTR(void, address),
|
||||
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
byte_width, port_info->name,
|
||||
port_info->start,
|
||||
port_info->end));
|
||||
|
||||
@@ -258,12 +258,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
||||
switch (type) {
|
||||
case ACPI_TYPE_PROCESSOR:
|
||||
|
||||
acpi_os_printf("ID %02X Len %02X Addr %p\n",
|
||||
acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
|
||||
obj_desc->processor.proc_id,
|
||||
obj_desc->processor.length,
|
||||
ACPI_CAST_PTR(void,
|
||||
obj_desc->processor.
|
||||
address));
|
||||
ACPI_FORMAT_UINT64(obj_desc->processor.
|
||||
address));
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
@@ -334,8 +333,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
|
||||
space_id));
|
||||
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
|
||||
acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
|
||||
ACPI_FORMAT_NATIVE_UINT
|
||||
(obj_desc->region.address),
|
||||
ACPI_FORMAT_UINT64(obj_desc->
|
||||
region.
|
||||
address),
|
||||
obj_desc->region.length);
|
||||
} else {
|
||||
acpi_os_printf
|
||||
|
||||
@@ -246,16 +246,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
{
|
||||
struct acpi_table_header local_header;
|
||||
|
||||
/*
|
||||
* The reason that the Address is cast to a void pointer is so that we
|
||||
* can use %p which will work properly on both 32-bit and 64-bit hosts.
|
||||
*/
|
||||
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
|
||||
|
||||
/* FACS only has signature and length fields */
|
||||
|
||||
ACPI_INFO((AE_INFO, "%4.4s %p %05X",
|
||||
header->signature, ACPI_CAST_PTR(void, address),
|
||||
ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X %05X",
|
||||
header->signature, ACPI_FORMAT_UINT64(address),
|
||||
header->length));
|
||||
} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
|
||||
|
||||
@@ -266,8 +262,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
header)->oem_id, ACPI_OEM_ID_SIZE);
|
||||
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
|
||||
|
||||
ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
|
||||
ACPI_CAST_PTR (void, address),
|
||||
ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %05X (v%.2d %6.6s)",
|
||||
ACPI_FORMAT_UINT64(address),
|
||||
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
|
||||
revision >
|
||||
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
|
||||
@@ -281,8 +277,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
|
||||
acpi_tb_cleanup_table_header(&local_header, header);
|
||||
|
||||
ACPI_INFO((AE_INFO,
|
||||
"%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
|
||||
local_header.signature, ACPI_CAST_PTR(void, address),
|
||||
"%-4.4s 0x%8.8X%8.8X %05X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
|
||||
local_header.signature, ACPI_FORMAT_UINT64(address),
|
||||
local_header.length, local_header.revision,
|
||||
local_header.oem_id, local_header.oem_table_id,
|
||||
local_header.oem_revision,
|
||||
@@ -474,8 +470,8 @@ acpi_tb_install_table(acpi_physical_address address,
|
||||
table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
|
||||
if (!table) {
|
||||
ACPI_ERROR((AE_INFO,
|
||||
"Could not map memory for table [%s] at %p",
|
||||
signature, ACPI_CAST_PTR(void, address)));
|
||||
"Could not map memory for table [%s] at %8.8X%8.8X",
|
||||
signature, ACPI_FORMAT_UINT64(address)));
|
||||
return;
|
||||
}

@@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id,
acpi_gbl_address_range_list[space_id] = range_info;

ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"\nAdded [%4.4s] address range: 0x%p-0x%p\n",
"\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->region_node),
ACPI_CAST_PTR(void, address),
ACPI_CAST_PTR(void, range_info->end_address)));
ACPI_FORMAT_UINT64(address),
ACPI_FORMAT_UINT64(range_info->end_address)));

(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
@@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
}

ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
"\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
"\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n",
acpi_ut_get_node_name(range_info->
region_node),
ACPI_CAST_PTR(void,
range_info->
start_address),
ACPI_CAST_PTR(void,
range_info->
end_address)));
ACPI_FORMAT_UINT64(range_info->
start_address),
ACPI_FORMAT_UINT64(range_info->
end_address)));

ACPI_FREE(range_info);
return_VOID;
@@ -244,9 +242,9 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id,
region_node);

ACPI_WARNING((AE_INFO,
"0x%p-0x%p %s conflicts with Region %s %d",
ACPI_CAST_PTR(void, address),
ACPI_CAST_PTR(void, end_address),
"0x%8.8X%8.8X-0x%8.8X%8.8X %s conflicts with Region %s %d",
ACPI_FORMAT_UINT64(address),
ACPI_FORMAT_UINT64(end_address),
acpi_ut_get_region_name(space_id),
pathname, overlap_count));
ACPI_FREE(pathname);

@@ -165,10 +165,12 @@ acpi_status acpi_enable_subsystem(u32 flags)
* Obtain a permanent mapping for the FACS. This is required for the
* Global Lock and the Firmware Waking Vector
*/
status = acpi_tb_initialize_facs();
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
return_ACPI_STATUS(status);
if (!(flags & ACPI_NO_FACS_INIT)) {
status = acpi_tb_initialize_facs();
if (ACPI_FAILURE(status)) {
ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
return_ACPI_STATUS(status);
}
}
#endif /* !ACPI_REDUCED_HARDWARE */

@@ -173,7 +173,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
static void __init acpi_reserve_resources(void)
{
acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
"ACPI PM1a_EVT_BLK");
@@ -202,10 +202,7 @@ static int __init acpi_reserve_resources(void)
if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

return 0;
}
device_initcall(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{

@@ -1727,6 +1724,7 @@ acpi_status __init acpi_os_initialize(void)

acpi_status __init acpi_os_initialize1(void)
{
acpi_reserve_resources();
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);

@@ -1684,8 +1684,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;

/* if LPM is enabled, PHYRDY doesn't mean anything */
if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
if (sata_lpm_ignore_phy_events(&ap->link)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
}

@@ -4150,9 +4150,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },

/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },

/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4200,6 +4201,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },

/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },

/*
* Some WD SATA-I drives spin up and down erratically when the link
* is put into the slumber mode. We don't have full list of the
@@ -4504,7 +4508,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;

err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
/* On some disks, this command causes spin-up, so we need longer timeout */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -6800,6 +6805,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
return tmp;
}

/**
* sata_lpm_ignore_phy_events - test if PHY event should be ignored
* @link: Link receiving the event
*
* Test whether the received PHY event has to be ignored or not.
*
* LOCKING:
* None:
*
* RETURNS:
* True if the event has to be ignored.
*/
bool sata_lpm_ignore_phy_events(struct ata_link *link)
{
unsigned long lpm_timeout = link->last_lpm_change +
msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

/* if LPM is enabled, PHYRDY doesn't mean anything */
if (link->lpm_policy > ATA_LPM_MAX_POWER)
return true;

/* ignore the first PHY event after the LPM policy changed
* as it is might be spurious
*/
if ((link->flags & ATA_LFLAG_CHANGED) &&
time_before(jiffies, lpm_timeout))
return true;

return false;
}
EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
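This ignore window pairs with the ata_eh_set_lpm hunk below, which stamps last_lpm_change and sets ATA_LFLAG_CHANGED whenever the policy changes. A standalone sketch of the same arithmetic (the HZ value and the 10-second ATA_TMOUT_SPURIOUS_PHY are assumptions for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 100                         /* assumed tick rate */
#define ATA_TMOUT_SPURIOUS_PHY 10000   /* ms, assumed per this series */

static uint64_t msecs_to_jiffies(uint64_t ms) { return ms * HZ / 1000; }

/* true while PHY events should still be treated as possibly spurious */
static bool in_ignore_window(uint64_t now, uint64_t last_lpm_change)
{
	return now < last_lpm_change + msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
}

int main(void)
{
	uint64_t changed_at = 5000; /* jiffies when the LPM policy changed */

	/* window is 1000 jiffies at HZ=100: first check 1, second 0 */
	printf("%d\n", in_ignore_window(changed_at + 500, changed_at));
	printf("%d\n", in_ignore_window(changed_at + 1500, changed_at));
	return 0;
}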

/*
* Dummy port_ops
*/

@@ -3481,6 +3481,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
}
}

link->last_lpm_change = jiffies;
link->flags |= ATA_LFLAG_CHANGED;

return 0;

fail:

@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
} else if (vendor == 0x11ab && devid == 0x4140) {
/* Marvell 4140 quirks */
ata_for_each_link(link, ap, EDGE) {
/* port 4 is for SEMB device and it doesn't like SRST */
if (link->pmp == 4)
link->flags |= ATA_LFLAG_DISABLED;
}
}
}

@@ -2512,7 +2512,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;

if (ata_id_has_trim(args->id)) {
if (ata_id_has_trim(args->id) &&
!(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* TPE */

if (ata_id_has_zero_after_trim(args->id))

@@ -1068,7 +1068,7 @@ static struct of_device_id octeon_cf_match[] = {
},
{},
};
MODULE_DEVICE_TABLE(of, octeon_i2c_match);
MODULE_DEVICE_TABLE(of, octeon_cf_match);

static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe,

@@ -543,10 +543,8 @@ static void fw_dev_release(struct device *dev)
module_put(THIS_MODULE);
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);

if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -557,6 +555,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
int err = 0;

mutex_lock(&fw_lock);
if (fw_priv->buf)
err = do_firmware_uevent(fw_priv, env);
mutex_unlock(&fw_lock);
return err;
}
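The split above is the familiar check-under-lock pattern: a uevent can fire while the firmware buffer is being torn down, so the wrapper takes fw_lock and only calls do_firmware_uevent() while fw_priv->buf is still non-NULL. A generic user-space sketch of the same shape (types and names here are illustrative, not the driver's):

#include <pthread.h>
#include <stddef.h>

struct resource { int id; };
struct object { struct resource *res; };

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller holds obj_lock, so obj->res cannot vanish underneath us */
static int do_emit(struct object *obj)
{
	return obj->res->id;
}

static int emit(struct object *obj)
{
	int err = 0;

	pthread_mutex_lock(&obj_lock);
	if (obj->res)                /* re-check under the lock */
		err = do_emit(obj);
	pthread_mutex_unlock(&obj_lock);
	return err;
}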

static struct class firmware_class = {
.name = "firmware",
.class_attrs = firmware_class_attrs,

@@ -1589,7 +1589,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
&ival);
if (ret != 0)
return ret;
memcpy(val + (i * val_bytes), &ival, val_bytes);
map->format.format_val(val + (i * val_bytes), ival, 0);
}
}

@@ -457,6 +457,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1670,6 +1671,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p\n", __func__, obj_request);

if (obj_request_img_data_test(obj_request))
rbd_osd_copyup_callback(obj_request);
else
obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
struct ceph_msg *msg)
{
@@ -1708,6 +1719,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_stat_callback(obj_request);
break;
case CEPH_OSD_OP_CALL:
rbd_osd_call_callback(obj_request);
break;
case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
rbd_osd_trivial_callback(obj_request);
@@ -1851,11 +1864,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
rbd_assert(obj_request_type_valid(type));

size = strlen(object_name) + 1;
name = kmalloc(size, GFP_KERNEL);
name = kmalloc(size, GFP_NOIO);
if (!name)
return NULL;

obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
if (!obj_request) {
kfree(name);
return NULL;
@@ -2305,13 +2318,15 @@ out_unwind:
}

static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request;
struct rbd_device *rbd_dev;
struct page **pages;
u32 page_count;

dout("%s: obj %p\n", __func__, obj_request);

rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
rbd_assert(obj_request_img_data_test(obj_request));
img_request = obj_request->img_request;
@@ -2337,9 +2352,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
if (!obj_request->result)
obj_request->xferred = obj_request->length;

/* Finish up with the normal image object callback */

rbd_img_obj_callback(obj_request);
obj_request_done_set(obj_request);
}

static void
@@ -2436,7 +2449,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)

/* All set, send it off. */

orig_request->callback = rbd_img_obj_copyup_callback;
osdc = &rbd_dev->rbd_client->client->osdc;
img_result = rbd_obj_request_submit(osdc, orig_request);
if (!img_result)

@@ -1234,6 +1234,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
}
fw_ptr = fw->data;

kfree_skb(skb);

/* This Intel specific command enables the manufacturer mode of the
* controller.
*

@@ -838,7 +838,7 @@ fs_initcall(mvebu_mbus_debugfs_init);
int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
size_t mbuswins_size,
phys_addr_t sdramwins_phys_base,
size_t sdramwins_size)
size_t sdramwins_size, int is_coherent)
{
struct mvebu_mbus_state *mbus = &mbus_state;
const struct of_device_id *of_id;
@@ -865,8 +865,7 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
return -ENOMEM;
}

if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
mbus->hw_io_coherency = 1;
mbus->hw_io_coherency = is_coherent;

for (win = 0; win < mbus->soc->num_wins; win++)
mvebu_mbus_disable_window(mbus, win);

@@ -583,7 +583,7 @@ static inline int needs_ilk_vtd_wa(void)
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;

@@ -2717,7 +2717,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
smi_info->si_sm, 100);
smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
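The replaced constant matters because the elapsed-time argument to handlers->event() is in microseconds and the code has just slept for one jiffy, whose length depends on HZ; the hard-coded 100 would only be right if HZ were 10000. A quick check of what jiffies_to_usecs(1) works out to under common HZ choices:

#include <stdio.h>

int main(void)
{
	const unsigned int hz[] = { 100, 250, 300, 1000 };

	/* prints 10000, 4000, 3333, 1000 -- never anywhere near 100 */
	for (unsigned int i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
		printf("HZ=%u: one jiffy = %u us\n", hz[i], 1000000 / hz[i]);
	return 0;
}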

@@ -618,6 +618,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto cleanup;
}

ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;

crq_q = &ibmvtpm->crq_queue;
crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
if (!crq_q->crq_addr) {
@@ -662,8 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,

crq_q->index = 0;

ibmvtpm->dev = dev;
ibmvtpm->vdev = vio_dev;
TPM_VPRIV(chip) = (void *)ibmvtpm;

spin_lock_init(&ibmvtpm->rtce_lock);

@@ -22,6 +22,8 @@
#include <linux/notifier.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <asm/cputime.h>
#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
@@ -49,6 +51,12 @@ struct all_cpufreq_stats {
unsigned int *freq_table;
};

struct cpufreq_power_stats {
unsigned int state_num;
unsigned int *curr;
unsigned int *freq_table;
};

struct all_freq_table {
unsigned int *freq_table;
unsigned int table_size;
@@ -58,6 +66,7 @@ static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
struct attribute attr;
@@ -128,6 +137,47 @@ static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
return -1;
}

void acct_update_power(struct task_struct *task, cputime_t cputime) {
struct cpufreq_power_stats *powerstats;
struct cpufreq_stats *stats;
unsigned int cpu_num, curr;

if (!task)
return;
cpu_num = task_cpu(task);
powerstats = per_cpu(cpufreq_power_stats, cpu_num);
stats = per_cpu(cpufreq_stats_table, cpu_num);
if (!powerstats || !stats)
return;

curr = powerstats->curr[stats->last_index];
task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);
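The units fall out of the two factors: powerstats->curr[] holds the current draw at the frequency the CPU was last seen at (mA, read from the device tree below), and cputime_to_usecs() converts the charged CPU time, so task->cpu_power accumulates in mA*us. A worked example with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t curr_ma = 300;     /* assumed curr[] entry for the last freq */
	uint64_t cputime_us = 5000; /* 5 ms of CPU time, already in usecs */
	uint64_t cpu_power = 0;     /* plays the role of task->cpu_power */

	cpu_power += (uint64_t)curr_ma * cputime_us;
	printf("%llu mA*us charged\n", (unsigned long long)cpu_power); /* 1500000 */
	return 0;
}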

static ssize_t show_current_in_state(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
ssize_t len = 0;
unsigned int i, cpu;
struct cpufreq_power_stats *powerstats;

spin_lock(&cpufreq_stats_lock);
for_each_possible_cpu(cpu) {
powerstats = per_cpu(cpufreq_power_stats, cpu);
if (!powerstats)
continue;
len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
for (i = 0; i < powerstats->state_num; i++)
len += scnprintf(buf + len, PAGE_SIZE - len,
"%d=%d ", powerstats->freq_table[i],
powerstats->curr[i]);
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
}
spin_unlock(&cpufreq_stats_lock);
return len;
}

static ssize_t show_all_time_in_state(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -237,6 +287,9 @@ static struct attribute_group stats_attr_group = {
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
0444, show_current_in_state, NULL);

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
int index;
@@ -306,10 +359,27 @@ static void cpufreq_allstats_free(void)
}
}

static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
static void cpufreq_powerstats_free(void)
{
unsigned int i, j, count = 0, ret = 0;
int cpu;
struct cpufreq_power_stats *powerstats;

sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);

for_each_possible_cpu(cpu) {
powerstats = per_cpu(cpufreq_power_stats, cpu);
if (!powerstats)
continue;
kfree(powerstats->curr);
kfree(powerstats);
per_cpu(cpufreq_power_stats, cpu) = NULL;
}
}

static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table, int count)
{
unsigned int i, j, ret = 0;
struct cpufreq_stats *stat;
struct cpufreq_policy *data;
unsigned int alloc_size;
@@ -333,12 +403,6 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->cpu = cpu;
per_cpu(cpufreq_stats_table, cpu) = stat;

for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
count++;
}

alloc_size = count * sizeof(int) + count * sizeof(u64);

@@ -396,6 +460,54 @@ static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
stat->cpu = policy->cpu;
}

static void cpufreq_powerstats_create(unsigned int cpu,
struct cpufreq_frequency_table *table, int count) {
unsigned int alloc_size, i = 0, j = 0, ret = 0;
struct cpufreq_power_stats *powerstats;
struct device_node *cpu_node;
char device_path[16];

powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
GFP_KERNEL);
if (!powerstats)
return;

/* Allocate memory for freq table per cpu as well as clockticks per
* freq*/
alloc_size = count * sizeof(unsigned int) +
count * sizeof(unsigned int);
powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
if (!powerstats->curr) {
kfree(powerstats);
return;
}
powerstats->freq_table = powerstats->curr + count;

spin_lock(&cpufreq_stats_lock);
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
unsigned int freq = table[i].frequency;

if (freq == CPUFREQ_ENTRY_INVALID)
continue;
powerstats->freq_table[j++] = freq;
}
powerstats->state_num = j;

snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
cpu_node = of_find_node_by_path(device_path);
if (cpu_node) {
ret = of_property_read_u32_array(cpu_node, "current",
powerstats->curr, count);
if (ret) {
kfree(powerstats->curr);
kfree(powerstats);
powerstats = NULL;
}
}
per_cpu(cpufreq_power_stats, cpu) = powerstats;
spin_unlock(&cpufreq_stats_lock);
}
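cpufreq_powerstats_create() therefore expects each /cpus/cpu@N node to carry a "current" property with exactly count entries, one per valid frequency row; if the read fails, the whole powerstats allocation is dropped. An illustrative node and the pairing it produces (all values invented):

/*
 * /cpus/cpu@0 {
 *         ...
 *         current = <120 180 300>;    // mA per valid frequency row
 * };
 *
 * With a frequency table of { 300 MHz, CPUFREQ_ENTRY_INVALID, 600 MHz,
 * 1000 MHz } the invalid row is skipped, count is 3, and the result is:
 *
 *   freq_table[] = { 300000, 600000, 1000000 }   (kHz)
 *   curr[]       = { 120, 180, 300 }             (mA)
 */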

static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
unsigned int lhs = *(const unsigned int *)(lhs_ptr);
@@ -440,24 +552,14 @@ static void add_all_freq_table(unsigned int freq)
all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}

static void cpufreq_allstats_create(unsigned int cpu)
static void cpufreq_allstats_create(unsigned int cpu,
struct cpufreq_frequency_table *table, int count)
{
int i , j = 0;
unsigned int alloc_size, count = 0;
struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu);
unsigned int alloc_size;
struct all_cpufreq_stats *all_stat;
bool sort_needed = false;

if (!table)
return;

for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
count++;
}

all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
GFP_KERNEL);
if (!all_stat) {
@@ -499,7 +601,7 @@ static void cpufreq_allstats_create(unsigned int cpu)
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
int ret;
int ret, count = 0, i;
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
@@ -515,10 +617,21 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
if (!table)
return 0;

if (!per_cpu(all_cpufreq_stats, cpu))
cpufreq_allstats_create(cpu);
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned int freq = table[i].frequency;

ret = cpufreq_stats_create_table(policy, table);
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
count++;
}

if (!per_cpu(all_cpufreq_stats, cpu))
cpufreq_allstats_create(cpu, table, count);

if (!per_cpu(cpufreq_power_stats, cpu))
cpufreq_powerstats_create(cpu, table, count);

ret = cpufreq_stats_create_table(policy, table, count);
if (ret)
return ret;
return 0;
@@ -564,7 +677,7 @@ static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
struct cpufreq_policy *policy;
struct cpufreq_frequency_table *table;
int ret = -ENODEV;
int ret = -ENODEV, i, count = 0;

policy = cpufreq_cpu_get(cpu);
if (!policy)
@@ -574,10 +687,21 @@ static int cpufreq_stats_create_table_cpu(unsigned int cpu)
if (!table)
goto out;

if (!per_cpu(all_cpufreq_stats, cpu))
cpufreq_allstats_create(cpu);
for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
unsigned int freq = table[i].frequency;

ret = cpufreq_stats_create_table(policy, table);
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
count++;
}

if (!per_cpu(all_cpufreq_stats, cpu))
cpufreq_allstats_create(cpu, table, count);

if (!per_cpu(cpufreq_power_stats, cpu))
cpufreq_powerstats_create(cpu, table, count);

ret = cpufreq_stats_create_table(policy, table, count);

out:
cpufreq_cpu_put(policy);
@@ -655,7 +779,12 @@ static int cpufreq_stats_setup(void)
ret = sysfs_create_file(cpufreq_global_kobject,
&_attr_all_time_in_state.attr);
if (ret)
pr_warn("Error creating sysfs file for cpufreq stats\n");
pr_warn("Cannot create sysfs file for cpufreq stats\n");

ret = sysfs_create_file(cpufreq_global_kobject,
&_attr_current_in_state.attr);
if (ret)
pr_warn("Cannot create sysfs file for cpufreq current stats\n");

return 0;
}
@@ -674,6 +803,7 @@ static void cpufreq_stats_cleanup(void)
cpufreq_stats_free_sysfs(cpu);
}
cpufreq_allstats_free();
cpufreq_powerstats_free();
}

#ifdef CONFIG_BL_SWITCHER

@@ -135,6 +135,9 @@ int cpuidle_idle_call(void)

/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (next_state < 0)
return -EBUSY;

if (need_resched()) {
dev->last_residency = 0;
/* give the governor an opportunity to reflect on the outcome */

@@ -274,7 +274,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}

data->last_state_idx = 0;
data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
data->exit_us = 0;

/* Special case when user has set very strict latency requirement */

@@ -895,13 +895,14 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
int sec4_sg_bytes;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;

sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
sec4_sg_src_index = 1 + (buflen ? 1 : 0);
sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -928,7 +929,7 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
LDST_SGF);
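The new sec4_sg_src_index exists because SEC4_SG_LEN_FIN must be set on the last scatter/gather entry, and the old code indexed the table with the byte count instead of the entry count. A standalone illustration of how far off that was (the 16-byte entry size is an assumption for the demo):

#include <stdio.h>

struct sec4_sg_entry { unsigned char raw[16]; }; /* assumed entry size */

int main(void)
{
	int buflen = 32;                      /* any non-zero buffered length */
	int src_index = 1 + (buflen ? 1 : 0); /* entries: ctx + buf = 2 */
	int sg_bytes = src_index * (int)sizeof(struct sec4_sg_entry);

	printf("last valid entry:  %d\n", src_index - 1); /* 1 */
	printf("old (buggy) index: %d\n", sg_bytes - 1);  /* 31, out of bounds */
	return 0;
}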

@@ -56,7 +56,7 @@

/* Buffer, its dma address and lock */
struct buf_data {
u8 buf[RN_BUF_SIZE];
u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
dma_addr_t addr;
struct completion filled;
u32 hw_desc[DESC_JOB_O_LEN];

@@ -915,7 +915,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))

@@ -935,7 +935,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
sg_count--;
link_tbl_ptr--;
}
be16_add_cpu(&link_tbl_ptr->len, cryptlen);
link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+ cryptlen);

/* tag end of link table */
link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
@@ -2621,6 +2622,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
kfree(t_alg);
return ERR_PTR(-EINVAL);
}

@@ -393,7 +393,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
dma_cookie_t cookie = 0;
int busy = mv_chan_is_busy(mv_chan);
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
int current_cleaned = 0;
struct mv_xor_desc *hw_desc;

dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -405,38 +406,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)

list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
chain_node) {
prefetch(_iter);
prefetch(&_iter->async_tx);

/* do not advance past the current descriptor loaded into the
* hardware channel, subsequent descriptors are either in
* process or have not been submitted
*/
if (seen_current)
break;
/* clean finished descriptors */
hw_desc = iter->hw_desc;
if (hw_desc->status & XOR_DESC_SUCCESS) {
cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
cookie);

/* stop the search if we reach the current descriptor and the
* channel is busy
*/
if (iter->async_tx.phys == current_desc) {
seen_current = 1;
if (busy)
/* done processing desc, clean slot */
mv_xor_clean_slot(iter, mv_chan);

/* break if we did cleaned the current */
if (iter->async_tx.phys == current_desc) {
current_cleaned = 1;
break;
}
} else {
if (iter->async_tx.phys == current_desc) {
current_cleaned = 0;
break;
}
}

cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

if (mv_xor_clean_slot(iter, mv_chan))
break;
}

if ((busy == 0) && !list_empty(&mv_chan->chain)) {
struct mv_xor_desc_slot *chain_head;
chain_head = list_entry(mv_chan->chain.next,
struct mv_xor_desc_slot,
chain_node);

mv_xor_start_new_chain(mv_chan, chain_head);
if (current_cleaned) {
/*
* current descriptor cleaned and removed, run
* from list head
*/
iter = list_entry(mv_chan->chain.next,
struct mv_xor_desc_slot,
chain_node);
mv_xor_start_new_chain(mv_chan, iter);
} else {
if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
/*
* descriptors are still waiting after
* current, trigger them
*/
iter = list_entry(iter->chain_node.next,
struct mv_xor_desc_slot,
chain_node);
mv_xor_start_new_chain(mv_chan, iter);
} else {
/*
* some descriptors are still waiting
* to be cleaned
*/
tasklet_schedule(&mv_chan->irq_tasklet);
}
}
}

if (cookie > 0)

@@ -32,6 +32,7 @@
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_OPERATION_MODE_MEMSET 4
#define XOR_DESC_SUCCESS 0x40000000

#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))

@@ -921,7 +921,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
*/

for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *csi = &mci->csrows[row];
struct csrow_info *csi = mci->csrows[row];

/*
* Get the configuration settings for this

@@ -623,7 +623,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
u32 reg;
u64 limit, prv = 0;
u64 tmp_mb;
u32 mb, kb;
u32 gb, mb;
u32 rir_way;

/*
@@ -636,8 +636,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pvt->tolm = GET_TOLM(reg);
tmp_mb = (1 + pvt->tolm) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tolm);

/* Address range is already 45:25 */
pci_read_config_dword(pvt->pci_sad1, TOHM,
@@ -645,8 +646,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pvt->tohm = GET_TOHM(reg);
tmp_mb = (1 + pvt->tohm) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
gb, (mb*1000)/1024, (u64)pvt->tohm);
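The rework splits the MiB count with a 1024 divisor so binary sizes print exactly, and (mb*1000)/1024 scales the binary remainder into the decimal fraction the %u.%03u format expects. For tmp_mb = 1536 that yields gb = 1, mb = 512, fraction 500, printed as "1.500 GB"; the old 1000-based split printed "1.536 GB" for the same value. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmp_mb = 1536; /* 1.5 GiB expressed in MiB */
	uint32_t gb = (uint32_t)(tmp_mb / 1024);
	uint32_t mb = (uint32_t)(tmp_mb % 1024);

	printf("%u.%03u GB\n", gb, (mb * 1000) / 1024); /* 1.500 GB */
	return 0;
}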

/*
* Step 2) Get SAD range and SAD Interleave list
@@ -668,11 +670,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;

tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
n_sads,
get_dram_attr(reg),
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
reg);
@@ -702,9 +704,9 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
break;
tmp_mb = (limit + 1) >> 20;

mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, mb, kb,
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)TAD_SOCK(reg),
(u32)TAD_CH(reg),
@@ -727,10 +729,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tad_ch_nilv_offset[j],
&reg);
tmp_mb = TAD_OFFSET(reg) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
i, j,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
reg);
}

@@ -752,10 +754,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)

tmp_mb = RIR_LIMIT(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
i, j,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
rir_way,
reg);
@@ -766,10 +768,10 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
&reg);
tmp_mb = RIR_OFFSET(reg) << 6;

mb = div_u64_rem(tmp_mb, 1000, &kb);
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
mb, kb,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
(u32)RIR_RNK_TGT(reg),
reg);
@@ -806,7 +808,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 ch_way,sck_way;
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
u32 mb, gb;
u64 ch_addr, offset, limit, prv = 0;

@@ -1022,10 +1024,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
continue;

limit = RIR_LIMIT(reg);
mb = div_u64_rem(limit >> 20, 1000, &kb);
gb = div_u64_rem(limit >> 20, 1024, &mb);
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
n_rir,
mb, kb,
gb, (mb*1000)/1024,
limit,
1 << RIR_WAY(reg));
if (ch_addr <= limit)

@@ -1955,8 +1955,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;

/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
/*
* Universal plane src offsets are only 16.16, prevent havoc for
* drivers using universal plane code internally.
*/
if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
return -ERANGE;

drm_modeset_lock_all(dev);
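The mask is the whole story here: universal-plane source coordinates are 16.16 fixed point, so an offset only survives the later shift into 16.16 form if it fits in the low 16 bits, and any bit caught by 0xffff0000 would overflow. A quick illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bad = 70000;  /* > 65535: caught by the 0xffff0000 mask */
	uint32_t ok = 1024;

	if (bad & 0xffff0000)
		printf("%u rejected: (%u << 16) does not fit in 32 bits\n",
		       bad, bad);

	printf("%u -> 16.16 value 0x%08x\n", ok, ok << 16); /* 0x04000000 */
	return 0;
}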

@@ -1804,7 +1804,7 @@ void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern inline bool intel_gmbus_is_port_valid(unsigned port)
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}

@@ -441,7 +441,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i, reg_offset;
int i = 0, inc, try = 0, reg_offset;
int ret = 0;

mutex_lock(&dev_priv->gmbus_mutex);
@@ -453,12 +453,14 @@ gmbus_xfer(struct i2c_adapter *adapter,

reg_offset = dev_priv->gpio_mmio_base;

retry:
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);

for (i = 0; i < num; i++) {
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
inc = 2; /* an index read is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
@@ -530,6 +532,18 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);

/*
* Passive adapters sometimes NAK the first probe. Retry the first
* message once on -ENXIO for GMBUS transfers; the bit banging algorithm
* has retries internally. See also the retry loop in
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
}

goto out;

timeout:
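Two details of the reworked loop are easy to miss: an index read consumes two i2c_msg entries, so the iterator now advances by inc (2 for index reads, 1 otherwise), and the single -ENXIO retry restarts from message 0 only when the very first message NAKed, with try++ == 0 capping it at one extra attempt. A condensed sketch of the advance-by-inc shape, with the actual transfers stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for gmbus_is_index_read(): every even message here, for demo */
static bool is_index_read(int i, int num)
{
	return (i % 2) == 0 && i + 1 < num;
}

static void process(int num)
{
	int inc;

	for (int i = 0; i < num; i += inc) {
		inc = 1;
		if (is_index_read(i, num)) {
			inc = 2; /* an index read is two msgs */
			printf("index read: msgs %d+%d\n", i, i + 1);
		} else {
			printf("plain xfer: msg %d\n", i);
		}
	}
}

int main(void)
{
	process(5); /* msgs 0+1, 2+3, then plain msg 4 */
	return 0;
}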

@@ -1487,6 +1487,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BANDWIDTH;
}

if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
(mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
return MODE_H_ILLEGAL;
}

if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
Some files were not shown because too many files have changed in this diff.