Merge tag 'v6.6.88' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroid-6.6.y

This is the 6.6.88 stable release

Change-Id: I84c7972984488b8c803b5cea9c7545c5cf9bfa44
@@ -55,8 +55,7 @@ properties:
- const: arm,primecell

reg:
minItems: 1
maxItems: 2
maxItems: 1

clocks:
maxItems: 1

@@ -41,8 +41,7 @@ properties:
- const: arm,primecell

reg:
minItems: 1
maxItems: 2
maxItems: 1

clocks:
maxItems: 1

@@ -71,7 +71,7 @@ properties:
description:
Any lane can be inverted or not.
minItems: 1
maxItems: 2
maxItems: 3

required:
- data-lanes
@@ -892,11 +892,10 @@ attribute-sets:
-
name: prop-list
type: nest
nested-attributes: link-attrs
nested-attributes: prop-list-link-attrs
-
name: alt-ifname
type: string
multi-attr: true
-
name: perm-address
type: binary
@@ -931,6 +930,13 @@ attribute-sets:
-
name: gro-ipv4-max-size
type: u32
-
name: prop-list-link-attrs
subset-of: link-attrs
attributes:
-
name: alt-ifname
multi-attr: true
-
name: af-spec-attrs
attributes:
@@ -1193,9 +1199,10 @@ attribute-sets:
type: u32
-
name: mctp-attrs
name-prefix: ifla-mctp-
attributes:
-
name: mctp-net
name: net
type: u32
-
name: stats-attrs
@@ -1362,7 +1369,6 @@ operations:
- min-mtu
- max-mtu
- prop-list
- alt-ifname
- perm-address
- proto-down-reason
- parent-dev-name
@@ -4784,6 +4784,7 @@ S: Maintained
F: Documentation/admin-guide/module-signing.rst
F: certs/
F: scripts/sign-file.c
F: scripts/ssl-common.h
F: tools/certs/

CFAG12864B LCD DRIVER
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 87
SUBLEVEL = 88
EXTRAVERSION =
NAME = Pinguïn Aangedreven
@@ -1004,6 +1004,9 @@ ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -fconserve-stack
endif

# Ensure compilers do not transform certain loops into calls to wcslen()
KBUILD_CFLAGS += -fno-builtin-wcslen

# change __FILE__ to the relative path from the srctree
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
@@ -1246,8 +1246,7 @@
};

pwm0: pwm@1401e000 {
compatible = "mediatek,mt8173-disp-pwm",
"mediatek,mt6595-disp-pwm";
compatible = "mediatek,mt8173-disp-pwm";
reg = <0 0x1401e000 0 0x1000>;
#pwm-cells = <2>;
clocks = <&mmsys CLK_MM_DISP_PWM026M>,
@@ -1257,8 +1256,7 @@
};

pwm1: pwm@1401f000 {
compatible = "mediatek,mt8173-disp-pwm",
"mediatek,mt6595-disp-pwm";
compatible = "mediatek,mt8173-disp-pwm";
reg = <0 0x1401f000 0 0x1000>;
#pwm-cells = <2>;
clocks = <&mmsys CLK_MM_DISP_PWM126M>,
@@ -75,6 +75,7 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
@@ -119,6 +120,7 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -158,6 +160,7 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
@@ -195,6 +198,7 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
@@ -369,31 +369,33 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user) \
do { \
typeof(start) __flush_start = start; \
typeof(pages) __flush_pages = pages; \
int num = 0; \
int scale = 3; \
unsigned long addr; \
\
while (pages > 0) { \
while (__flush_pages > 0) { \
if (!system_supports_tlb_range() || \
pages == 1) { \
addr = __TLBI_VADDR(start, asid); \
__flush_pages == 1) { \
addr = __TLBI_VADDR(__flush_start, asid); \
__tlbi_level(op, addr, tlb_level); \
if (tlbi_user) \
__tlbi_user_level(op, addr, tlb_level); \
start += stride; \
pages -= stride >> PAGE_SHIFT; \
__flush_start += stride; \
__flush_pages -= stride >> PAGE_SHIFT; \
continue; \
} \
\
num = __TLBI_RANGE_NUM(pages, scale); \
num = __TLBI_RANGE_NUM(__flush_pages, scale); \
if (num >= 0) { \
addr = __TLBI_VADDR_RANGE(start, asid, scale, \
num, tlb_level); \
addr = __TLBI_VADDR_RANGE(__flush_start, asid, \
scale, num, tlb_level); \
__tlbi(r##op, addr); \
if (tlbi_user) \
__tlbi_user(r##op, addr); \
start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
pages -= __TLBI_RANGE_PAGES(num, scale); \
__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
} \
scale--; \
} \
@@ -845,52 +845,86 @@ static unsigned long system_bhb_mitigations;
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
u8 spectre_bhb_loop_affected(int scope)
static bool is_spectre_bhb_safe(int scope)
{
static const struct midr_range spectre_bhb_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
static bool all_safe = true;

if (scope != SCOPE_LOCAL_CPU)
return all_safe;

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
return true;

all_safe = false;

return false;
}

static u8 spectre_bhb_loop_affected(void)
{
u8 k = 0;
static u8 max_bhb_k;

if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
{},
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
{},
};
static const struct midr_range spectre_bhb_k8_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
{},
};

if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

max_bhb_k = max(max_bhb_k, k);
} else {
k = max_bhb_k;
}
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
k = 132;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
k = 38;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
k = 11;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;

return k;
}
@@ -916,29 +950,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
}
}

static bool is_spectre_bhb_fw_affected(int scope)
static bool has_spectre_bhb_fw_mitigation(void)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
spectre_bhb_firmware_mitigated_list);

if (scope != SCOPE_LOCAL_CPU)
return system_affected;

fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
system_affected = true;
return true;
}

return false;
return has_smccc && fw_state == SPECTRE_MITIGATED;
}

static bool supports_ecbhb(int scope)
@@ -954,6 +972,8 @@ static bool supports_ecbhb(int scope)
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

static u8 max_bhb_k;

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -962,16 +982,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
if (supports_csv2p3(scope))
return false;

if (supports_clearbhb(scope))
return true;
if (is_spectre_bhb_safe(scope))
return false;

if (spectre_bhb_loop_affected(scope))
return true;
/*
* At this point the core isn't known to be "safe" so we're going to
* assume it's vulnerable. We still need to update `max_bhb_k` though,
* but only if we aren't mitigating with clearbhb though.
*/
if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());

if (is_spectre_bhb_fw_affected(scope))
return true;

return false;
return true;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1002,7 +1024,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
enum mitigation_state state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1028,7 +1050,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
} else if (spectre_bhb_loop_affected()) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
@@ -1041,32 +1063,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;
} else if (has_spectre_bhb_fw_mitigation()) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
* call, so needs upgrading to
* HYP_VECTOR_SPECTRE_INDIRECT.
*/
if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
data->slot += 1;

this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);
/*
* The WA3 call in the vectors supersedes the WA1 call
* made during context-switch. Uninstall any firmware
* bp_hardening callback.
*/
cpu_cb = spectre_v2_get_sw_mitigation_cb();
if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
__this_cpu_write(bp_hardening_data.fn, NULL);

state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}
state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}

update_mitigation_state(&spectre_bhb_state, state);
@@ -1100,7 +1119,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
{
u8 rd;
u32 insn;
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

BUG_ON(nr_inst != 1); /* MOV -> MOV */

@@ -1109,7 +1127,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,

insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
@@ -391,7 +391,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;

return kvm_share_hyp(vcpu, vcpu + 1);
err = kvm_share_hyp(vcpu, vcpu + 1);
if (err)
kvm_vgic_vcpu_destroy(vcpu);

return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -1328,7 +1328,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
max_pfn = PFN_UP(start + size);
/* Address of hotplugged memory can be smaller */
max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}
@@ -216,18 +216,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}

/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
return slit->locality_count;
}

void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
@@ -42,7 +42,7 @@ int (*__pmax_close)(int);
* Detect which PROM the DECSTATION has, and set the callback vectors
* appropriately.
*/
void __init which_prom(s32 magic, s32 *prom_vec)
static void __init which_prom(s32 magic, s32 *prom_vec)
{
/*
* No sign of the REX PROM's magic number means we assume a non-REX
@@ -8,7 +8,7 @@
#define __ASM_DS1287_H

extern int ds1287_timer_state(void);
extern void ds1287_set_base_clock(unsigned int clock);
extern int ds1287_set_base_clock(unsigned int hz);
extern int ds1287_clockevent_init(int irq);

#endif
@@ -10,6 +10,7 @@
#include <linux/mc146818rtc.h>
#include <linux/irq.h>

#include <asm/ds1287.h>
#include <asm/time.h>

int ds1287_timer_state(void)
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
@@ -1839,6 +1840,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;

nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);

/* Copy in args. */
if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0)
@@ -19,16 +19,9 @@

#ifndef __ASSEMBLY__

void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;

static inline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
".option norvc\n"
"kgdb_compiled_break: ebreak\n"
".option rvc\n");
}

#endif /* !__ASSEMBLY__ */

#define DBG_REG_ZERO "zero"
@@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
args++;
memcpy(args, &regs->a1, 5 * sizeof(args[0]));
args[1] = regs->a1;
args[2] = regs->a2;
args[3] = regs->a3;
args[4] = regs->a4;
args[5] = regs->a5;
}

static inline int syscall_get_arch(struct task_struct *task)
@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}

noinline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
"kgdb_compiled_break: ebreak\n");
}

void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{
@@ -73,6 +73,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif

static int num_standard_resources;
static struct resource *standard_resources;

static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -146,7 +149,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
int num_resources = 0, res_idx = 0;
int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;

/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -215,6 +218,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
non_resv_res++;

if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -232,6 +236,9 @@ static void __init init_resources(void)
goto error;
}

num_standard_resources = non_resv_res;
standard_resources = &mem_res[res_idx + 1];

/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -243,6 +250,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}

static int __init reserve_memblock_reserved_regions(void)
{
u64 i, j;

for (i = 0; i < num_standard_resources; i++) {
struct resource *mem = &standard_resources[i];
phys_addr_t r_start, r_end, mem_size = resource_size(mem);

if (!memblock_is_region_reserved(mem->start, mem_size))
continue;

for_each_reserved_mem_range(j, &r_start, &r_end) {
resource_size_t start, end;

start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

if (start > mem->end || end < mem->start)
continue;

reserve_region_with_split(mem, start, end, "Reserved");
}
}

return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

static void __init parse_dtb(void)
{
@@ -931,7 +931,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
arch_enter_lazy_mmu_mode();
for (;;) {
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
@@ -940,7 +939,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_val(pte) += PAGE_SIZE;
addr += PAGE_SIZE;
}
arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes
@@ -52,8 +52,10 @@ out:

void arch_enter_lazy_mmu_mode(void)
{
struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
struct tlb_batch *tb;

preempt_disable();
tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}

@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
preempt_enable();
}

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
@@ -881,6 +881,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)

void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
static bool sevsnp;

/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else if (sev_snp_enabled()) {
} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
sevsnp = true;
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");
@@ -135,10 +135,7 @@ bool sev_snp_enabled(void)

static void __page_state_change(unsigned long paddr, enum psc_op op)
{
u64 val;

if (!sev_snp_enabled())
return;
u64 val, msr;

/*
* If private -> shared then invalidate the page before requesting the
@@ -147,6 +144,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

/* Save the current GHCB MSR value */
msr = sev_es_rd_ghcb_msr();

/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
@@ -156,6 +156,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

/* Restore the GHCB MSR value */
sev_es_wr_ghcb_msr(msr);

/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
@@ -166,11 +169,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)

void snp_set_page_private(unsigned long paddr)
{
if (!sev_snp_enabled())
return;

__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}

void snp_set_page_shared(unsigned long paddr)
{
if (!sev_snp_enabled())
return;

__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}

@@ -194,56 +203,10 @@ static bool early_setup_ghcb(void)
return true;
}

static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
phys_addr_t pa, phys_addr_t pa_end)
{
struct psc_hdr *hdr;
struct psc_entry *e;
unsigned int i;

hdr = &desc->hdr;
memset(hdr, 0, sizeof(*hdr));

e = desc->entries;

i = 0;
while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
hdr->end_entry = i;

e->gfn = pa >> PAGE_SHIFT;
e->operation = SNP_PAGE_STATE_PRIVATE;
if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
e->pagesize = RMP_PG_SIZE_2M;
pa += PMD_SIZE;
} else {
e->pagesize = RMP_PG_SIZE_4K;
pa += PAGE_SIZE;
}

e++;
i++;
}

if (vmgexit_psc(boot_ghcb, desc))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

pvalidate_pages(desc);

return pa;
}

void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
struct snp_psc_desc desc = {};
unsigned int i;
phys_addr_t pa;

if (!boot_ghcb && !early_setup_ghcb())
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

pa = start;
while (pa < end)
pa = __snp_accept_memory(&desc, pa, end);
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}

void sev_es_shutdown_ghcb(void)
@@ -12,11 +12,13 @@

bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);

#else

static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 sev_get_status(void) { return 0; }

#endif
@@ -13,6 +13,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/traps.h>

@@ -334,7 +335,7 @@ static int handle_halt(struct ve_info *ve)
return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;

@@ -345,6 +346,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}

static void __cpuidle tdx_safe_halt(void)
{
tdx_halt();
/*
* "__cpuidle" section doesn't support instrumentation, so stick
* with raw_* variant that avoids tracing hooks.
*/
raw_local_irq_enable();
}

static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_hypercall_args args = {
@@ -888,6 +899,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;

/*
* Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
* will enable interrupts before HLT TDCALL invocation if executed
* in STI-shadow, possibly resulting in missed wakeup events.
*
* Modify all possible HLT execution paths to use TDX specific routines
* that directly execute TDCALL and toggle the interrupt state as
* needed after TDCALL completion. This also reduces HLT related #VEs
* in addition to having a reliable halt logic execution.
*/
pv_ops.irq.safe_halt = tdx_safe_halt;
pv_ops.irq.halt = tdx_halt;

/*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
@@ -1203,8 +1203,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS);
gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS)) ||
((sample_type & PERF_SAMPLE_REGS_USER) &&
(attr->sample_regs_user & PEBS_GP_REGS));

tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -1856,7 +1858,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}

if (sample_type & PERF_SAMPLE_REGS_INTR)
if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
@@ -4882,28 +4882,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
@@ -5476,37 +5476,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};

static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};

static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5514,7 +5483,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = icx_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};

@@ -6241,69 +6210,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};

static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
/* Free-Running IIO BANDWIDTH OUT Counters */
INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};

static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = spr_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -56,6 +56,28 @@ static __always_inline void native_halt(void)

#endif

#ifndef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}

/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static __always_inline void halt(void)
{
native_halt();
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -77,24 +99,6 @@ static __always_inline void arch_local_irq_enable(void)
native_irq_enable();
}

/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}

/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static __always_inline void halt(void)
{
native_halt();
}

/*
* For spinlocks, etc:
*/
@@ -103,6 +103,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -168,16 +178,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}

static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}

extern noinstr void pv_native_wbinvd(void);

static __always_inline void wbinvd(void)
@@ -130,10 +130,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;

#endif
void (*safe_halt)(void);
void (*halt)(void);
#endif
} __no_randomize_layout;

struct pv_mmu_ops {
@@ -46,7 +46,7 @@ void tdx_get_ve_info(struct ve_info *ve);

bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);

void tdx_safe_halt(void);
void tdx_halt(void);

bool tdx_early_handle_ve(struct pt_regs *regs);

@@ -55,7 +55,7 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
#else

static inline void tdx_early_init(void) { };
static inline void tdx_safe_halt(void) { };
static inline void tdx_halt(void) { };

static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
@@ -62,11 +62,6 @@ void xen_arch_unregister_cpu(int num);
#ifdef CONFIG_PVH
void __init xen_pvh_init(struct boot_params *boot_params);
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#ifdef CONFIG_XEN_PVH
void __init xen_reserve_extra_memory(struct boot_params *bootp);
#else
static inline void xen_reserve_extra_memory(struct boot_params *bootp) { }
#endif
#endif

/* Lazy mode for batching updates / context switch */
@@ -825,7 +825,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
* (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);
@@ -1039,6 +1039,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)

pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);

/*
* Turn off the Instructions Retired free counter on machines that are
* susceptible to erratum #1054 "Instructions Retired Performance
* Counter May Be Inaccurate".
*/
if (c->x86_model < 0x30) {
msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
clear_cpu_cap(c, X86_FEATURE_IRPERF);
}
}

static bool cpu_has_zenbleed_microcode(void)
@@ -1185,13 +1195,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

/*
* Turn on the Instructions Retired free counter on machines not
* susceptible to erratum #1054 "Instructions Retired Performance
* Counter May Be Inaccurate".
*/
if (cpu_has(c, X86_FEATURE_IRPERF) &&
(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
/* Enable the Instructions Retired free counter */
if (cpu_has(c, X86_FEATURE_IRPERF))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

check_null_seg_clears_base(c);
@@ -1168,7 +1168,13 @@ static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
/*
* In order for each CPU to schedule its delayed work independently of the
* others, delayed work struct must be per-CPU. This is not required when
* sysctl_sld_mitigate is enabled because of the semaphore that limits
* the number of simultaneously scheduled delayed works to 1.
*/
static DEFINE_PER_CPU(struct delayed_work, sl_reenable);

/*
* If a CPU goes offline with pending delayed work to re-enable split lock
@@ -1189,7 +1195,7 @@ static int splitlock_cpu_offline(unsigned int cpu)

static void split_lock_warn(unsigned long ip)
{
struct delayed_work *work;
struct delayed_work *work = NULL;
int cpu;

if (!current->reported_split_lock)
@@ -1211,11 +1217,17 @@ static void split_lock_warn(unsigned long ip)
if (down_interruptible(&buslock_sem) == -EINTR)
return;
work = &sl_reenable_unlock;
} else {
work = &sl_reenable;
}

cpu = get_cpu();

if (!work) {
work = this_cpu_ptr(&sl_reenable);
/* Deferred initialization of per-CPU struct */
if (!work->work.func)
INIT_DELAYED_WORK(work, __split_lock_reenable);
}

schedule_delayed_work_on(cpu, work, 2);

/* Disable split lock detection on this CPU to make progress */
@@ -201,6 +201,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
case 0xb0021: return cur_rev <= 0xb002146; break;
case 0xb1010: return cur_rev <= 0xb101046; break;
case 0xb2040: return cur_rev <= 0xb204031; break;
case 0xb4040: return cur_rev <= 0xb404031; break;
case 0xb6000: return cur_rev <= 0xb600031; break;
case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}

@@ -216,8 +222,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;

if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
x86_family(bsp_cpuid_1_eax) > 0x19)
if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;

if (!need_sha_check(cur_rev))
@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
unsigned long pfn = 0;
u64 last_addr = 0;

for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];

if (pfn < PFN_UP(entry->addr))
register_nosave_region(pfn, PFN_UP(entry->addr));

pfn = PFN_DOWN(entry->addr + entry->size);

if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
register_nosave_region(PFN_UP(entry->addr), pfn);
continue;

if (pfn >= limit_pfn)
break;
if (last_addr < entry->addr)
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));

last_addr = entry->addr + entry->size;
}

register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}

#ifdef CONFIG_ACPI
@@ -142,6 +142,11 @@ int paravirt_disable_iospace(void)
return request_resource(&ioport_resource, &reserve_ioports);
}

static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}

#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -162,11 +167,6 @@ noinstr void pv_native_wbinvd(void)
{
native_wbinvd();
}

static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#endif

struct pv_info pv_info = {
@@ -224,9 +224,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
#endif /* CONFIG_PARAVIRT_XXL */

/* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,
@@ -955,7 +955,7 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
static_call_update(x86_idle, tdx_safe_halt);
static_call_update(x86_idle, tdx_halt);
} else
static_call_update(x86_idle, default_idle);
}
@@ -33,25 +33,55 @@
#include <asm/smap.h>
#include <asm/gsseg.h>

/*
* The first GDT descriptor is reserved as 'NULL descriptor'. As bits 0
* and 1 of a segment selector, i.e., the RPL bits, are NOT used to index
* GDT, selector values 0~3 all point to the NULL descriptor, thus values
* 0, 1, 2 and 3 are all valid NULL selector values.
*
* However IRET zeros ES, FS, GS, and DS segment registers if any of them
* is found to have any nonzero NULL selector value, which can be used by
* userspace in pre-FRED systems to spot any interrupt/exception by loading
* a nonzero NULL selector and waiting for it to become zero. Before FRED
* there was nothing software could do to prevent such an information leak.
*
* ERETU, the only legit instruction to return to userspace from kernel
* under FRED, by design does NOT zero any segment register to avoid this
* problem behavior.
*
* As such, leave NULL selector values 0~3 unchanged.
*/
static inline u16 fixup_rpl(u16 sel)
{
return sel <= 3 ? sel : sel | 3;
}

#ifdef CONFIG_IA32_EMULATION
#include <asm/ia32_unistd.h>

static inline void reload_segments(struct sigcontext_32 *sc)
{
unsigned int cur;
u16 cur;

/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
savesegment(gs, cur);
if ((sc->gs | 0x03) != cur)
load_gs_index(sc->gs | 0x03);
if (fixup_rpl(sc->gs) != cur)
load_gs_index(fixup_rpl(sc->gs));
savesegment(fs, cur);
if ((sc->fs | 0x03) != cur)
loadsegment(fs, sc->fs | 0x03);
if (fixup_rpl(sc->fs) != cur)
loadsegment(fs, fixup_rpl(sc->fs));

savesegment(ds, cur);
if ((sc->ds | 0x03) != cur)
loadsegment(ds, sc->ds | 0x03);
if (fixup_rpl(sc->ds) != cur)
loadsegment(ds, fixup_rpl(sc->ds));
savesegment(es, cur);
if ((sc->es | 0x03) != cur)
loadsegment(es, sc->es | 0x03);
if (fixup_rpl(sc->es) != cur)
loadsegment(es, fixup_rpl(sc->es));
}

#define sigset32_t compat_sigset_t
@@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs,
regs->orig_ax = -1;

#ifdef CONFIG_IA32_EMULATION
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
reload_segments(&sc);
#else
loadsegment(gs, sc.gs);
regs->fs = sc.fs;
regs->es = sc.es;
regs->ds = sc.ds;
loadsegment(gs, fixup_rpl(sc.gs));
regs->fs = fixup_rpl(sc.fs);
regs->es = fixup_rpl(sc.es);
regs->ds = fixup_rpl(sc.ds);
#endif

return fpu__restore_sig(compat_ptr(sc.fpstate), 1);
@@ -1011,8 +1011,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
union cpuid10_eax eax = { };
union cpuid10_edx edx = { };

if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1028,8 +1028,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)

if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;

entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1303,7 +1301,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
union cpuid_0x80000022_ebx ebx;
union cpuid_0x80000022_ebx ebx = { };

entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
@@ -11396,6 +11396,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);

kvm_vcpu_srcu_read_lock(vcpu);

r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11409,6 +11411,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;

out:
kvm_vcpu_srcu_read_unlock(vcpu);

if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
@@ -2374,7 +2374,7 @@ static int __set_pages_np(struct page *page, int numpages)
.pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS };

/*
@@ -2453,7 +2453,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
.flags = CPA_NO_CHECK_ALIAS,
};
@@ -2496,7 +2496,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS,
};

@@ -74,9 +74,6 @@ static void __init init_pvh_bootparams(bool xen_guest)
|
||||
} else
|
||||
xen_raw_printk("Warning: Can fit ISA range into e820\n");
|
||||
|
||||
if (xen_guest)
|
||||
xen_reserve_extra_memory(&pvh_bootparams);
|
||||
|
||||
pvh_bootparams.hdr.cmd_line_ptr =
|
||||
pvh_start_info.cmdline_paddr;
|
||||
|
||||
|
||||
@@ -75,6 +75,9 @@ EXPORT_SYMBOL(xen_start_flags);
|
||||
*/
|
||||
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
|
||||
|
||||
/* Number of pages released from the initial allocation. */
|
||||
unsigned long xen_released_pages;
|
||||
|
||||
static __ref void xen_get_vendor(void)
|
||||
{
|
||||
init_cpu_devs();
|
||||
@@ -471,6 +474,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
|
||||
xen_free_unpopulated_pages(1, &pg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Account for the region being in the physmap but unpopulated.
|
||||
* The value in xen_released_pages is used by the balloon
|
||||
* driver to know how much of the physmap is unpopulated and
|
||||
* set an accurate initial memory target.
|
||||
*/
|
||||
xen_released_pages += xen_extra_mem[i].n_pfns;
|
||||
/* Zero so region is not also added to the balloon driver. */
|
||||
xen_extra_mem[i].n_pfns = 0;
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/e820/api.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <asm/xen/interface.h>
|
||||
@@ -26,47 +27,6 @@
|
||||
bool __ro_after_init xen_pvh;
|
||||
EXPORT_SYMBOL_GPL(xen_pvh);
|
||||
|
||||
void __init xen_pvh_init(struct boot_params *boot_params)
|
||||
{
|
||||
xen_pvh = 1;
|
||||
xen_domain_type = XEN_HVM_DOMAIN;
|
||||
xen_start_flags = pvh_start_info.flags;
|
||||
|
||||
if (xen_initial_domain())
|
||||
x86_init.oem.arch_setup = xen_add_preferred_consoles;
|
||||
x86_init.oem.banner = xen_banner;
|
||||
|
||||
xen_efi_init(boot_params);
|
||||
|
||||
if (xen_initial_domain()) {
|
||||
struct xen_platform_op op = {
|
||||
.cmd = XENPF_get_dom0_console,
|
||||
};
|
||||
int ret = HYPERVISOR_platform_op(&op);
|
||||
|
||||
if (ret > 0)
|
||||
xen_init_vga(&op.u.dom0_console,
|
||||
min(ret * sizeof(char),
|
||||
sizeof(op.u.dom0_console)),
|
||||
&boot_params->screen_info);
|
||||
}
|
||||
}
|
||||
|
||||
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
|
||||
{
|
||||
struct xen_memory_map memmap;
|
||||
int rc;
|
||||
|
||||
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
|
||||
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
|
||||
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
|
||||
if (rc) {
|
||||
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
|
||||
BUG();
|
||||
}
|
||||
boot_params_p->e820_entries = memmap.nr_entries;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve e820 UNUSABLE regions to inflate the memory balloon.
|
||||
*
|
||||
@@ -81,8 +41,9 @@ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
|
||||
* hypervisor should notify us which memory ranges are suitable for creating
|
||||
* foreign mappings, but that's not yet implemented.
|
||||
*/
|
||||
void __init xen_reserve_extra_memory(struct boot_params *bootp)
|
||||
static void __init pvh_reserve_extra_memory(void)
|
||||
{
|
||||
struct boot_params *bootp = &boot_params;
|
||||
unsigned int i, ram_pages = 0, extra_pages;
|
||||
|
||||
for (i = 0; i < bootp->e820_entries; i++) {
|
||||
@@ -133,3 +94,51 @@ void __init xen_reserve_extra_memory(struct boot_params *bootp)
|
||||
xen_add_extra_mem(PFN_UP(e->addr), pages);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init pvh_arch_setup(void)
|
||||
{
|
||||
pvh_reserve_extra_memory();
|
||||
|
||||
if (xen_initial_domain())
|
||||
xen_add_preferred_consoles();
|
||||
}
|
||||
|
||||
void __init xen_pvh_init(struct boot_params *boot_params)
|
||||
{
|
||||
xen_pvh = 1;
|
||||
xen_domain_type = XEN_HVM_DOMAIN;
|
||||
xen_start_flags = pvh_start_info.flags;
|
||||
|
||||
x86_init.oem.arch_setup = pvh_arch_setup;
|
||||
x86_init.oem.banner = xen_banner;
|
||||
|
||||
xen_efi_init(boot_params);
|
||||
|
||||
if (xen_initial_domain()) {
|
||||
struct xen_platform_op op = {
|
||||
.cmd = XENPF_get_dom0_console,
|
||||
};
|
||||
int ret = HYPERVISOR_platform_op(&op);
|
||||
|
||||
if (ret > 0)
|
||||
xen_init_vga(&op.u.dom0_console,
|
||||
min(ret * sizeof(char),
|
||||
sizeof(op.u.dom0_console)),
|
||||
&boot_params->screen_info);
|
||||
}
|
||||
}
|
||||
|
||||
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
|
||||
{
|
||||
struct xen_memory_map memmap;
|
||||
int rc;
|
||||
|
||||
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
|
||||
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
|
||||
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
|
||||
if (rc) {
|
||||
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
|
||||
BUG();
|
||||
}
|
||||
boot_params_p->e820_entries = memmap.nr_entries;
|
||||
}
|
||||
|
||||
@@ -38,9 +38,6 @@
|
||||
|
||||
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
|
||||
|
||||
/* Number of pages released from the initial allocation. */
|
||||
unsigned long xen_released_pages;
|
||||
|
||||
/* Memory map would allow PCI passthrough. */
|
||||
bool xen_pv_pci_possible;
|
||||
|
||||
|
||||
@@ -854,6 +854,8 @@ out_unregister_ia_ranges:
|
||||
out_debugfs_remove:
|
||||
blk_debugfs_remove(disk);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
if (queue_is_mq(q))
|
||||
blk_mq_sysfs_unregister(disk);
|
||||
out_put_queue_kobj:
|
||||
kobject_put(&disk->queue_kobj);
|
||||
mutex_unlock(&q->sysfs_dir_lock);
|
||||
|
||||
@@ -84,5 +84,5 @@ targets += x509_revocation_list
|
||||
|
||||
hostprogs := extract-cert
|
||||
|
||||
HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null)
|
||||
HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts
|
||||
HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
|
||||
|
||||
@@ -21,14 +21,17 @@
|
||||
#include <openssl/bio.h>
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/engine.h>
|
||||
|
||||
/*
|
||||
* OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
|
||||
*
|
||||
* Remove this if/when that API is no longer used
|
||||
*/
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
#if OPENSSL_VERSION_MAJOR >= 3
|
||||
# define USE_PKCS11_PROVIDER
|
||||
# include <openssl/provider.h>
|
||||
# include <openssl/store.h>
|
||||
#else
|
||||
# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0)
|
||||
# define USE_PKCS11_ENGINE
|
||||
# include <openssl/engine.h>
|
||||
# endif
|
||||
#endif
|
||||
#include "ssl-common.h"
|
||||
|
||||
#define PKEY_ID_PKCS7 2
|
||||
|
||||
@@ -40,41 +43,6 @@ void format(void)
|
||||
exit(2);
|
||||
}
|
||||
|
||||
static void display_openssl_errors(int l)
|
||||
{
|
||||
const char *file;
|
||||
char buf[120];
|
||||
int e, line;
|
||||
|
||||
if (ERR_peek_error() == 0)
|
||||
return;
|
||||
fprintf(stderr, "At main.c:%d:\n", l);
|
||||
|
||||
while ((e = ERR_get_error_line(&file, &line))) {
|
||||
ERR_error_string(e, buf);
|
||||
fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line);
|
||||
}
|
||||
}
|
||||
|
||||
static void drain_openssl_errors(void)
|
||||
{
|
||||
const char *file;
|
||||
int line;
|
||||
|
||||
if (ERR_peek_error() == 0)
|
||||
return;
|
||||
while (ERR_get_error_line(&file, &line)) {}
|
||||
}
|
||||
|
||||
#define ERR(cond, fmt, ...) \
|
||||
do { \
|
||||
bool __cond = (cond); \
|
||||
display_openssl_errors(__LINE__); \
|
||||
if (__cond) { \
|
||||
err(1, fmt, ## __VA_ARGS__); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
static const char *key_pass;
|
||||
static BIO *wb;
|
||||
static char *cert_dst;
|
||||
@@ -94,6 +62,66 @@ static void write_cert(X509 *x509)
|
||||
fprintf(stderr, "Extracted cert: %s\n", buf);
|
||||
}
|
||||
|
||||
static X509 *load_cert_pkcs11(const char *cert_src)
|
||||
{
|
||||
X509 *cert = NULL;
|
||||
#ifdef USE_PKCS11_PROVIDER
|
||||
OSSL_STORE_CTX *store;
|
||||
|
||||
if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true))
|
||||
ERR(1, "OSSL_PROVIDER_try_load(pkcs11)");
|
||||
if (!OSSL_PROVIDER_try_load(NULL, "default", true))
|
||||
ERR(1, "OSSL_PROVIDER_try_load(default)");
|
||||
|
||||
store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL);
|
||||
ERR(!store, "OSSL_STORE_open");
|
||||
|
||||
while (!OSSL_STORE_eof(store)) {
|
||||
OSSL_STORE_INFO *info = OSSL_STORE_load(store);
|
||||
|
||||
if (!info) {
|
||||
drain_openssl_errors(__LINE__, 0);
|
||||
continue;
|
||||
}
|
||||
if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) {
|
||||
cert = OSSL_STORE_INFO_get1_CERT(info);
|
||||
ERR(!cert, "OSSL_STORE_INFO_get1_CERT");
|
||||
}
|
||||
OSSL_STORE_INFO_free(info);
|
||||
if (cert)
|
||||
break;
|
||||
}
|
||||
OSSL_STORE_close(store);
|
||||
#elif defined(USE_PKCS11_ENGINE)
|
||||
ENGINE *e;
|
||||
struct {
|
||||
const char *cert_id;
|
||||
X509 *cert;
|
||||
} parms;
|
||||
|
||||
parms.cert_id = cert_src;
|
||||
parms.cert = NULL;
|
||||
|
||||
ENGINE_load_builtin_engines();
|
||||
drain_openssl_errors(__LINE__, 1);
|
||||
e = ENGINE_by_id("pkcs11");
|
||||
ERR(!e, "Load PKCS#11 ENGINE");
|
||||
if (ENGINE_init(e))
|
||||
drain_openssl_errors(__LINE__, 1);
|
||||
else
|
||||
ERR(1, "ENGINE_init");
|
||||
if (key_pass)
|
||||
ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
|
||||
ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
|
||||
ERR(!parms.cert, "Get X.509 from PKCS#11");
|
||||
cert = parms.cert;
|
||||
#else
|
||||
fprintf(stderr, "no pkcs11 engine/provider available\n");
|
||||
exit(1);
|
||||
#endif
|
||||
return cert;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
char *cert_src;
|
||||
@@ -122,28 +150,10 @@ int main(int argc, char **argv)
|
||||
fclose(f);
|
||||
exit(0);
|
||||
} else if (!strncmp(cert_src, "pkcs11:", 7)) {
|
||||
ENGINE *e;
|
||||
struct {
|
||||
const char *cert_id;
|
||||
X509 *cert;
|
||||
} parms;
|
||||
X509 *cert = load_cert_pkcs11(cert_src);
|
||||
|
||||
parms.cert_id = cert_src;
|
||||
parms.cert = NULL;
|
||||
|
||||
ENGINE_load_builtin_engines();
|
||||
drain_openssl_errors();
|
||||
e = ENGINE_by_id("pkcs11");
|
||||
ERR(!e, "Load PKCS#11 ENGINE");
|
||||
if (ENGINE_init(e))
|
||||
drain_openssl_errors();
|
||||
else
|
||||
ERR(1, "ENGINE_init");
|
||||
if (key_pass)
|
||||
ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN");
|
||||
ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
|
||||
ERR(!parms.cert, "Get X.509 from PKCS#11");
|
||||
write_cert(parms.cert);
|
||||
ERR(!cert, "load_cert_pkcs11 failed");
|
||||
write_cert(cert);
|
||||
} else {
|
||||
BIO *b;
|
||||
X509 *x509;
|
||||
|
||||
@@ -22,8 +22,8 @@ static const char * const profile_names[] = {
|
||||
};
|
||||
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
|
||||
|
||||
static ssize_t platform_profile_choices_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
static ssize_t platform_profile_choices_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
int len = 0;
|
||||
@@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev,
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t platform_profile_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
static ssize_t platform_profile_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
|
||||
@@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev,
|
||||
return sysfs_emit(buf, "%s\n", profile_names[profile]);
|
||||
}
|
||||
|
||||
static ssize_t platform_profile_store(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
static ssize_t platform_profile_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int err, i;
|
||||
@@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev,
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR_RO(platform_profile_choices);
|
||||
static DEVICE_ATTR_RW(platform_profile);
|
||||
static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
|
||||
static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
|
||||
|
||||
static struct attribute *platform_profile_attrs[] = {
|
||||
&dev_attr_platform_profile_choices.attr,
|
||||
&dev_attr_platform_profile.attr,
|
||||
&attr_platform_profile_choices.attr,
|
||||
&attr_platform_profile.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
||||
@@ -591,6 +591,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
|
||||
.driver_data = board_ahci_yes_fbs },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
|
||||
.driver_data = board_ahci_yes_fbs },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
|
||||
.driver_data = board_ahci_yes_fbs },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
|
||||
.driver_data = board_ahci_yes_fbs },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
|
||||
|
||||
@@ -1496,8 +1496,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
|
||||
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
|
||||
tf.command = ATA_CMD_PACKET;
|
||||
|
||||
/* is it pointless to prefer PIO for "safety reasons"? */
|
||||
if (ap->flags & ATA_FLAG_PIO_DMA) {
|
||||
/*
|
||||
* Do not use DMA if the connected device only supports PIO, even if the
|
||||
* port prefers PIO commands via DMA.
|
||||
*
|
||||
* Ideally, we should call atapi_check_dma() to check if it is safe for
|
||||
* the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
|
||||
* Since we can't check the command, perhaps we should only use pio?
|
||||
*/
|
||||
if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
|
||||
tf.protocol = ATAPI_PROT_DMA;
|
||||
tf.feature |= ATAPI_PKT_DMA;
|
||||
} else {
|
||||
|
||||
@@ -1365,6 +1365,8 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
|
||||
unsigned int err_mask, tag;
|
||||
u8 *sense, sk = 0, asc = 0, ascq = 0;
|
||||
u64 sense_valid, val;
|
||||
u16 extended_sense;
|
||||
bool aux_icc_valid;
|
||||
int ret = 0;
|
||||
|
||||
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
|
||||
@@ -1384,6 +1386,8 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
|
||||
|
||||
sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
|
||||
((u64)buf[10] << 16) | ((u64)buf[11] << 24);
|
||||
extended_sense = get_unaligned_le16(&buf[14]);
|
||||
aux_icc_valid = extended_sense & BIT(15);
|
||||
|
||||
ata_qc_for_each_raw(ap, qc, tag) {
|
||||
if (!(qc->flags & ATA_QCFLAG_EH) ||
|
||||
@@ -1411,6 +1415,17 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
|
||||
continue;
|
||||
}
|
||||
|
||||
qc->result_tf.nsect = sense[6];
|
||||
qc->result_tf.hob_nsect = sense[7];
|
||||
qc->result_tf.lbal = sense[8];
|
||||
qc->result_tf.lbam = sense[9];
|
||||
qc->result_tf.lbah = sense[10];
|
||||
qc->result_tf.hob_lbal = sense[11];
|
||||
qc->result_tf.hob_lbam = sense[12];
|
||||
qc->result_tf.hob_lbah = sense[13];
|
||||
if (aux_icc_valid)
|
||||
qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
|
||||
|
||||
/* Set sense without also setting scsicmd->result */
|
||||
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
|
||||
qc->scsicmd->sense_buffer, sk,
|
||||
|
||||
@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
|
||||
|
||||
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
|
||||
resource_size(cmd_res));
|
||||
if (!ap->ioaddr.cmd_addr)
|
||||
return -ENOMEM;
|
||||
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
|
||||
resource_size(ctl_res));
|
||||
if (!ap->ioaddr.ctl_addr)
|
||||
return -ENOMEM;
|
||||
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
|
||||
resource_size(dma_res));
|
||||
if (!ap->ioaddr.bmdma_addr)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Adjust register offsets
|
||||
|
||||
@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
|
||||
mmio += PDC_CHIP0_OFS;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
|
||||
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
|
||||
pdc_i2c_read_data[i].reg,
|
||||
&spd0[pdc_i2c_read_data[i].ofs]);
|
||||
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
|
||||
pdc_i2c_read_data[i].reg,
|
||||
&spd0[pdc_i2c_read_data[i].ofs])) {
|
||||
dev_err(host->dev,
|
||||
"Failed in i2c read at index %d: device=%#x, reg=%#x\n",
|
||||
i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
|
||||
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
|
||||
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
|
||||
|
||||
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
|
||||
size = pdc20621_prog_dimm0(host);
|
||||
if (size < 0)
|
||||
return size;
|
||||
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
|
||||
|
||||
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
|
||||
|
||||
@@ -687,6 +687,13 @@ int devres_release_group(struct device *dev, void *id)
|
||||
spin_unlock_irqrestore(&dev->devres_lock, flags);
|
||||
|
||||
release_nodes(dev, &todo);
|
||||
} else if (list_empty(&dev->devres_head)) {
|
||||
/*
|
||||
* dev is probably dying via devres_release_all(): groups
|
||||
* have already been removed and are on the process of
|
||||
* being released - don't touch and don't warn.
|
||||
*/
|
||||
spin_unlock_irqrestore(&dev->devres_lock, flags);
|
||||
} else {
|
||||
WARN_ON(1);
|
||||
spin_unlock_irqrestore(&dev->devres_lock, flags);
|
||||
|
||||
@@ -624,19 +624,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
* dependency.
|
||||
*/
|
||||
fput(old_file);
|
||||
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
|
||||
if (partscan)
|
||||
loop_reread_partitions(lo);
|
||||
|
||||
error = 0;
|
||||
done:
|
||||
/* enable and uncork uevent now that we are done */
|
||||
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
|
||||
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
|
||||
return error;
|
||||
|
||||
out_err:
|
||||
loop_global_unlock(lo, is_loop);
|
||||
out_putf:
|
||||
fput(file);
|
||||
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
|
||||
goto done;
|
||||
}
|
||||
|
||||
@@ -1104,8 +1105,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
|
||||
if (partscan)
|
||||
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
|
||||
|
||||
/* enable and uncork uevent now that we are done */
|
||||
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
|
||||
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
|
||||
|
||||
loop_global_unlock(lo, is_loop);
|
||||
if (partscan)
|
||||
|
||||
@@ -807,6 +807,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
|
||||
const char *firmware_name)
|
||||
{
|
||||
struct qca_fw_config config = {};
|
||||
const char *variant = "";
|
||||
int err;
|
||||
u8 rom_ver = 0;
|
||||
u32 soc_ver;
|
||||
@@ -901,13 +902,11 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
|
||||
case QCA_WCN3990:
|
||||
case QCA_WCN3991:
|
||||
case QCA_WCN3998:
|
||||
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
|
||||
snprintf(config.fwname, sizeof(config.fwname),
|
||||
"qca/crnv%02xu.bin", rom_ver);
|
||||
} else {
|
||||
snprintf(config.fwname, sizeof(config.fwname),
|
||||
"qca/crnv%02x.bin", rom_ver);
|
||||
}
|
||||
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
|
||||
variant = "u";
|
||||
|
||||
snprintf(config.fwname, sizeof(config.fwname),
|
||||
"qca/crnv%02x%s.bin", rom_ver, variant);
|
||||
break;
|
||||
case QCA_WCN3988:
|
||||
snprintf(config.fwname, sizeof(config.fwname),
|
||||
|
||||
@@ -1194,6 +1194,8 @@ next:
|
||||
rtl_dev_err(hdev, "mandatory config file %s not found",
|
||||
btrtl_dev->ic_info->cfg_name);
|
||||
ret = btrtl_dev->cfg_len;
|
||||
if (!ret)
|
||||
ret = -EINVAL;
|
||||
goto err_free;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
|
||||
if (!skb) {
|
||||
percpu_down_read(&hu->proto_lock);
|
||||
|
||||
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
|
||||
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
|
||||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
|
||||
skb = hu->proto->dequeue(hu);
|
||||
|
||||
percpu_up_read(&hu->proto_lock);
|
||||
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
|
||||
if (!percpu_down_read_trylock(&hu->proto_lock))
|
||||
return 0;
|
||||
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
|
||||
!test_bit(HCI_UART_PROTO_INIT, &hu->flags))
|
||||
goto no_schedule;
|
||||
|
||||
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
|
||||
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
||||
percpu_down_read(&hu->proto_lock);
|
||||
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
|
||||
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
|
||||
percpu_up_read(&hu->proto_lock);
|
||||
return -EUNATCH;
|
||||
}
|
||||
@@ -582,7 +585,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
|
||||
if (tty != hu->tty)
|
||||
return;
|
||||
|
||||
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
|
||||
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
|
||||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
|
||||
hci_uart_tx_wakeup(hu);
|
||||
}
|
||||
|
||||
@@ -608,7 +612,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
|
||||
|
||||
percpu_down_read(&hu->proto_lock);
|
||||
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
|
||||
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
|
||||
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
|
||||
percpu_up_read(&hu->proto_lock);
|
||||
return;
|
||||
}
|
||||
@@ -704,12 +709,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
|
||||
|
||||
hu->proto = p;
|
||||
|
||||
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
|
||||
|
||||
err = hci_uart_register_dev(hu);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
set_bit(HCI_UART_PROTO_READY, &hu->flags);
|
||||
clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -89,6 +89,7 @@ struct hci_uart {
|
||||
#define HCI_UART_REGISTERED 1
|
||||
#define HCI_UART_PROTO_READY 2
|
||||
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
|
||||
#define HCI_UART_PROTO_INIT 4
|
||||
|
||||
/* TX states */
|
||||
#define HCI_UART_SENDING 1
|
||||
|
||||
@@ -289,18 +289,18 @@ static void vhci_coredump(struct hci_dev *hdev)
|
||||
|
||||
static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
char buf[80];
|
||||
const char *buf;
|
||||
|
||||
snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n");
|
||||
buf = "Controller Name: vhci_ctrl\n";
|
||||
skb_put_data(skb, buf, strlen(buf));
|
||||
|
||||
snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n");
|
||||
buf = "Firmware Version: vhci_fw\n";
|
||||
skb_put_data(skb, buf, strlen(buf));
|
||||
|
||||
snprintf(buf, sizeof(buf), "Driver: vhci_drv\n");
|
||||
buf = "Driver: vhci_drv\n";
|
||||
skb_put_data(skb, buf, strlen(buf));
|
||||
|
||||
snprintf(buf, sizeof(buf), "Vendor: vhci\n");
|
||||
buf = "Vendor: vhci\n";
|
||||
skb_put_data(skb, buf, strlen(buf));
|
||||
}
|
||||
|
||||
|
||||
@@ -1204,11 +1204,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
|
||||
struct mhi_ring_element *mhi_tre;
|
||||
struct mhi_buf_info *buf_info;
|
||||
int eot, eob, chain, bei;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
/* Protect accesses for reading and incrementing WP */
|
||||
write_lock_bh(&mhi_chan->lock);
|
||||
|
||||
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
}
|
||||
|
||||
buf_ring = &mhi_chan->buf_ring;
|
||||
tre_ring = &mhi_chan->tre_ring;
|
||||
|
||||
@@ -1226,10 +1231,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
|
||||
|
||||
if (!info->pre_mapped) {
|
||||
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
|
||||
if (ret) {
|
||||
write_unlock_bh(&mhi_chan->lock);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
eob = !!(flags & MHI_EOB);
|
||||
@@ -1246,9 +1249,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
|
||||
mhi_add_ring_element(mhi_cntrl, tre_ring);
|
||||
mhi_add_ring_element(mhi_cntrl, buf_ring);
|
||||
|
||||
out:
|
||||
write_unlock_bh(&mhi_chan->lock);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
|
||||
|
||||
@@ -165,6 +165,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
|
||||
goto out_ops;
|
||||
|
||||
mutex_lock(&chip->tpm_mutex);
|
||||
|
||||
/* tmp_chip_start may issue IO that is denied while suspended */
|
||||
if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
|
||||
goto out_lock;
|
||||
|
||||
rc = tpm_chip_start(chip);
|
||||
if (rc)
|
||||
goto out_lock;
|
||||
|
||||
@@ -468,18 +468,11 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
|
||||
if (!chip)
|
||||
return -ENODEV;
|
||||
|
||||
/* Give back zero bytes, as TPM chip has not yet fully resumed: */
|
||||
if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (chip->flags & TPM_CHIP_FLAG_TPM2)
|
||||
rc = tpm2_get_random(chip, out, max);
|
||||
else
|
||||
rc = tpm1_get_random(chip, out, max);
|
||||
|
||||
out:
|
||||
tpm_put_ops(chip);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -114,11 +114,10 @@ again:
|
||||
return 0;
|
||||
/* process status changes without irq support */
|
||||
do {
|
||||
usleep_range(priv->timeout_min, priv->timeout_max);
|
||||
status = chip->ops->status(chip);
|
||||
if ((status & mask) == mask)
|
||||
return 0;
|
||||
usleep_range(priv->timeout_min,
|
||||
priv->timeout_max);
|
||||
} while (time_before(jiffies, stop));
|
||||
return -ETIME;
|
||||
}
|
||||
@@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
|
||||
|
||||
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
|
||||
&priv->int_queue, false) < 0) {
|
||||
rc = -ETIME;
|
||||
if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
|
||||
rc = -EAGAIN;
|
||||
else
|
||||
rc = -ETIME;
|
||||
goto out_err;
|
||||
}
|
||||
status = tpm_tis_status(chip);
|
||||
@@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
|
||||
|
||||
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
|
||||
&priv->int_queue, false) < 0) {
|
||||
rc = -ETIME;
|
||||
if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
|
||||
rc = -EAGAIN;
|
||||
else
|
||||
rc = -ETIME;
|
||||
goto out_err;
|
||||
}
|
||||
status = tpm_tis_status(chip);
|
||||
@@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
|
||||
if (rc >= 0)
|
||||
/* Data transfer done successfully */
|
||||
break;
|
||||
else if (rc != -EIO)
|
||||
else if (rc != -EAGAIN && rc != -EIO)
|
||||
/* Data transfer failed, not recoverable */
|
||||
return rc;
|
||||
|
||||
usleep_range(priv->timeout_min, priv->timeout_max);
|
||||
}
|
||||
|
||||
/* go and do it */
|
||||
@@ -1147,6 +1154,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
|
||||
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
|
||||
}
|
||||
|
||||
if (priv->manufacturer_id == TPM_VID_IFX)
|
||||
set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
|
||||
|
||||
if (is_bsw()) {
|
||||
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
|
||||
ILB_REMAP_SIZE);
|
||||
|
||||
@@ -89,6 +89,7 @@ enum tpm_tis_flags {
|
||||
TPM_TIS_INVALID_STATUS = 1,
|
||||
TPM_TIS_DEFAULT_CANCELLATION = 2,
|
||||
TPM_TIS_IRQ_TESTED = 3,
|
||||
TPM_TIS_STATUS_VALID_RETRY = 4,
|
||||
};
|
||||
|
||||
struct tpm_tis_data {
|
||||
|
||||
@@ -27,7 +27,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
|
||||
|
||||
static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
|
||||
{
|
||||
bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
|
||||
bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
|
||||
u32 val;
|
||||
|
||||
regmap_read(br->clkr.regmap, br->halt_reg, &val);
|
||||
@@ -43,7 +43,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
|
||||
{
|
||||
u32 val;
|
||||
u32 mask;
|
||||
bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
|
||||
bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
|
||||
|
||||
mask = CBCR_NOC_FSM_STATUS;
|
||||
mask |= CBCR_CLK_OFF;
|
||||
|
||||
@@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
|
||||
*/
|
||||
udelay(1);
|
||||
|
||||
if (sc->flags & RETAIN_FF_ENABLE)
|
||||
gdsc_retain_ff_on(sc);
|
||||
|
||||
/* Turn on HW trigger mode if supported */
|
||||
if (sc->flags & HW_CTRL) {
|
||||
ret = gdsc_hwctrl(sc, true);
|
||||
@@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
if (sc->flags & RETAIN_FF_ENABLE)
|
||||
gdsc_retain_ff_on(sc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -420,13 +420,6 @@ static int gdsc_init(struct gdsc *sc)
|
||||
goto err_disable_supply;
|
||||
}
|
||||
|
||||
/* Turn on HW trigger mode if supported */
|
||||
if (sc->flags & HW_CTRL) {
|
||||
ret = gdsc_hwctrl(sc, true);
|
||||
if (ret < 0)
|
||||
goto err_disable_supply;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure the retain bit is set if the GDSC is already on,
|
||||
* otherwise we end up turning off the GDSC and destroying all
|
||||
@@ -434,6 +427,14 @@ static int gdsc_init(struct gdsc *sc)
|
||||
*/
|
||||
if (sc->flags & RETAIN_FF_ENABLE)
|
||||
gdsc_retain_ff_on(sc);
|
||||
|
||||
/* Turn on HW trigger mode if supported */
|
||||
if (sc->flags & HW_CTRL) {
|
||||
ret = gdsc_hwctrl(sc, true);
|
||||
if (ret < 0)
|
||||
goto err_disable_supply;
|
||||
}
|
||||
|
||||
} else if (sc->flags & ALWAYS_ON) {
|
||||
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
|
||||
gdsc_enable(&sc->pd);
|
||||
@@ -465,6 +466,23 @@ err_disable_supply:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
|
||||
{
|
||||
struct device *dev = desc->dev;
|
||||
struct gdsc **scs = desc->scs;
|
||||
int i;
|
||||
|
||||
/* Remove subdomains */
|
||||
for (i = num - 1; i >= 0; i--) {
|
||||
if (!scs[i])
|
||||
continue;
|
||||
if (scs[i]->parent)
|
||||
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
|
||||
else if (!IS_ERR_OR_NULL(dev->pm_domain))
|
||||
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
|
||||
}
|
||||
}
|
||||
|
||||
int gdsc_register(struct gdsc_desc *desc,
|
||||
struct reset_controller_dev *rcdev, struct regmap *regmap)
|
||||
{
|
||||
@@ -509,30 +527,27 @@ int gdsc_register(struct gdsc_desc *desc,
|
||||
if (!scs[i])
|
||||
continue;
|
||||
if (scs[i]->parent)
|
||||
pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
|
||||
ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
|
||||
else if (!IS_ERR_OR_NULL(dev->pm_domain))
|
||||
pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
|
||||
ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
|
||||
if (ret)
|
||||
goto err_pm_subdomain_remove;
|
||||
}
|
||||
|
||||
return of_genpd_add_provider_onecell(dev->of_node, data);
|
||||
|
||||
err_pm_subdomain_remove:
|
||||
gdsc_pm_subdomain_remove(desc, i);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void gdsc_unregister(struct gdsc_desc *desc)
|
||||
{
|
||||
int i;
|
||||
struct device *dev = desc->dev;
|
||||
struct gdsc **scs = desc->scs;
|
||||
size_t num = desc->num;
|
||||
|
||||
/* Remove subdomains */
|
||||
for (i = 0; i < num; i++) {
|
||||
if (!scs[i])
|
||||
continue;
|
||||
if (scs[i]->parent)
|
||||
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
|
||||
else if (!IS_ERR_OR_NULL(dev->pm_domain))
|
||||
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
|
||||
}
|
||||
gdsc_pm_subdomain_remove(desc, num);
|
||||
of_genpd_del_provider(dev->of_node);
|
||||
}
|
||||
|
||||
|
||||
@@ -168,9 +168,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
|
||||
ret = device_init_wakeup(&pdev->dev, true);
|
||||
if (ret)
|
||||
goto out_clk_disable;
|
||||
device_set_wakeup_capable(&pdev->dev, true);
|
||||
|
||||
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
|
||||
if (ret)
|
||||
|
||||
@@ -2725,10 +2725,18 @@ EXPORT_SYMBOL(cpufreq_update_policy);
|
||||
*/
|
||||
void cpufreq_update_limits(unsigned int cpu)
|
||||
{
|
||||
struct cpufreq_policy *policy;
|
||||
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (!policy)
|
||||
return;
|
||||
|
||||
if (cpufreq_driver->update_limits)
|
||||
cpufreq_driver->update_limits(cpu);
|
||||
else
|
||||
cpufreq_update_policy(cpu);
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
|
||||
|
||||
|
||||
@@ -122,12 +122,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
|
||||
qm_fd_addr_set64(&fd, addr);
|
||||
|
||||
do {
|
||||
refcount_inc(&req->drv_ctx->refcnt);
|
||||
ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
|
||||
if (likely(!ret)) {
|
||||
refcount_inc(&req->drv_ctx->refcnt);
|
||||
if (likely(!ret))
|
||||
return 0;
|
||||
}
|
||||
|
||||
refcount_dec(&req->drv_ctx->refcnt);
|
||||
if (ret != -EBUSY)
|
||||
break;
|
||||
num_retries++;
|
||||
|
||||
@@ -243,14 +243,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
|
||||
pdev_new = to_pci_dev(dev_new);
|
||||
pdev_cur = to_pci_dev(dev_cur);
|
||||
|
||||
if (pdev_new->bus->number < pdev_cur->bus->number)
|
||||
return true;
|
||||
if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
|
||||
return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
|
||||
|
||||
if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
|
||||
return true;
|
||||
if (pdev_new->bus->number != pdev_cur->bus->number)
|
||||
return pdev_new->bus->number < pdev_cur->bus->number;
|
||||
|
||||
if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
|
||||
return true;
|
||||
if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
|
||||
return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
|
||||
|
||||
if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
|
||||
return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -171,7 +171,7 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
* the EFI memory map. Other related structures, e.g. x86 e820ext, need
* to factor in this headroom requirement as well.
*/
#define EFI_MMAP_NR_SLACK_SLOTS 8
#define EFI_MMAP_NR_SLACK_SLOTS 32

typedef struct efi_generic_dev_path efi_device_path_protocol_t;

@@ -822,6 +822,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
|
||||
struct gpio_irq_chip *irq;
|
||||
struct tegra_gpio *gpio;
|
||||
struct device_node *np;
|
||||
struct resource *res;
|
||||
char **names;
|
||||
int err;
|
||||
|
||||
@@ -841,19 +842,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
|
||||
gpio->num_banks++;
|
||||
|
||||
/* get register apertures */
|
||||
gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
|
||||
if (IS_ERR(gpio->secure)) {
|
||||
gpio->secure = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(gpio->secure))
|
||||
return PTR_ERR(gpio->secure);
|
||||
}
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
|
||||
if (!res)
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
gpio->secure = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(gpio->secure))
|
||||
return PTR_ERR(gpio->secure);
|
||||
|
||||
gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
|
||||
if (IS_ERR(gpio->base)) {
|
||||
gpio->base = devm_platform_ioremap_resource(pdev, 1);
|
||||
if (IS_ERR(gpio->base))
|
||||
return PTR_ERR(gpio->base);
|
||||
}
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
|
||||
if (!res)
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
gpio->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(gpio->base))
|
||||
return PTR_ERR(gpio->base);
|
||||
|
||||
err = platform_irq_count(pdev);
|
||||
if (err < 0)
|
||||
|
||||
@@ -1018,6 +1018,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
|
||||
ret = pm_runtime_get_sync(&pdev->dev);
|
||||
if (ret < 0)
|
||||
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
|
||||
device_init_wakeup(&pdev->dev, 0);
|
||||
gpiochip_remove(&gpio->chip);
|
||||
clk_disable_unprepare(gpio->clk);
|
||||
device_set_wakeup_capable(&pdev->dev, 0);
|
||||
|
||||
@@ -69,6 +69,7 @@ config DRM_USE_DYNAMIC_DEBUG
|
||||
config DRM_KUNIT_TEST_HELPERS
|
||||
tristate
|
||||
depends on DRM && KUNIT
|
||||
select DRM_KMS_HELPER
|
||||
help
|
||||
KUnit Helpers for KMS drivers.
|
||||
|
||||
@@ -79,7 +80,6 @@ config DRM_KUNIT_TEST
|
||||
select DRM_DISPLAY_DP_HELPER
|
||||
select DRM_DISPLAY_HELPER
|
||||
select DRM_LIB_RANDOM
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_BUDDY
|
||||
select DRM_EXPORT_FOR_TESTS if m
|
||||
select DRM_KUNIT_TEST_HELPERS
|
||||
|
||||
@@ -6015,6 +6015,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
|
||||
{
|
||||
struct dma_fence *old = NULL;
|
||||
|
||||
dma_fence_get(gang);
|
||||
do {
|
||||
dma_fence_put(old);
|
||||
rcu_read_lock();
|
||||
@@ -6024,12 +6025,19 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
|
||||
if (old == gang)
|
||||
break;
|
||||
|
||||
if (!dma_fence_is_signaled(old))
|
||||
if (!dma_fence_is_signaled(old)) {
|
||||
dma_fence_put(gang);
|
||||
return old;
|
||||
}
|
||||
|
||||
} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
|
||||
old, gang) != old);
|
||||
|
||||
/*
|
||||
* Drop it once for the exchanged reference in adev and once for the
|
||||
* thread local reference acquired in amdgpu_device_get_gang().
|
||||
*/
|
||||
dma_fence_put(old);
|
||||
dma_fence_put(old);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -211,7 +211,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
if (sgt->sgl->page_link) {
|
||||
if (sg_page(sgt->sgl)) {
|
||||
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
|
||||
sg_free_table(sgt);
|
||||
kfree(sgt);
|
||||
|
||||
@@ -1651,7 +1651,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
|
||||
};
|
||||
|
||||
static const struct pci_device_id pciidlist[] = {
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
|
||||
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
|
||||
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
|
||||
@@ -1724,8 +1723,6 @@ static const struct pci_device_id pciidlist[] = {
|
||||
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
|
||||
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
|
||||
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
|
||||
#endif
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
/* Kaveri */
|
||||
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
|
||||
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
|
||||
@@ -1808,7 +1805,6 @@ static const struct pci_device_id pciidlist[] = {
|
||||
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
|
||||
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
|
||||
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
|
||||
#endif
|
||||
/* topaz */
|
||||
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
|
||||
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
|
||||
@@ -2090,14 +2086,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
switch (flags & AMD_ASIC_MASK) {
|
||||
case CHIP_TAHITI:
|
||||
case CHIP_PITCAIRN:
|
||||
case CHIP_VERDE:
|
||||
case CHIP_OLAND:
|
||||
case CHIP_HAINAN:
|
||||
#ifdef CONFIG_DRM_AMDGPU_SI
|
||||
if (!amdgpu_si_support) {
|
||||
switch (flags & AMD_ASIC_MASK) {
|
||||
case CHIP_TAHITI:
|
||||
case CHIP_PITCAIRN:
|
||||
case CHIP_VERDE:
|
||||
case CHIP_OLAND:
|
||||
case CHIP_HAINAN:
|
||||
if (!amdgpu_si_support) {
|
||||
dev_info(&pdev->dev,
|
||||
"SI support provided by radeon.\n");
|
||||
dev_info(&pdev->dev,
|
||||
@@ -2105,16 +2101,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
|
||||
);
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
break;
|
||||
#else
|
||||
dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
|
||||
return -ENODEV;
|
||||
#endif
|
||||
case CHIP_KAVERI:
|
||||
case CHIP_BONAIRE:
|
||||
case CHIP_HAWAII:
|
||||
case CHIP_KABINI:
|
||||
case CHIP_MULLINS:
|
||||
#ifdef CONFIG_DRM_AMDGPU_CIK
|
||||
if (!amdgpu_cik_support) {
|
||||
switch (flags & AMD_ASIC_MASK) {
|
||||
case CHIP_KAVERI:
|
||||
case CHIP_BONAIRE:
|
||||
case CHIP_HAWAII:
|
||||
case CHIP_KABINI:
|
||||
case CHIP_MULLINS:
|
||||
if (!amdgpu_cik_support) {
|
||||
dev_info(&pdev->dev,
|
||||
"CIK support provided by radeon.\n");
|
||||
dev_info(&pdev->dev,
|
||||
@@ -2122,8 +2120,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
|
||||
);
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
break;
|
||||
#else
|
||||
dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
|
||||
return -ENODEV;
|
||||
#endif
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
|
||||
if (IS_ERR(adev))
|
||||
|
||||
@@ -212,6 +212,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
|
||||
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
|
||||
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
|
||||
}
|
||||
|
||||
if (!access_ok((const void __user *) args->read_pointer_address,
|
||||
sizeof(uint32_t))) {
|
||||
pr_err("Can't access read pointer\n");
|
||||
@@ -477,6 +482,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
|
||||
args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
|
||||
pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
|
||||
}
|
||||
|
||||
properties.queue_address = args->ring_base_address;
|
||||
properties.queue_size = args->ring_size;
|
||||
properties.queue_percent = args->queue_percentage & 0xFF;
|
||||
|
||||
@@ -1388,6 +1388,11 @@ int kfd_debugfs_hang_hws(struct kfd_node *dev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->kfd->shared_resources.enable_mes) {
|
||||
dev_err(dev->adev->dev, "Inducing MES hang is not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return dqm_debugfs_hang_hws(dev->dqm);
|
||||
}
|
||||
|
||||
|
||||
@@ -35,6 +35,7 @@
|
||||
#include <linux/pm_runtime.h>
|
||||
#include "amdgpu_amdkfd.h"
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_reset.h"
|
||||
|
||||
struct mm_struct;
|
||||
|
||||
@@ -1110,6 +1111,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p)
|
||||
p->kobj = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* If any GPU is ongoing reset, wait for reset complete.
|
||||
*/
|
||||
static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < p->n_pdds; i++)
|
||||
flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq);
|
||||
}
|
||||
|
||||
/* No process locking is needed in this function, because the process
|
||||
* is not findable any more. We must assume that no other thread is
|
||||
* using it any more, otherwise we couldn't safely free the process
|
||||
@@ -1123,6 +1135,11 @@ static void kfd_process_wq_release(struct work_struct *work)
|
||||
kfd_process_dequeue_from_all_devices(p);
|
||||
pqm_uninit(&p->pqm);
|
||||
|
||||
/*
|
||||
* If GPU in reset, user queues may still running, wait for reset complete.
|
||||
*/
|
||||
kfd_process_wait_gpu_reset_complete(p);
|
||||
|
||||
/* Signal the eviction fence after user mode queues are
|
||||
* destroyed. This allows any BOs to be freed without
|
||||
* triggering pointless evictions or waiting for fences.
|
||||
|
||||
@@ -510,7 +510,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
|
||||
pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
|
||||
pqm->process->pasid,
|
||||
pqn->q->properties.queue_id, retval);
|
||||
if (retval != -ETIME)
|
||||
if (retval != -ETIME && retval != -EIO)
|
||||
goto err_destroy_queue;
|
||||
}
|
||||
|
||||
|
||||
@@ -4484,17 +4484,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
|
||||
}
|
||||
}
|
||||
|
||||
if (link_cnt > (MAX_PIPES * 2)) {
|
||||
DRM_ERROR(
|
||||
"KMS: Cannot support more than %d display indexes\n",
|
||||
MAX_PIPES * 2);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* loops over all connectors on the board */
|
||||
for (i = 0; i < link_cnt; i++) {
|
||||
struct dc_link *link = NULL;
|
||||
|
||||
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
|
||||
DRM_ERROR(
|
||||
"KMS: Cannot support more than %d display indexes\n",
|
||||
AMDGPU_DM_MAX_DISPLAY_INDEX);
|
||||
continue;
|
||||
}
|
||||
|
||||
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
|
||||
if (!aconnector)
|
||||
goto fail;
|
||||
|
||||
@@ -1563,7 +1563,9 @@ struct dc_link {
|
||||
bool dongle_mode_timing_override;
|
||||
bool blank_stream_on_ocs_change;
|
||||
bool read_dpcd204h_on_irq_hpd;
|
||||
bool force_dp_ffe_preset;
|
||||
} wa_flags;
|
||||
union dc_dp_ffe_preset forced_dp_ffe_preset;
|
||||
struct link_mst_stream_allocation_table mst_stream_alloc_table;
|
||||
|
||||
struct dc_link_status link_status;
|
||||
|
||||
@@ -1930,20 +1930,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
|
||||
dc->hwss.get_position(&pipe_ctx, 1, &position);
|
||||
vpos = position.vertical_count;
|
||||
|
||||
/* Avoid wraparound calculation issues */
|
||||
vupdate_start += stream->timing.v_total;
|
||||
vupdate_end += stream->timing.v_total;
|
||||
vpos += stream->timing.v_total;
|
||||
|
||||
if (vpos <= vupdate_start) {
|
||||
/* VPOS is in VACTIVE or back porch. */
|
||||
lines_to_vupdate = vupdate_start - vpos;
|
||||
} else if (vpos > vupdate_end) {
|
||||
/* VPOS is in the front porch. */
|
||||
return;
|
||||
} else {
|
||||
/* VPOS is in VUPDATE. */
|
||||
lines_to_vupdate = 0;
|
||||
lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start;
|
||||
}
|
||||
|
||||
/* Calculate time until VUPDATE in microseconds. */
|
||||
@@ -1951,13 +1942,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
|
||||
stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
|
||||
us_to_vupdate = lines_to_vupdate * us_per_line;
|
||||
|
||||
/* Stall out until the cursor update completes. */
|
||||
if (vupdate_end < vupdate_start)
|
||||
vupdate_end += stream->timing.v_total;
|
||||
|
||||
/* Position is in the range of vupdate start and end*/
|
||||
if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start)
|
||||
us_to_vupdate = 0;
|
||||
|
||||
/* 70 us is a conservative estimate of cursor update time*/
|
||||
if (us_to_vupdate > 70)
|
||||
return;
|
||||
|
||||
/* Stall out until the cursor update completes. */
|
||||
if (vupdate_end < vupdate_start)
|
||||
vupdate_end += stream->timing.v_total;
|
||||
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
|
||||
udelay(us_to_vupdate + us_vupdate);
|
||||
}
|
||||
|
||||
@@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable)
|
||||
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
|
||||
|
||||
REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable);
|
||||
REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable);
|
||||
REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1);
|
||||
}
|
||||
|
||||
void hubp31_soft_reset(struct hubp *hubp, bool reset)
|
||||
|
||||
@@ -697,6 +697,8 @@ void override_training_settings(
|
||||
lt_settings->pre_emphasis = overrides->pre_emphasis;
|
||||
if (overrides->post_cursor2 != NULL)
|
||||
lt_settings->post_cursor2 = overrides->post_cursor2;
|
||||
if (link->wa_flags.force_dp_ffe_preset && !dp_is_lttpr_present(link))
|
||||
lt_settings->ffe_preset = &link->forced_dp_ffe_preset;
|
||||
if (overrides->ffe_preset != NULL)
|
||||
lt_settings->ffe_preset = overrides->ffe_preset;
|
||||
/* Override HW lane settings with BIOS forced values if present */
|
||||
|
||||
@@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
|
||||
hwmgr->adev = adev;
|
||||
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
|
||||
hwmgr->device = amdgpu_cgs_create_device(adev);
|
||||
if (!hwmgr->device) {
|
||||
kfree(hwmgr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
mutex_init(&hwmgr->msg_lock);
|
||||
hwmgr->chip_family = adev->family;
|
||||
hwmgr->chip_id = adev->asic_type;
|
||||
|
||||
@@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
|
||||
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
|
||||
(hwmgr->thermal_controller.fanInfo.
|
||||
ucTachometerPulsesPerRevolution == 0) ||
|
||||
speed == 0 ||
|
||||
(!speed || speed > UINT_MAX/8) ||
|
||||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
|
||||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
|
||||
return 0;
|
||||
return -EINVAL;
|
||||
|
||||
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
|
||||
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
|
||||
|
||||
@@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
|
||||
int result = 0;
|
||||
|
||||
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
|
||||
speed == 0 ||
|
||||
(!speed || speed > UINT_MAX/8) ||
|
||||
(speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
|
||||
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
|
||||
return -1;
|
||||
return -EINVAL;
|
||||
|
||||
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
|
||||
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
|
||||
|
||||
@@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
|
||||
uint32_t tach_period, crystal_clock_freq;
|
||||
int result = 0;
|
||||
|
||||
if (!speed)
|
||||
if (!speed || speed > UINT_MAX/8)
|
||||
return -EINVAL;
|
||||
|
||||
if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
|
||||
|
||||
@@ -1274,6 +1274,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu,
|
||||
uint32_t crystal_clock_freq = 2500;
|
||||
uint32_t tach_period;
|
||||
|
||||
if (!speed || speed > UINT_MAX/8)
|
||||
return -EINVAL;
|
||||
|
||||
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
|
||||
WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT,
|
||||
REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
|
||||
|
||||
@@ -1202,7 +1202,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;

if (speed == 0)
if (!speed || speed > UINT_MAX/8)
return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement

@@ -1227,7 +1227,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t tach_period;
int ret;

if (!speed)
if (!speed || speed > UINT_MAX/8)
return -EINVAL;

ret = smu_v13_0_auto_fan_control(smu, 0);