Merge tag 'v6.1.81' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroidxu4-6.1.y

This is the 6.1.81 stable release

Change-Id: Ia06c2c1c12a68c95799b7fb01c44c52421f34ae4
Mauro (mdrjr) Ribeiro
2024-05-07 11:18:37 -03:00
223 changed files with 4348 additions and 2417 deletions

@@ -1416,7 +1416,7 @@ execution context provided by the EFI firmware.
The function prototype for the handover entry point looks like this::
efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
efi_stub_entry(void *handle, efi_system_table_t *table, struct boot_params *bp)
'handle' is the EFI image handle passed to the boot loader by the EFI
firmware, 'table' is the EFI system table - these are the first two
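
For reference, a minimal sketch of the renamed entry point as a C declaration (assuming the EFI typedefs from include/linux/efi.h; in this release only the symbol name changes, not the signature):

    void efi_stub_entry(void *handle, efi_system_table_t *table,
                        struct boot_params *bp);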

@@ -95,6 +95,9 @@ The kernel provides a function to invoke the buffer clearing:
mds_clear_cpu_buffers()
Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
Other than CFLAGS.ZF, this macro doesn't clobber any registers.
The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
(idle) transitions.
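
As a point of reference, a minimal sketch of the VERW idiom behind these helpers (the in-tree implementation in arch/x86/include/asm/nospec-branch.h may differ in detail):

    static __always_inline void mds_clear_cpu_buffers(void)
    {
    	static const u16 ds = __KERNEL_DS;

    	/*
    	 * VERW with a valid, writable segment selector flushes the
    	 * affected CPU buffers as a microcode side effect; only ZF
    	 * is clobbered, matching the CLEAR_CPU_BUFFERS contract.
    	 */
    	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
    }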
@@ -138,17 +141,30 @@ Mitigation points
When transitioning from kernel to user space the CPU buffers are flushed
on affected CPUs when the mitigation is not disabled on the kernel
command line. The mitigation is enabled through the static key
mds_user_clear.
command line. The mitigation is enabled through the feature flag
X86_FEATURE_CLEAR_CPU_BUF.
The mitigation is invoked in prepare_exit_to_usermode() which covers
all but one of the kernel to user space transitions. The exception
is when we return from a Non Maskable Interrupt (NMI), which is
handled directly in do_nmi().
The mitigation is invoked just before transitioning to userspace after
user registers are restored. This is done to minimize the window in
which kernel data could be accessed after VERW, e.g. via an NMI after
VERW.
(The reason that NMI is special is that prepare_exit_to_usermode() can
enable IRQs. In NMI context, NMIs are blocked, and we don't want to
enable IRQs with NMIs blocked.)
**Corner case not handled**
Interrupts returning to kernel don't clear CPU buffers since the
exit-to-user path is expected to do that anyway. But there could be
a case when an NMI is generated in the kernel after the exit-to-user path
has cleared the buffers. This case is not handled, and NMIs returning to
the kernel don't clear CPU buffers because:
1. It is rare to get an NMI after VERW, but before returning to userspace.
2. For an unprivileged user, there is no known way to make that NMI
less rare or target it.
3. It would take a large number of these precisely-timed NMIs to mount
an actual attack. There's presumably not enough bandwidth.
4. The NMI in question occurs after a VERW, i.e. when user state is
restored and most interesting data is already scrubbed. What's left
is only the data that NMI touches, and that may or may not be of
any interest.
2. C-State transition

@@ -10056,6 +10056,7 @@ F: drivers/infiniband/
F: include/rdma/
F: include/trace/events/ib_mad.h
F: include/trace/events/ib_umad.h
F: include/trace/misc/rdma.h
F: include/uapi/linux/if_infiniband.h
F: include/uapi/rdma/
F: samples/bpf/ibumad_kern.c
@@ -11144,6 +11145,12 @@ F: fs/nfs_common/
F: fs/nfsd/
F: include/linux/lockd/
F: include/linux/sunrpc/
F: include/trace/events/rpcgss.h
F: include/trace/events/rpcrdma.h
F: include/trace/events/sunrpc.h
F: include/trace/misc/fs.h
F: include/trace/misc/nfs.h
F: include/trace/misc/sunrpc.h
F: include/uapi/linux/nfsd/
F: include/uapi/linux/sunrpc/
F: net/sunrpc/

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 80
SUBLEVEL = 81
EXTRAVERSION =
NAME = Curry Ramen

@@ -59,7 +59,7 @@
reg = <0x80000000 0x2000>;
};
dma_apbh: dma-apbh@80004000 {
dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx23-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <0 14 20 0

@@ -78,7 +78,7 @@
status = "disabled";
};
dma_apbh: dma-apbh@80004000 {
dma_apbh: dma-controller@80004000 {
compatible = "fsl,imx28-dma-apbh";
reg = <0x80004000 0x2000>;
interrupts = <82 83 84 85

@@ -150,7 +150,7 @@
interrupt-parent = <&gpc>;
ranges;
dma_apbh: dma-apbh@110000 {
dma_apbh: dma-controller@110000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x00110000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,

@@ -209,7 +209,7 @@
power-domains = <&pd_pu>;
};
dma_apbh: dma-apbh@1804000 {
dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,

@@ -164,7 +164,7 @@
<0x00a06000 0x2000>;
};
dma_apbh: dma-apbh@1804000 {
dma_apbh: dma-controller@1804000 {
compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x01804000 0x2000>;
interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,

@@ -1267,14 +1267,13 @@
};
};
dma_apbh: dma-apbh@33000000 {
dma_apbh: dma-controller@33000000 {
compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
reg = <0x33000000 0x2000>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
#dma-cells = <1>;
dma-channels = <4>;
clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;

@@ -227,8 +227,19 @@ static int ctr_encrypt(struct skcipher_request *req)
src += blocks * AES_BLOCK_SIZE;
}
if (nbytes && walk.nbytes == walk.total) {
u8 buf[AES_BLOCK_SIZE];
u8 *d = dst;
if (unlikely(nbytes < AES_BLOCK_SIZE))
src = dst = memcpy(buf + sizeof(buf) - nbytes,
src, nbytes);
neon_aes_ctr_encrypt(dst, src, ctx->enc, ctx->key.rounds,
nbytes, walk.iv);
if (unlikely(nbytes < AES_BLOCK_SIZE))
memcpy(d, dst, nbytes);
nbytes = 0;
}
kernel_neon_end();
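
The partial-block handling above follows a common bounce-buffer idiom; a standalone sketch, with process_block() standing in for the NEON routine (hypothetical helper):

    	u8 buf[AES_BLOCK_SIZE];
    	u8 *out = dst;

    	if (nbytes < AES_BLOCK_SIZE) {
    		/* Right-align the short tail in a full-size block so the
    		 * cipher may read/write AES_BLOCK_SIZE bytes safely. */
    		src = dst = memcpy(buf + sizeof(buf) - nbytes, src, nbytes);
    	}
    	process_block(dst, src, nbytes);	/* hypothetical full-block helper */
    	if (dst == buf + sizeof(buf) - nbytes)
    		memcpy(out, dst, nbytes);	/* copy the real tail back out */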

@@ -103,6 +103,7 @@ static inline void free_screen_info(struct screen_info *si)
}
#define EFI_ALLOC_ALIGN SZ_64K
#define EFI_ALLOC_LIMIT ((1UL << 48) - 1)
/*
* On ARM systems, virtually remapped UEFI runtime services are set up in two

@@ -569,29 +569,6 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table_ops iommu_table_lpar_multi_ops;
/*
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
struct iommu_table_group *table_group,
const __be32 *dma_window)
{
unsigned long offset, size, liobn;
of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
&iommu_table_lpar_multi_ops);
table_group->tce32_start = offset;
table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
.clear = tce_free_pSeries,
@@ -719,26 +696,71 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
* dynamic 64bit DMA window, walking up the device tree.
*/
static struct device_node *pci_dma_find(struct device_node *dn,
const __be32 **dma_window)
struct dynamic_dma_window_prop *prop)
{
const __be32 *dw = NULL;
const __be32 *default_prop = NULL;
const __be32 *ddw_prop = NULL;
struct device_node *rdn = NULL;
bool default_win = false, ddw_win = false;
for ( ; dn && PCI_DN(dn); dn = dn->parent) {
dw = of_get_property(dn, "ibm,dma-window", NULL);
if (dw) {
if (dma_window)
*dma_window = dw;
return dn;
default_prop = of_get_property(dn, "ibm,dma-window", NULL);
if (default_prop) {
rdn = dn;
default_win = true;
}
dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
if (dw)
return dn;
dw = of_get_property(dn, DMA64_PROPNAME, NULL);
if (dw)
return dn;
ddw_prop = of_get_property(dn, DIRECT64_PROPNAME, NULL);
if (ddw_prop) {
rdn = dn;
ddw_win = true;
break;
}
ddw_prop = of_get_property(dn, DMA64_PROPNAME, NULL);
if (ddw_prop) {
rdn = dn;
ddw_win = true;
break;
}
/* At least found default window, which is the case for normal boot */
if (default_win)
break;
}
return NULL;
/* For PCI devices there will always be a DMA window, either on the device
* or parent bus
*/
WARN_ON(!(default_win | ddw_win));
/* caller doesn't want to get DMA window property */
if (!prop)
return rdn;
/* parse DMA window property. During normal system boot, only default
* DMA window is passed in OF. But, for kdump, a dedicated adapter might
* have both default and DDW in FDT. In this scenario, DDW takes precedence
* over default window.
*/
if (ddw_win) {
struct dynamic_dma_window_prop *p;
p = (struct dynamic_dma_window_prop *)ddw_prop;
prop->liobn = p->liobn;
prop->dma_base = p->dma_base;
prop->tce_shift = p->tce_shift;
prop->window_shift = p->window_shift;
} else if (default_win) {
unsigned long offset, size, liobn;
of_parse_dma_window(rdn, default_prop, &liobn, &offset, &size);
prop->liobn = cpu_to_be32((u32)liobn);
prop->dma_base = cpu_to_be64(offset);
prop->tce_shift = cpu_to_be32(IOMMU_PAGE_SHIFT_4K);
prop->window_shift = cpu_to_be32(order_base_2(size));
}
return rdn;
}
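
A usage sketch for the reworked helper (names as in the diff above): callers that only need the device node may pass NULL for prop; otherwise the returned property is big-endian and must be converted before use:

    	struct dynamic_dma_window_prop prop;
    	struct device_node *pdn = pci_dma_find(dn, &prop);

    	if (pdn && PCI_DN(pdn)) {
    		unsigned long liobn = be32_to_cpu(prop.liobn);
    		unsigned long base  = be64_to_cpu(prop.dma_base);
    		unsigned long size  = 1UL << be32_to_cpu(prop.window_shift);
    		/* ... program the TCE table from liobn/base/size ... */
    	}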
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
@@ -746,17 +768,20 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
const __be32 *dma_window = NULL;
struct dynamic_dma_window_prop prop;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
pdn = pci_dma_find(dn, &dma_window);
pdn = pci_dma_find(dn, &prop);
if (dma_window == NULL)
pr_debug(" no ibm,dma-window property !\n");
/* In PPC architecture, there will always be a DMA window on the bus or one of the
 * parent buses. During reboot, there will be an ibm,dma-window property to
 * define the DMA window. For kdump, there will at least be a default window or DDW
 * or both.
 */
ppci = PCI_DN(pdn);
@@ -766,13 +791,24 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
if (dma_window) {
iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
ppci->table_group, dma_window);
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
}
iommu_table_setparms_common(tbl, ppci->phb->bus->number,
be32_to_cpu(prop.liobn),
be64_to_cpu(prop.dma_base),
1ULL << be32_to_cpu(prop.window_shift),
be32_to_cpu(prop.tce_shift), NULL,
&iommu_table_lpar_multi_ops);
/* Only for normal boot with default window. Doesn't matter even
* if we set these with DDW which is 64bit during kdump, since
* these will not be used during kdump.
*/
ppci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
ppci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@@ -960,6 +996,12 @@ static void find_existing_ddw_windows_named(const char *name)
continue;
}
/* If at the time of system initialization, there are DDWs in OF,
* it means this is during kexec. DDW could be direct or dynamic.
* We will just mark DDWs as "dynamic" since this is kdump path,
no need to worry about performance. ddw_list_new_entry() will
* set window->direct = false.
*/
window = ddw_list_new_entry(pdn, dma64);
if (!window) {
of_node_put(pdn);
@@ -1525,8 +1567,8 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
const __be32 *dma_window = NULL;
struct pci_dn *pci;
struct dynamic_dma_window_prop prop;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1539,7 +1581,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
pdn = pci_dma_find(dn, &dma_window);
pdn = pci_dma_find(dn, &prop);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
@@ -1552,8 +1594,20 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pci->table_group) {
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
iommu_table_setparms_lpar(pci->phb, pdn, tbl,
pci->table_group, dma_window);
iommu_table_setparms_common(tbl, pci->phb->bus->number,
be32_to_cpu(prop.liobn),
be64_to_cpu(prop.dma_base),
1ULL << be32_to_cpu(prop.window_shift),
be32_to_cpu(prop.tce_shift), NULL,
&iommu_table_lpar_multi_ops);
/* Only for normal boot with default window. Doesn't matter even
* if we set these with DDW which is 64bit during kdump, since
* these will not be used during kdump.
*/
pci->table_group->tce32_start = be64_to_cpu(prop.dma_base);
pci->table_group->tce32_size = 1 << be32_to_cpu(prop.window_shift);
iommu_init_table(tbl, pci->phb->node, 0, 0);
iommu_register_group(pci->table_group,

@@ -25,6 +25,11 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
extern void *return_address(unsigned int level);
#define ftrace_return_address(n) return_address(n)
void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{

@@ -79,7 +79,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
#define vmemmap ((struct page *)VMEMMAP_START)
#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
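
A worked example of why the offset matters (hypothetical numbers): with phys_ram_base = 0x80000000 and 4 KiB pages, the first valid pfn is 0x80000, and the sparsemem helpers are plain pointer arithmetic:

    /* From the generic memory model, with the offset vmemmap base:
     *   pfn_to_page(pfn)  -> vmemmap + pfn
     *   page_to_pfn(page) -> page - vmemmap
     * so pfn 0x80000 (the first RAM page) now maps to
     * (struct page *)VMEMMAP_START exactly, instead of landing
     * 0x80000 struct pages past the start of the virtual map.
     */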

@@ -7,6 +7,7 @@ ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
@@ -41,6 +42,7 @@ obj-y += irq.o
obj-y += process.o
obj-y += ptrace.o
obj-y += reset.o
obj-y += return_address.o
obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* This code comes from arch/arm64/kernel/return_address.c
*
* Copyright (C) 2023 SiFive.
*/
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/stacktrace.h>
struct return_address_data {
unsigned int level;
void *addr;
};
static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
data->addr = (void *)pc;
return false;
}
--data->level;
return true;
}
NOKPROBE_SYMBOL(save_return_addr);
noinline void *return_address(unsigned int level)
{
struct return_address_data data;
data.level = level + 3;
data.addr = NULL;
arch_stack_walk(save_return_addr, &data, current, NULL);
if (!data.level)
return data.addr;
else
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
NOKPROBE_SYMBOL(return_address);
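
A usage sketch (illustrative function; assumes the standard mapping of CALLER_ADDRn onto ftrace_return_address(n)): level 0 yields the immediate caller, with the level + 3 above accounting for the unwinder's own frames:

    static void who_called_me(void)
    {
    	/* level 0: the caller of who_called_me() */
    	pr_info("called from %pS\n", return_address(0));
    }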

@@ -1982,6 +1982,23 @@ config EFI_STUB
See Documentation/admin-guide/efi-stub.rst for more information.
config EFI_HANDOVER_PROTOCOL
bool "EFI handover protocol (DEPRECATED)"
depends on EFI_STUB
default y
help
Select this in order to include support for the deprecated EFI
handover protocol, which defines alternative entry points into the
EFI stub. This is a practice that has no basis in the UEFI
specification, and requires a priori knowledge on the part of the
bootloader about Linux/x86 specific ways of passing the command line
and initrd, and where in memory those assets may be loaded.
If in doubt, say Y. Even though the corresponding support is not
present in upstream GRUB or other bootloaders, most distros build
GRUB with numerous downstream patches applied, and may rely on the
handover protocol as a result.
config EFI_MIXED
bool "EFI mixed-mode support"
depends on EFI_STUB && X86_64

@@ -74,6 +74,11 @@ LDFLAGS_vmlinux += -z noexecstack
ifeq ($(CONFIG_LD_IS_BFD),y)
LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments)
endif
ifeq ($(CONFIG_EFI_STUB),y)
# ensure that the static EFI stub library will be pulled in, even if it is
# never referenced explicitly from the startup code
LDFLAGS_vmlinux += -u efi_pe_entry
endif
LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
@@ -100,7 +105,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/ident_map_64.o
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
@@ -108,11 +113,11 @@ endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S

@@ -30,13 +30,13 @@ __efi_get_rsdp_addr(unsigned long cfg_tbl_pa, unsigned int cfg_tbl_len)
* Search EFI system tables for RSDP. Preferred is ACPI_20_TABLE_GUID to
* ACPI_TABLE_GUID because it has more features.
*/
rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
ACPI_20_TABLE_GUID);
if (rsdp_addr)
return (acpi_physical_address)rsdp_addr;
/* No ACPI_20_TABLE_GUID found, fallback to ACPI_TABLE_GUID. */
rsdp_addr = efi_find_vendor_table(boot_params, cfg_tbl_pa, cfg_tbl_len,
rsdp_addr = efi_find_vendor_table(boot_params_ptr, cfg_tbl_pa, cfg_tbl_len,
ACPI_TABLE_GUID);
if (rsdp_addr)
return (acpi_physical_address)rsdp_addr;
@@ -56,15 +56,15 @@ static acpi_physical_address efi_get_rsdp_addr(void)
enum efi_type et;
int ret;
et = efi_get_type(boot_params);
et = efi_get_type(boot_params_ptr);
if (et == EFI_TYPE_NONE)
return 0;
systab_pa = efi_get_system_table(boot_params);
systab_pa = efi_get_system_table(boot_params_ptr);
if (!systab_pa)
error("EFI support advertised, but unable to locate system table.");
ret = efi_get_conf_table(boot_params, &cfg_tbl_pa, &cfg_tbl_len);
ret = efi_get_conf_table(boot_params_ptr, &cfg_tbl_pa, &cfg_tbl_len);
if (ret || !cfg_tbl_pa)
error("EFI config table not found.");
@@ -156,7 +156,7 @@ acpi_physical_address get_rsdp_addr(void)
{
acpi_physical_address pa;
pa = boot_params->acpi_rsdp_addr;
pa = boot_params_ptr->acpi_rsdp_addr;
if (!pa)
pa = efi_get_rsdp_addr();
@@ -210,7 +210,7 @@ static unsigned long get_acpi_srat_table(void)
rsdp = (struct acpi_table_rsdp *)get_cmdline_acpi_rsdp();
if (!rsdp)
rsdp = (struct acpi_table_rsdp *)(long)
boot_params->acpi_rsdp_addr;
boot_params_ptr->acpi_rsdp_addr;
if (!rsdp)
return 0;

@@ -14,9 +14,9 @@ static inline char rdfs8(addr_t addr)
#include "../cmdline.c"
unsigned long get_cmd_line_ptr(void)
{
unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
unsigned long cmd_line_ptr = boot_params_ptr->hdr.cmd_line_ptr;
cmd_line_ptr |= (u64)boot_params->ext_cmd_line_ptr << 32;
cmd_line_ptr |= (u64)boot_params_ptr->ext_cmd_line_ptr << 32;
return cmd_line_ptr;
}
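
The two halves compose one 64-bit physical pointer; a worked example with hypothetical values:

    /*
     * hdr.cmd_line_ptr = 0x0009d000   (low 32 bits)
     * ext_cmd_line_ptr = 0x00000001   (high 32 bits)
     * cmd_line_ptr     = (0x1ULL << 32) | 0x9d000 = 0x10009d000
     */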

@@ -0,0 +1,328 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
*
* Early support for invoking 32-bit EFI services from a 64-bit kernel.
*
* Because this thunking occurs before ExitBootServices() we have to
* restore the firmware's 32-bit GDT and IDT before we make EFI service
* calls.
*
* On the plus side, we don't have to worry about mangling 64-bit
* addresses into 32-bits because we're executing with an identity
* mapped pagetable and haven't transitioned to 64-bit virtual addresses
* yet.
*/
#include <linux/linkage.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
.code64
.text
/*
* When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
* is the first thing that runs after switching to long mode. Depending on
* whether the EFI handover protocol or the compat entry point was used to
* enter the kernel, it will either branch to the common 64-bit EFI stub
* entrypoint efi_stub_entry() directly, or via the 64-bit EFI PE/COFF
* entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
* struct boot_params pointer as the third argument, so the presence of such a
* pointer is used to disambiguate.
*
* +--------------+
* +------------------+ +------------+ +------>| efi_pe_entry |
* | efi32_pe_entry |---->| | | +-----------+--+
* +------------------+ | | +------+----------------+ |
* | startup_32 |---->| startup_64_mixed_mode | |
* +------------------+ | | +------+----------------+ |
* | efi32_stub_entry |---->| | | |
* +------------------+ +------------+ | |
* V |
* +------------+ +----------------+ |
* | startup_64 |<----| efi_stub_entry |<--------+
* +------------+ +----------------+
*/
SYM_FUNC_START(startup_64_mixed_mode)
lea efi32_boot_args(%rip), %rdx
mov 0(%rdx), %edi
mov 4(%rdx), %esi
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
mov 8(%rdx), %edx // saved bootparams pointer
test %edx, %edx
jnz efi_stub_entry
#endif
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
* shadow space on the stack even if all arguments are passed in
* registers. We also need an additional 8 bytes for the space that
* would be occupied by the return address, and this also results in
* the correct stack alignment for entry.
*/
sub $40, %rsp
mov %rdi, %rcx // MS calling convention
mov %rsi, %rdx
jmp efi_pe_entry
SYM_FUNC_END(startup_64_mixed_mode)
SYM_FUNC_START(__efi64_thunk)
push %rbp
push %rbx
movl %ds, %eax
push %rax
movl %es, %eax
push %rax
movl %ss, %eax
push %rax
/* Copy args passed on stack */
movq 0x30(%rsp), %rbp
movq 0x38(%rsp), %rbx
movq 0x40(%rsp), %rax
/*
* Convert x86-64 ABI params to i386 ABI
*/
subq $64, %rsp
movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp)
movl %r8d, 0xc(%rsp)
movl %r9d, 0x10(%rsp)
movl %ebp, 0x14(%rsp)
movl %ebx, 0x18(%rsp)
movl %eax, 0x1c(%rsp)
leaq 0x20(%rsp), %rbx
sgdt (%rbx)
sidt 16(%rbx)
leaq 1f(%rip), %rbp
/*
* Switch to IDT and GDT with 32-bit segments. These are the firmware
* GDT and IDT that were installed when the kernel started executing.
* The pointers were saved by the efi32_entry() routine below.
*
* Pass the saved DS selector to the 32-bit code, and use far return to
* restore the saved CS selector.
*/
lidt efi32_boot_idt(%rip)
lgdt efi32_boot_gdt(%rip)
movzwl efi32_boot_ds(%rip), %edx
movzwq efi32_boot_cs(%rip), %rax
pushq %rax
leaq efi_enter32(%rip), %rax
pushq %rax
lretq
1: addq $64, %rsp
movq %rdi, %rax
pop %rbx
movl %ebx, %ss
pop %rbx
movl %ebx, %es
pop %rbx
movl %ebx, %ds
/* Clear out 32-bit selector from FS and GS */
xorl %ebx, %ebx
movl %ebx, %fs
movl %ebx, %gs
/*
* Convert 32-bit status code into 64-bit.
*/
roll $1, %eax
rorq $1, %rax
pop %rbx
pop %rbp
RET
SYM_FUNC_END(__efi64_thunk)
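
The roll/rorq pair above moves the EFI error flag from bit 31 of the 32-bit status into bit 63 of the 64-bit status while preserving the low 31 bits; an equivalent C sketch (helper name illustrative):

    static inline u64 efi32_status_to_u64(u32 s)
    {
    	/* bit 31 (error flag) -> bit 63; bits 30:0 unchanged */
    	return (u64)(s & 0x7fffffffU) | ((u64)(s & 0x80000000U) << 32);
    }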
.code32
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
SYM_FUNC_START(efi32_stub_entry)
call 1f
1: popl %ecx
/* Clear BSS */
xorl %eax, %eax
leal (_bss - 1b)(%ecx), %edi
leal (_ebss - 1b)(%ecx), %ecx
subl %edi, %ecx
shrl $2, %ecx
cld
rep stosl
add $0x4, %esp /* Discard return address */
popl %ecx
popl %edx
popl %esi
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
/*
* EFI service pointer must be in %edi.
*
* The stack should represent the 32-bit calling convention.
*/
SYM_FUNC_START_LOCAL(efi_enter32)
/* Load firmware selector into data and stack segment registers */
movl %edx, %ds
movl %edx, %es
movl %edx, %fs
movl %edx, %gs
movl %edx, %ss
/* Reload pgtables */
movl %cr3, %eax
movl %eax, %cr3
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
/* Disable long mode via EFER */
movl $MSR_EFER, %ecx
rdmsr
btrl $_EFER_LME, %eax
wrmsr
call *%edi
/* We must preserve return value */
movl %eax, %edi
/*
* Some firmware will return with interrupts enabled. Be sure to
* disable them before we switch GDTs and IDTs.
*/
cli
lidtl 16(%ebx)
lgdtl (%ebx)
movl %cr4, %eax
btsl $(X86_CR4_PAE_BIT), %eax
movl %eax, %cr4
movl %cr3, %eax
movl %eax, %cr3
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
wrmsr
xorl %eax, %eax
lldt %ax
pushl $__KERNEL_CS
pushl %ebp
/* Enable paging */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
lret
SYM_FUNC_END(efi_enter32)
/*
* This is the common EFI stub entry point for mixed mode.
*
* Arguments: %ecx image handle
* %edx EFI system table pointer
* %esi struct boot_params pointer (or NULL when not using
* the EFI handover protocol)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
* stub may still exit and return to the firmware using the Exit() EFI boot
* service.]
*/
SYM_FUNC_START_LOCAL(efi32_entry)
call 1f
1: pop %ebx
/* Save firmware GDTR and code/data selectors */
sgdtl (efi32_boot_gdt - 1b)(%ebx)
movw %cs, (efi32_boot_cs - 1b)(%ebx)
movw %ds, (efi32_boot_ds - 1b)(%ebx)
/* Store firmware IDT descriptor */
sidtl (efi32_boot_idt - 1b)(%ebx)
/* Store boot arguments */
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
movl %edx, 4(%ebx)
movl %esi, 8(%ebx)
movb $0x0, 12(%ebx) // efi_is64
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
jmp startup_32
SYM_FUNC_END(efi32_entry)
/*
* efi_status_t efi32_pe_entry(efi_handle_t image_handle,
* efi_system_table_32_t *sys_table)
*/
SYM_FUNC_START(efi32_pe_entry)
pushl %ebp
movl %esp, %ebp
pushl %ebx // save callee-save registers
pushl %edi
call verify_cpu // check for long mode support
testl %eax, %eax
movl $0x80000003, %eax // EFI_UNSUPPORTED
jnz 2f
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
xorl %esi, %esi
jmp efi32_entry // pass %ecx, %edx, %esi
// no other registers remain live
2: popl %edi // restore callee-save registers
popl %ebx
leave
RET
SYM_FUNC_END(efi32_pe_entry)
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
.org efi32_stub_entry + 0x200
.code64
SYM_FUNC_START_NOALIGN(efi64_stub_entry)
jmp efi_handover_entry
SYM_FUNC_END(efi64_stub_entry)
#endif
.data
.balign 8
SYM_DATA_START_LOCAL(efi32_boot_gdt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_gdt)
SYM_DATA_START_LOCAL(efi32_boot_idt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_idt)
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)

@@ -1,195 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
*
* Early support for invoking 32-bit EFI services from a 64-bit kernel.
*
* Because this thunking occurs before ExitBootServices() we have to
* restore the firmware's 32-bit GDT and IDT before we make EFI service
* calls.
*
* On the plus side, we don't have to worry about mangling 64-bit
* addresses into 32-bits because we're executing with an identity
* mapped pagetable and haven't transitioned to 64-bit virtual addresses
* yet.
*/
#include <linux/linkage.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
.code64
.text
SYM_FUNC_START(__efi64_thunk)
push %rbp
push %rbx
movl %ds, %eax
push %rax
movl %es, %eax
push %rax
movl %ss, %eax
push %rax
/* Copy args passed on stack */
movq 0x30(%rsp), %rbp
movq 0x38(%rsp), %rbx
movq 0x40(%rsp), %rax
/*
* Convert x86-64 ABI params to i386 ABI
*/
subq $64, %rsp
movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp)
movl %r8d, 0xc(%rsp)
movl %r9d, 0x10(%rsp)
movl %ebp, 0x14(%rsp)
movl %ebx, 0x18(%rsp)
movl %eax, 0x1c(%rsp)
leaq 0x20(%rsp), %rbx
sgdt (%rbx)
addq $16, %rbx
sidt (%rbx)
leaq 1f(%rip), %rbp
/*
* Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
* and IDT that was installed when the kernel started executing. The
* pointers were saved at the EFI stub entry point in head_64.S.
*
* Pass the saved DS selector to the 32-bit code, and use far return to
* restore the saved CS selector.
*/
leaq efi32_boot_idt(%rip), %rax
lidt (%rax)
leaq efi32_boot_gdt(%rip), %rax
lgdt (%rax)
movzwl efi32_boot_ds(%rip), %edx
movzwq efi32_boot_cs(%rip), %rax
pushq %rax
leaq efi_enter32(%rip), %rax
pushq %rax
lretq
1: addq $64, %rsp
movq %rdi, %rax
pop %rbx
movl %ebx, %ss
pop %rbx
movl %ebx, %es
pop %rbx
movl %ebx, %ds
/* Clear out 32-bit selector from FS and GS */
xorl %ebx, %ebx
movl %ebx, %fs
movl %ebx, %gs
/*
* Convert 32-bit status code into 64-bit.
*/
roll $1, %eax
rorq $1, %rax
pop %rbx
pop %rbp
RET
SYM_FUNC_END(__efi64_thunk)
.code32
/*
* EFI service pointer must be in %edi.
*
* The stack should represent the 32-bit calling convention.
*/
SYM_FUNC_START_LOCAL(efi_enter32)
/* Load firmware selector into data and stack segment registers */
movl %edx, %ds
movl %edx, %es
movl %edx, %fs
movl %edx, %gs
movl %edx, %ss
/* Reload pgtables */
movl %cr3, %eax
movl %eax, %cr3
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
/* Disable long mode via EFER */
movl $MSR_EFER, %ecx
rdmsr
btrl $_EFER_LME, %eax
wrmsr
call *%edi
/* We must preserve return value */
movl %eax, %edi
/*
* Some firmware will return with interrupts enabled. Be sure to
* disable them before we switch GDTs and IDTs.
*/
cli
lidtl (%ebx)
subl $16, %ebx
lgdtl (%ebx)
movl %cr4, %eax
btsl $(X86_CR4_PAE_BIT), %eax
movl %eax, %cr4
movl %cr3, %eax
movl %eax, %cr3
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
wrmsr
xorl %eax, %eax
lldt %ax
pushl $__KERNEL_CS
pushl %ebp
/* Enable paging */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
lret
SYM_FUNC_END(efi_enter32)
.data
.balign 8
SYM_DATA_START(efi32_boot_gdt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_gdt)
SYM_DATA_START(efi32_boot_idt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_idt)
SYM_DATA_START(efi32_boot_cs)
.word 0
SYM_DATA_END(efi32_boot_cs)
SYM_DATA_START(efi32_boot_ds)
.word 0
SYM_DATA_END(efi32_boot_ds)

@@ -84,19 +84,6 @@ SYM_FUNC_START(startup_32)
#ifdef CONFIG_RELOCATABLE
leal startup_32@GOTOFF(%edx), %ebx
#ifdef CONFIG_EFI_STUB
/*
* If we were loaded via the EFI LoadImage service, startup_32() will be at an
* offset to the start of the space allocated for the image. efi_pe_entry() will
* set up image_offset to tell us where the image actually starts, so that we
* can use the full available buffer.
* image_offset = startup_32 - image_base
* Otherwise image_offset will be zero and has no effect on the calculations.
*/
subl image_offset@GOTOFF(%edx), %ebx
#endif
movl BP_kernel_alignment(%esi), %eax
decl %eax
addl %eax, %ebx
@@ -150,17 +137,6 @@ SYM_FUNC_START(startup_32)
jmp *%eax
SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_STUB
SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp
movl 8(%esp), %esi /* save boot_params pointer */
call efi_main
/* efi_main returns the possibly relocated address of startup_32 */
jmp *%eax
SYM_FUNC_END(efi32_stub_entry)
SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry)
#endif
.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
@@ -179,15 +155,9 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
*/
/* push arguments for extract_kernel: */
pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */
pushl %ebp /* output address */
pushl input_len@GOTOFF(%ebx) /* input_len */
leal input_data@GOTOFF(%ebx), %eax
pushl %eax /* input_data */
leal boot_heap@GOTOFF(%ebx), %eax
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
call extract_kernel /* returns kernel location in %eax */
call extract_kernel /* returns kernel entry point in %eax */
addl $24, %esp
/*
@@ -208,17 +178,11 @@ SYM_DATA_START_LOCAL(gdt)
.quad 0x00cf92000000ffff /* __KERNEL_DS */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
/*
* Stack and heap for uncompression
*/
.bss
.balign 4
boot_heap:
.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:

@@ -118,7 +118,9 @@ SYM_FUNC_START(startup_32)
1:
/* Setup Exception handling for SEV-ES */
#ifdef CONFIG_AMD_MEM_ENCRYPT
call startup32_load_idt
#endif
/* Make sure cpu supports long mode. */
call verify_cpu
@@ -136,19 +138,6 @@ SYM_FUNC_START(startup_32)
#ifdef CONFIG_RELOCATABLE
movl %ebp, %ebx
#ifdef CONFIG_EFI_STUB
/*
* If we were loaded via the EFI LoadImage service, startup_32 will be at an
* offset to the start of the space allocated for the image. efi_pe_entry will
* set up image_offset to tell us where the image actually starts, so that we
* can use the full available buffer.
* image_offset = startup_32 - image_base
* Otherwise image_offset will be zero and has no effect on the calculations.
*/
subl rva(image_offset)(%ebp), %ebx
#endif
movl BP_kernel_alignment(%esi), %eax
decl %eax
addl %eax, %ebx
@@ -178,12 +167,13 @@ SYM_FUNC_START(startup_32)
*/
/*
* If SEV is active then set the encryption mask in the page tables.
* This will insure that when the kernel is copied and decompressed
* This will ensure that when the kernel is copied and decompressed
* it will be done so encrypted.
*/
call get_sev_encryption_bit
xorl %edx, %edx
#ifdef CONFIG_AMD_MEM_ENCRYPT
call get_sev_encryption_bit
xorl %edx, %edx
testl %eax, %eax
jz 1f
subl $32, %eax /* Encryption bit is always above bit 31 */
@@ -249,6 +239,11 @@ SYM_FUNC_START(startup_32)
movl $__BOOT_TSS, %eax
ltr %ax
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Check if the C-bit position is correct when SEV is active */
call startup32_check_sev_cbit
#endif
/*
* Setup for the jump to 64bit mode
*
@@ -261,29 +256,11 @@ SYM_FUNC_START(startup_32)
*/
leal rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
movl rva(efi32_boot_args)(%ebp), %edi
testl %edi, %edi
jz 1f
leal rva(efi64_stub_entry)(%ebp), %eax
movl rva(efi32_boot_args+4)(%ebp), %esi
movl rva(efi32_boot_args+8)(%ebp), %edx // saved bootparams pointer
testl %edx, %edx
jnz 1f
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
* shadow space on the stack even if all arguments are passed in
* registers. We also need an additional 8 bytes for the space that
* would be occupied by the return address, and this also results in
* the correct stack alignment for entry.
*/
subl $40, %esp
leal rva(efi_pe_entry)(%ebp), %eax
movl %edi, %ecx // MS calling convention
movl %esi, %edx
cmpb $1, rva(efi_is64)(%ebp)
je 1f
leal rva(startup_64_mixed_mode)(%ebp), %eax
1:
#endif
/* Check if the C-bit position is correct when SEV is active */
call startup32_check_sev_cbit
pushl $__KERNEL_CS
pushl %eax
@@ -296,41 +273,6 @@ SYM_FUNC_START(startup_32)
lret
SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_MIXED
.org 0x190
SYM_FUNC_START(efi32_stub_entry)
add $0x4, %esp /* Discard return address */
popl %ecx
popl %edx
popl %esi
call 1f
1: pop %ebp
subl $ rva(1b), %ebp
movl %esi, rva(efi32_boot_args+8)(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
movl %ecx, rva(efi32_boot_args)(%ebp)
movl %edx, rva(efi32_boot_args+4)(%ebp)
movb $0, rva(efi_is64)(%ebp)
/* Save firmware GDTR and code/data selectors */
sgdtl rva(efi32_boot_gdt)(%ebp)
movw %cs, rva(efi32_boot_cs)(%ebp)
movw %ds, rva(efi32_boot_ds)(%ebp)
/* Store firmware IDT descriptor */
sidtl rva(efi32_boot_idt)(%ebp)
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
jmp startup_32
SYM_FUNC_END(efi32_stub_entry)
#endif
.code64
.org 0x200
SYM_CODE_START(startup_64)
@@ -372,20 +314,6 @@ SYM_CODE_START(startup_64)
/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
leaq startup_32(%rip) /* - $startup_32 */, %rbp
#ifdef CONFIG_EFI_STUB
/*
* If we were loaded via the EFI LoadImage service, startup_32 will be at an
* offset to the start of the space allocated for the image. efi_pe_entry will
* set up image_offset to tell us where the image actually starts, so that we
* can use the full available buffer.
* image_offset = startup_32 - image_base
* Otherwise image_offset will be zero and has no effect on the calculations.
*/
movl image_offset(%rip), %eax
subq %rax, %rbp
#endif
movl BP_kernel_alignment(%rsi), %eax
decl %eax
addq %rax, %rbp
@@ -424,10 +352,6 @@ SYM_CODE_START(startup_64)
* For the trampoline, we need the top page table to reside in lower
* memory as we don't have a way to load 64-bit values into CR3 in
* 32-bit mode.
*
* We go through the trampoline even if we don't have to: if we're
* already in a desired paging mode. This way the trampoline code gets
* tested on every boot.
*/
/* Make sure we have GDT with 32-bit code segment */
@@ -442,10 +366,14 @@ SYM_CODE_START(startup_64)
lretq
.Lon_kernel_cs:
/*
* RSI holds a pointer to a boot_params structure provided by the
* loader, and this needs to be preserved across C function calls. So
* move it into a callee saved register.
*/
movq %rsi, %r15
pushq %rsi
call load_stage1_idt
popq %rsi
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
@@ -456,82 +384,24 @@ SYM_CODE_START(startup_64)
* CPUID instructions being issued, so go ahead and do that now via
* sev_enable(), which will also handle the rest of the SEV-related
* detection/setup to ensure that has been done in advance of any dependent
* code.
* code. Pass the boot_params pointer as the first argument.
*/
pushq %rsi
movq %rsi, %rdi /* real mode address */
movq %r15, %rdi
call sev_enable
popq %rsi
#endif
/*
* paging_prepare() sets up the trampoline and checks if we need to
* enable 5-level paging.
* configure_5level_paging() updates the number of paging levels using
* a trampoline in 32-bit addressable memory if the current number does
* not match the desired number.
*
* paging_prepare() returns a two-quadword structure which lands
* into RDX:RAX:
* - Address of the trampoline is returned in RAX.
* - Non zero RDX means trampoline needs to enable 5-level
* paging.
*
* RSI holds real mode data and needs to be preserved across
* this function call.
* Pass the boot_params pointer as the first argument. The second
* argument is the relocated address of the page table to use instead
* of the page table in trampoline memory (if required).
*/
pushq %rsi
movq %rsi, %rdi /* real mode address */
call paging_prepare
popq %rsi
/* Save the trampoline address in RCX */
movq %rax, %rcx
/* Set up 32-bit addressable stack */
leaq TRAMPOLINE_32BIT_STACK_END(%rcx), %rsp
/*
* Preserve live 64-bit registers on the stack: this is necessary
* because the architecture does not guarantee that GPRs will retain
* their full 64-bit values across a 32-bit mode switch.
*/
pushq %rbp
pushq %rbx
pushq %rsi
/*
* Push the 64-bit address of trampoline_return() onto the new stack.
* It will be used by the trampoline to return to the main code. Due to
* the 32-bit mode switch, it cannot be kept it in a register either.
*/
leaq trampoline_return(%rip), %rdi
pushq %rdi
/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
pushq %rax
lretq
trampoline_return:
/* Restore live 64-bit registers */
popq %rsi
popq %rbx
popq %rbp
/* Restore the stack, the 32-bit trampoline uses its own stack */
leaq rva(boot_stack_end)(%rbx), %rsp
/*
* cleanup_trampoline() would restore trampoline memory.
*
* RDI is address of the page table to use instead of page table
* in trampoline memory (if required).
*
* RSI holds real mode data and needs to be preserved across
* this function call.
*/
pushq %rsi
leaq rva(top_pgtable)(%rbx), %rdi
call cleanup_trampoline
popq %rsi
movq %r15, %rdi
leaq rva(top_pgtable)(%rbx), %rsi
call configure_5level_paging
/* Zero EFLAGS */
pushq $0
@@ -541,7 +411,6 @@ trampoline_return:
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
pushq %rsi
leaq (_bss-8)(%rip), %rsi
leaq rva(_bss-8)(%rbx), %rdi
movl $(_bss - startup_32), %ecx
@@ -549,7 +418,6 @@ trampoline_return:
std
rep movsq
cld
popq %rsi
/*
* The GDT may get overwritten either during the copy we just did or
@@ -568,19 +436,6 @@ trampoline_return:
jmp *%rax
SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB
.org 0x390
SYM_FUNC_START(efi64_stub_entry)
and $~0xf, %rsp /* realign the stack */
movq %rdx, %rbx /* save boot_params pointer */
call efi_main
movq %rbx,%rsi
leaq rva(startup_64)(%rax), %rax
jmp *%rax
SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry)
#endif
.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
@@ -594,125 +449,122 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
shrq $3, %rcx
rep stosq
pushq %rsi
call load_stage2_idt
/* Pass boot_params to initialize_identity_maps() */
movq (%rsp), %rdi
movq %r15, %rdi
call initialize_identity_maps
popq %rsi
/*
* Do the extraction, and jump to the new kernel..
*/
pushq %rsi /* Save the real mode argument */
movq %rsi, %rdi /* real mode address */
leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
leaq input_data(%rip), %rdx /* input_data */
movl input_len(%rip), %ecx /* input_len */
movq %rbp, %r8 /* output target address */
movl output_len(%rip), %r9d /* decompressed length, end of relocs */
call extract_kernel /* returns kernel location in %rax */
popq %rsi
/* pass struct boot_params pointer and output target address */
movq %r15, %rdi
movq %rbp, %rsi
call extract_kernel /* returns kernel entry point in %rax */
/*
* Jump to the decompressed kernel.
*/
movq %r15, %rsi
jmp *%rax
SYM_FUNC_END(.Lrelocated)
.code32
/*
* This is the 32-bit trampoline that will be copied over to low memory.
* This is the 32-bit trampoline that will be copied over to low memory. It
* will be called using the ordinary 64-bit calling convention from code
* running in 64-bit mode.
*
* Return address is at the top of the stack (might be above 4G).
* ECX contains the base address of the trampoline memory.
* Non zero RDX means trampoline needs to enable 5-level paging.
* The first argument (EDI) contains the address of the temporary PGD level
* page table in 32-bit addressable memory which will be programmed into
* register CR3.
*/
.section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
/* Set up data and stack segments */
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %ss
/*
* Preserve callee save 64-bit registers on the stack: this is
* necessary because the architecture does not guarantee that GPRs will
* retain their full 64-bit values across a 32-bit mode switch.
*/
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbp
pushq %rbx
/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
movq %rsp, %rbx
shrq $32, %rbx
/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
pushq $__KERNEL32_CS
leaq 0f(%rip), %rax
pushq %rax
lretq
/*
* The 32-bit code below will do a far jump back to long mode and end
* up here after reconfiguring the number of paging levels. First, the
* stack pointer needs to be restored to its full 64-bit value before
* the callee save register contents can be popped from the stack.
*/
.Lret:
shlq $32, %rbx
orq %rbx, %rsp
/* Restore the preserved 64-bit registers */
popq %rbx
popq %rbp
popq %r12
popq %r13
popq %r14
popq %r15
retq
.code32
0:
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
/* Check what paging mode we want to be in after the trampoline */
testl %edx, %edx
jz 1f
/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
movl %cr4, %eax
testl $X86_CR4_LA57, %eax
jnz 3f
jmp 2f
1:
/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
movl %cr4, %eax
testl $X86_CR4_LA57, %eax
jz 3f
2:
/* Point CR3 to the trampoline's new top level page table */
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
movl %eax, %cr3
3:
movl %edi, %cr3
/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
pushl %ecx
pushl %edx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
/* Avoid writing EFER if no change was made (for TDX guest) */
jc 1f
wrmsr
1: popl %edx
popl %ecx
#ifdef CONFIG_X86_MCE
/*
* Preserve CR4.MCE if the kernel will enable #MC support.
* Clearing MCE may fault in some environments (that also force #MC
* support). Any machine check that occurs before #MC support is fully
* configured will crash the system regardless of the CR4.MCE value set
* here.
*/
movl %cr4, %eax
andl $X86_CR4_MCE, %eax
#else
movl $0, %eax
#endif
/* Enable PAE and LA57 (if required) paging modes */
orl $X86_CR4_PAE, %eax
testl %edx, %edx
jz 1f
orl $X86_CR4_LA57, %eax
1:
/* Toggle CR4.LA57 */
movl %cr4, %eax
btcl $X86_CR4_LA57_BIT, %eax
movl %eax, %cr4
/* Calculate address of paging_enabled() once we are executing in the trampoline */
leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
/* Prepare the stack for far return to Long Mode */
pushl $__KERNEL_CS
pushl %eax
/* Enable paging again. */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
lret
/*
* Return to the 64-bit calling code using LJMP rather than LRET, to
* avoid the need for a 32-bit addressable stack. The destination
* address will be adjusted after the template code is copied into a
* 32-bit addressable buffer.
*/
.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)
.code64
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
/* Return from the trampoline */
retq
SYM_FUNC_END(.Lpaging_enabled)
/*
* This symbol is placed right after trampoline_32bit_src() so its address can
* be used to infer the size of the trampoline code.
*/
SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
/*
* The trampoline code has a size limit.
@@ -721,7 +573,7 @@ SYM_FUNC_END(.Lpaging_enabled)
*/
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
.code32
.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
@@ -729,6 +581,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
jmp 1b
SYM_FUNC_END(.Lno_longmode)
.globl verify_cpu
#include "../../kernel/verify_cpu.S"
.data
@@ -760,249 +613,11 @@ SYM_DATA_START(boot_idt)
.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA_START(boot32_idt_desc)
.word boot32_idt_end - boot32_idt - 1
.long 0
SYM_DATA_END(boot32_idt_desc)
.balign 8
SYM_DATA_START(boot32_idt)
.rept 32
.quad 0
.endr
SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
#endif
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
__HEAD
.code32
SYM_FUNC_START(efi32_pe_entry)
/*
* efi_status_t efi32_pe_entry(efi_handle_t image_handle,
* efi_system_table_32_t *sys_table)
*/
pushl %ebp
movl %esp, %ebp
pushl %eax // dummy push to allocate loaded_image
pushl %ebx // save callee-save registers
pushl %edi
call verify_cpu // check for long mode support
testl %eax, %eax
movl $0x80000003, %eax // EFI_UNSUPPORTED
jnz 2f
call 1f
1: pop %ebx
subl $ rva(1b), %ebx
/* Get the loaded image protocol pointer from the image handle */
leal -4(%ebp), %eax
pushl %eax // &loaded_image
leal rva(loaded_image_proto)(%ebx), %eax
pushl %eax // pass the GUID address
pushl 8(%ebp) // pass the image handle
/*
* Note the alignment of the stack frame.
* sys_table
* handle <-- 16-byte aligned on entry by ABI
* return address
* frame pointer
* loaded_image <-- local variable
* saved %ebx <-- 16-byte aligned here
* saved %edi
* &loaded_image
* &loaded_image_proto
* handle <-- 16-byte aligned for call to handle_protocol
*/
movl 12(%ebp), %eax // sys_table
movl ST32_boottime(%eax), %eax // sys_table->boottime
call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
addl $12, %esp // restore argument space
testl %eax, %eax
jnz 2f
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
movl -4(%ebp), %esi // loaded_image
movl LI32_image_base(%esi), %esi // loaded_image->image_base
movl %ebx, %ebp // startup_32 for efi32_pe_stub_entry
/*
* We need to set the image_offset variable here since startup_32() will
* use it before we get to the 64-bit efi_pe_entry() in C code.
*/
subl %esi, %ebx
movl %ebx, rva(image_offset)(%ebp) // save image_offset
jmp efi32_pe_stub_entry
2: popl %edi // restore callee-save registers
popl %ebx
leave
RET
SYM_FUNC_END(efi32_pe_entry)
.section ".rodata"
/* EFI loaded image protocol GUID */
.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
.long 0x5b1b31a1
.word 0x9562, 0x11d2
.byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
__HEAD
.code32
/*
* Write an IDT entry into boot32_idt
*
* Parameters:
*
* %eax: Handler address
* %edx: Vector number
*
* Physical offset is expected in %ebp
*/
SYM_FUNC_START(startup32_set_idt_entry)
push %ebx
push %ecx
/* IDT entry address to %ebx */
leal rva(boot32_idt)(%ebp), %ebx
shl $3, %edx
addl %edx, %ebx
/* Build IDT entry, lower 4 bytes */
movl %eax, %edx
andl $0x0000ffff, %edx # Target code segment offset [15:0]
movl $__KERNEL32_CS, %ecx # Target code segment selector
shl $16, %ecx
orl %ecx, %edx
/* Store lower 4 bytes to IDT */
movl %edx, (%ebx)
/* Build IDT entry, upper 4 bytes */
movl %eax, %edx
andl $0xffff0000, %edx # Target code segment offset [31:16]
orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
/* Store upper 4 bytes to IDT */
movl %edx, 4(%ebx)
pop %ecx
pop %ebx
RET
SYM_FUNC_END(startup32_set_idt_entry)
#endif
SYM_FUNC_START(startup32_load_idt)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* #VC handler */
leal rva(startup32_vc_handler)(%ebp), %eax
movl $X86_TRAP_VC, %edx
call startup32_set_idt_entry
/* Load IDT */
leal rva(boot32_idt)(%ebp), %eax
movl %eax, rva(boot32_idt_desc+2)(%ebp)
lidt rva(boot32_idt_desc)(%ebp)
#endif
RET
SYM_FUNC_END(startup32_load_idt)
/*
* Check for the correct C-bit position when the startup_32 boot-path is used.
*
* The check makes use of the fact that all memory is encrypted when paging is
* disabled. The function creates 64 bits of random data using the RDRAND
* instruction. RDRAND is mandatory for SEV guests, so always available. If the
* hypervisor violates that the kernel will crash right here.
*
* The 64 bits of random data are stored to a memory location and at the same
* time kept in the %eax and %ebx registers. Since encryption is always active
* when paging is off the random data will be stored encrypted in main memory.
*
* Then paging is enabled. When the C-bit position is correct all memory is
* still mapped encrypted and comparing the register values with memory will
* succeed. An incorrect C-bit position will map all memory unencrypted, so that
* the compare will use the encrypted random data and fail.
*/
SYM_FUNC_START(startup32_check_sev_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
pushl %eax
pushl %ebx
pushl %ecx
pushl %edx
/* Check for non-zero sev_status */
movl rva(sev_status)(%ebp), %eax
testl %eax, %eax
jz 4f
/*
* Get two 32-bit random values - Don't bail out if RDRAND fails
* because it is better to prevent forward progress if no random value
* can be gathered.
*/
1: rdrand %eax
jnc 1b
2: rdrand %ebx
jnc 2b
/* Store to memory and keep it in the registers */
movl %eax, rva(sev_check_data)(%ebp)
movl %ebx, rva(sev_check_data+4)(%ebp)
/* Enable paging to see if encryption is active */
movl %cr0, %edx /* Backup %cr0 in %edx */
movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
movl %ecx, %cr0
cmpl %eax, rva(sev_check_data)(%ebp)
jne 3f
cmpl %ebx, rva(sev_check_data+4)(%ebp)
jne 3f
movl %edx, %cr0 /* Restore previous %cr0 */
jmp 4f
3: /* Check failed - hlt the machine */
hlt
jmp 3b
4:
popl %edx
popl %ecx
popl %ebx
popl %eax
#endif
RET
SYM_FUNC_END(startup32_check_sev_cbit)
/*
* Stack and heap for uncompression
*/
.bss
.balign 4
SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)
SYM_DATA_START_LOCAL(boot_stack)
.fill BOOT_STACK_SIZE, 1, 0
.balign 16

@@ -167,8 +167,9 @@ void initialize_identity_maps(void *rmode)
* or does not touch all the pages covering them.
*/
kernel_add_identity_map((unsigned long)_head, (unsigned long)_end);
boot_params = rmode;
kernel_add_identity_map((unsigned long)boot_params, (unsigned long)(boot_params + 1));
boot_params_ptr = rmode;
kernel_add_identity_map((unsigned long)boot_params_ptr,
(unsigned long)(boot_params_ptr + 1));
cmdline = get_cmd_line_ptr();
kernel_add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
@@ -176,7 +177,7 @@ void initialize_identity_maps(void *rmode)
* Also map the setup_data entries passed via boot_params in case they
* need to be accessed by uncompressed kernel via the identity mapping.
*/
sd = (struct setup_data *)boot_params->hdr.setup_data;
sd = (struct setup_data *)boot_params_ptr->hdr.setup_data;
while (sd) {
unsigned long sd_addr = (unsigned long)sd;

@@ -63,7 +63,7 @@ static unsigned long get_boot_seed(void)
unsigned long hash = 0;
hash = rotate_xor(hash, build_str, sizeof(build_str));
hash = rotate_xor(hash, boot_params, sizeof(*boot_params));
hash = rotate_xor(hash, boot_params_ptr, sizeof(*boot_params_ptr));
return hash;
}
@@ -383,7 +383,7 @@ static void handle_mem_options(void)
static void mem_avoid_init(unsigned long input, unsigned long input_size,
unsigned long output)
{
unsigned long init_size = boot_params->hdr.init_size;
unsigned long init_size = boot_params_ptr->hdr.init_size;
u64 initrd_start, initrd_size;
unsigned long cmd_line, cmd_line_size;
@@ -395,10 +395,10 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
/* Avoid initrd. */
initrd_start = (u64)boot_params->ext_ramdisk_image << 32;
initrd_start |= boot_params->hdr.ramdisk_image;
initrd_size = (u64)boot_params->ext_ramdisk_size << 32;
initrd_size |= boot_params->hdr.ramdisk_size;
initrd_start = (u64)boot_params_ptr->ext_ramdisk_image << 32;
initrd_start |= boot_params_ptr->hdr.ramdisk_image;
initrd_size = (u64)boot_params_ptr->ext_ramdisk_size << 32;
initrd_size |= boot_params_ptr->hdr.ramdisk_size;
mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
/* No need to set mapping for initrd, it will be handled in VO. */
@@ -413,8 +413,8 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
}
/* Avoid boot parameters. */
mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params_ptr;
mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params_ptr);
/* We don't need to set a mapping for setup_data. */
@@ -447,7 +447,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
}
/* Avoid all entries in the setup_data linked list. */
ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
ptr = (struct setup_data *)(unsigned long)boot_params_ptr->hdr.setup_data;
while (ptr) {
struct mem_vector avoid;
@@ -679,7 +679,7 @@ static bool process_mem_region(struct mem_vector *region,
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
struct efi_info *e = &boot_params->efi_info;
struct efi_info *e = &boot_params_ptr->efi_info;
bool efi_mirror_found = false;
struct mem_vector region;
efi_memory_desc_t *md;
@@ -761,8 +761,8 @@ static void process_e820_entries(unsigned long minimum,
struct boot_e820_entry *entry;
/* Verify potential e820 positions, appending to slots list. */
for (i = 0; i < boot_params->e820_entries; i++) {
entry = &boot_params->e820_table[i];
for (i = 0; i < boot_params_ptr->e820_entries; i++) {
entry = &boot_params_ptr->e820_table[i];
/* Skip non-RAM entries. */
if (entry->type != E820_TYPE_RAM)
continue;
@@ -836,7 +836,7 @@ void choose_random_location(unsigned long input,
return;
}
boot_params->hdr.loadflags |= KASLR_FLAG;
boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
if (IS_ENABLED(CONFIG_X86_32))
mem_limit = KERNEL_IMAGE_SIZE;

@@ -12,16 +12,13 @@
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
#include <asm/segment.h>
#include <asm/trapnr.h>
.text
.code32
SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %ebx
push %ecx
push %edx
movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
@@ -52,12 +49,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
.Lsev_exit:
pop %edx
pop %ecx
pop %ebx
#endif /* CONFIG_AMD_MEM_ENCRYPT */
RET
SYM_FUNC_END(get_sev_encryption_bit)
@@ -98,7 +90,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid)
jmp 1b
SYM_CODE_END(sev_es_req_cpuid)
SYM_CODE_START(startup32_vc_handler)
SYM_CODE_START_LOCAL(startup32_vc_handler)
pushl %eax
pushl %ebx
pushl %ecx
@@ -184,15 +176,149 @@ SYM_CODE_START(startup32_vc_handler)
jmp .Lfail
SYM_CODE_END(startup32_vc_handler)
/*
* Write an IDT entry into boot32_idt
*
* Parameters:
*
* %eax: Handler address
* %edx: Vector number
* %ecx: IDT address
*/
SYM_FUNC_START_LOCAL(startup32_set_idt_entry)
/* IDT entry address to %ecx */
leal (%ecx, %edx, 8), %ecx
/* Build IDT entry, lower 4 bytes */
movl %eax, %edx
andl $0x0000ffff, %edx # Target code segment offset [15:0]
orl $(__KERNEL32_CS << 16), %edx # Target code segment selector
/* Store lower 4 bytes to IDT */
movl %edx, (%ecx)
/* Build IDT entry, upper 4 bytes */
movl %eax, %edx
andl $0xffff0000, %edx # Target code segment offset [31:16]
orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
/* Store upper 4 bytes to IDT */
movl %edx, 4(%ecx)
RET
SYM_FUNC_END(startup32_set_idt_entry)
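/*
 * Worked example (illustrative values, not from the source): for a
 * handler at 0x12345678 with selector __KERNEL32_CS, the two dwords
 * built above are
 *
 *   lower: (__KERNEL32_CS << 16) | 0x5678
 *   upper: 0x12340000 | 0x00008e00 = 0x12348e00 (present, 32-bit
 *          interrupt gate)
 */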
SYM_FUNC_START(startup32_load_idt)
push %ebp
push %ebx
call 1f
1: pop %ebp
leal (boot32_idt - 1b)(%ebp), %ebx
/* #VC handler */
leal (startup32_vc_handler - 1b)(%ebp), %eax
movl $X86_TRAP_VC, %edx
movl %ebx, %ecx
call startup32_set_idt_entry
/* Load IDT */
leal (boot32_idt_desc - 1b)(%ebp), %ecx
movl %ebx, 2(%ecx)
lidt (%ecx)
pop %ebx
pop %ebp
RET
SYM_FUNC_END(startup32_load_idt)
/*
* Check for the correct C-bit position when the startup_32 boot-path is used.
*
* The check makes use of the fact that all memory is encrypted when paging is
* disabled. The function creates 64 bits of random data using the RDRAND
* instruction. RDRAND is mandatory for SEV guests, so it is always available.
* If the hypervisor violates that, the kernel will crash right here.
*
* The 64 bits of random data are stored to a memory location and at the same
* time kept in the %eax and %ebx registers. Since encryption is always active
* when paging is off the random data will be stored encrypted in main memory.
*
* Then paging is enabled. When the C-bit position is correct all memory is
* still mapped encrypted and comparing the register values with memory will
* succeed. An incorrect C-bit position will map all memory unencrypted, so that
* the compare will use the encrypted random data and fail.
*/
SYM_FUNC_START(startup32_check_sev_cbit)
pushl %ebx
pushl %ebp
call 0f
0: popl %ebp
/* Check for non-zero sev_status */
movl (sev_status - 0b)(%ebp), %eax
testl %eax, %eax
jz 4f
/*
* Get two 32-bit random values - don't bail out if RDRAND fails;
* halting here is better than making forward progress without a
* random value.
*/
1: rdrand %eax
jnc 1b
2: rdrand %ebx
jnc 2b
/* Store to memory and keep it in the registers */
leal (sev_check_data - 0b)(%ebp), %ebp
movl %eax, 0(%ebp)
movl %ebx, 4(%ebp)
/* Enable paging to see if encryption is active */
movl %cr0, %edx /* Backup %cr0 in %edx */
movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
movl %ecx, %cr0
cmpl %eax, 0(%ebp)
jne 3f
cmpl %ebx, 4(%ebp)
jne 3f
movl %edx, %cr0 /* Restore previous %cr0 */
jmp 4f
3: /* Check failed - hlt the machine */
hlt
jmp 3b
4:
popl %ebp
popl %ebx
RET
SYM_FUNC_END(startup32_check_sev_cbit)
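The same check, as a hedged C sketch (rdrand64(), enable_paging(), disable_paging() and halt_forever() are hypothetical helpers standing in for the assembly above, which runs in 32-bit mode with paging off):

	#include <stdint.h>

	/* Hypothetical helpers standing in for the assembly above. */
	extern uint64_t rdrand64(void);
	extern void enable_paging(void), disable_paging(void);
	extern void halt_forever(void);

	/* Sketch only: with paging off, stores always land encrypted in RAM. */
	static void check_sev_cbit_sketch(void)
	{
		volatile uint64_t stored;	/* mirrors sev_check_data above */
		uint64_t rnd = rdrand64();	/* hypothetical; real code retries RDRAND */

		stored = rnd;			/* written encrypted */
		enable_paging();		/* hypothetical helper */
		if (stored != rnd)		/* wrong C-bit: memory now mapped unencrypted */
			halt_forever();		/* hypothetical: the hlt loop above */
		disable_paging();
	}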
.code64
#include "../../kernel/sev_verify_cbit.S"
.data
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
SYM_DATA(sme_me_mask, .quad 0)
SYM_DATA(sev_status, .quad 0)
SYM_DATA(sev_check_data, .quad 0)
#endif
SYM_DATA_START_LOCAL(boot32_idt)
.rept 32
.quad 0
.endr
SYM_DATA_END(boot32_idt)
SYM_DATA_START_LOCAL(boot32_idt_desc)
.word . - boot32_idt - 1
.long 0
SYM_DATA_END(boot32_idt_desc)

View File

@@ -46,7 +46,7 @@ void *memmove(void *dest, const void *src, size_t n);
/*
* This is set up by the setup-routine at boot-time
*/
struct boot_params *boot_params;
struct boot_params *boot_params_ptr;
struct port_io_ops pio_ops;
@@ -132,8 +132,8 @@ void __putstr(const char *s)
if (lines == 0 || cols == 0)
return;
x = boot_params->screen_info.orig_x;
y = boot_params->screen_info.orig_y;
x = boot_params_ptr->screen_info.orig_x;
y = boot_params_ptr->screen_info.orig_y;
while ((c = *s++) != '\0') {
if (c == '\n') {
@@ -154,8 +154,8 @@ void __putstr(const char *s)
}
}
boot_params->screen_info.orig_x = x;
boot_params->screen_info.orig_y = y;
boot_params_ptr->screen_info.orig_x = x;
boot_params_ptr->screen_info.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */
outb(14, vidport);
@@ -277,7 +277,7 @@ static inline void handle_relocations(void *output, unsigned long output_len,
{ }
#endif
static void parse_elf(void *output)
static size_t parse_elf(void *output)
{
#ifdef CONFIG_X86_64
Elf64_Ehdr ehdr;
@@ -293,10 +293,8 @@ static void parse_elf(void *output)
if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
ehdr.e_ident[EI_MAG3] != ELFMAG3) {
ehdr.e_ident[EI_MAG3] != ELFMAG3)
error("Kernel is not a valid ELF file");
return;
}
debug_putstr("Parsing ELF... ");
@@ -328,6 +326,35 @@ static void parse_elf(void *output)
}
free(phdrs);
return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
}
const unsigned long kernel_total_size = VO__end - VO__text;
static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
extern unsigned char input_data[];
extern unsigned int input_len, output_len;
unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
void (*error)(char *x))
{
unsigned long entry;
if (!free_mem_ptr) {
free_mem_ptr = (unsigned long)boot_heap;
free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap);
}
if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len,
NULL, error) < 0)
return ULONG_MAX;
entry = parse_elf(outbuf);
handle_relocations(outbuf, output_len, virt_addr);
return entry;
}
/*
@@ -347,25 +374,22 @@ static void parse_elf(void *output)
* |-------uncompressed kernel image---------|
*
*/
asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
unsigned long output_len)
asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
{
const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
memptr heap = (memptr)boot_heap;
unsigned long needed_size;
size_t entry_offset;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
boot_params_ptr = rmode;
/* Clear flags intended solely for in-kernel use. */
boot_params->hdr.loadflags &= ~KASLR_FLAG;
boot_params_ptr->hdr.loadflags &= ~KASLR_FLAG;
sanitize_boot_params(boot_params);
sanitize_boot_params(boot_params_ptr);
if (boot_params->screen_info.orig_video_mode == 7) {
if (boot_params_ptr->screen_info.orig_video_mode == 7) {
vidmem = (char *) 0xb0000;
vidport = 0x3b4;
} else {
@@ -373,8 +397,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
vidport = 0x3d4;
}
lines = boot_params->screen_info.orig_video_lines;
cols = boot_params->screen_info.orig_video_cols;
lines = boot_params_ptr->screen_info.orig_video_lines;
cols = boot_params_ptr->screen_info.orig_video_cols;
init_default_io_ops();
@@ -393,7 +417,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
* so that early debugging output from the RSDP parsing code can be
* collected.
*/
boot_params->acpi_rsdp_addr = get_rsdp_addr();
boot_params_ptr->acpi_rsdp_addr = get_rsdp_addr();
debug_putstr("early console in extract_kernel\n");
@@ -411,7 +435,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
* entries. This ensures the full mapped area is usable RAM
* and doesn't include any reserved areas.
*/
needed_size = max(output_len, kernel_total_size);
needed_size = max_t(unsigned long, output_len, kernel_total_size);
#ifdef CONFIG_X86_64
needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
#endif
@@ -442,7 +466,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#ifdef CONFIG_X86_64
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
if (virt_addr + needed_size > KERNEL_IMAGE_SIZE)
error("Destination virtual address is beyond the kernel mapping area");
#else
if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
@@ -454,16 +478,17 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#endif
debug_putstr("\nDecompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, output_len,
NULL, error);
parse_elf(output);
handle_relocations(output, output_len, virt_addr);
debug_putstr("done.\nBooting the kernel.\n");
entry_offset = decompress_kernel(output, virt_addr, error);
debug_putstr("done.\nBooting the kernel (entry_offset: 0x");
debug_puthex(entry_offset);
debug_putstr(").\n");
/* Disable exception handling before booting the kernel */
cleanup_exception_handling();
return output;
return output + entry_offset;
}
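/*
 * Hedged usage sketch (jmp_to() is hypothetical): the assembly caller can
 * now jump straight to the returned pointer instead of assuming the entry
 * point is at the start of the output buffer:
 *
 *	void *entry = extract_kernel(rmode, output);
 *	jmp_to(entry);	// entry == output + (ehdr.e_entry - LOAD_PHYSICAL_ADDR)
 */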
void fortify_panic(const char *name)

View File

@@ -52,7 +52,6 @@ extern memptr free_mem_ptr;
extern memptr free_mem_end_ptr;
void *malloc(int size);
void free(void *where);
extern struct boot_params *boot_params;
void __putstr(const char *s);
void __puthex(unsigned long value);
#define error_putstr(__x) __putstr(__x)
@@ -170,9 +169,7 @@ static inline int count_immovable_mem_regions(void) { return 0; }
#endif
/* ident_map_64.c */
#ifdef CONFIG_X86_5LEVEL
extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d;
#endif
extern void kernel_add_identity_map(unsigned long start, unsigned long end);
/* Used by PAGE_KERN* macros: */

View File

@@ -3,18 +3,16 @@
#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
#define TRAMPOLINE_32BIT_CODE_SIZE 0x80
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
#ifndef __ASSEMBLER__
extern unsigned long *trampoline_32bit;
extern void trampoline_32bit_src(void *return_ptr);
extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
extern const u16 trampoline_ljmp_imm_offset;
#endif /* __ASSEMBLER__ */
#endif /* BOOT_COMPRESSED_PAGETABLE_H */

View File

@@ -16,11 +16,6 @@ unsigned int __section(".data") pgdir_shift = 39;
unsigned int __section(".data") ptrs_per_p4d = 1;
#endif
struct paging_config {
unsigned long trampoline_start;
unsigned long l5_required;
};
/* Buffer to preserve trampoline memory */
static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
@@ -29,11 +24,10 @@ static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
* purposes.
*
* Avoid putting the pointer into .bss as it will be cleared between
* paging_prepare() and extract_kernel().
* configure_5level_paging() and extract_kernel().
*/
unsigned long *trampoline_32bit __section(".data");
extern struct boot_params *boot_params;
int cmdline_find_option_bool(const char *option);
static unsigned long find_trampoline_placement(void)
@@ -54,7 +48,7 @@ static unsigned long find_trampoline_placement(void)
*
* Only look for values in the legacy ROM for non-EFI system.
*/
signature = (char *)&boot_params->efi_info.efi_loader_signature;
signature = (char *)&boot_params_ptr->efi_info.efi_loader_signature;
if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
ebda_start = *(unsigned short *)0x40e << 4;
@@ -70,10 +64,10 @@ static unsigned long find_trampoline_placement(void)
bios_start = round_down(bios_start, PAGE_SIZE);
/* Find the first usable memory region under bios_start. */
for (i = boot_params->e820_entries - 1; i >= 0; i--) {
for (i = boot_params_ptr->e820_entries - 1; i >= 0; i--) {
unsigned long new = bios_start;
entry = &boot_params->e820_table[i];
entry = &boot_params_ptr->e820_table[i];
/* Skip all entries above bios_start. */
if (bios_start <= entry->addr)
@@ -106,12 +100,13 @@ static unsigned long find_trampoline_placement(void)
return bios_start - TRAMPOLINE_32BIT_SIZE;
}
struct paging_config paging_prepare(void *rmode)
asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
{
struct paging_config paging_config = {};
void (*toggle_la57)(void *cr3);
bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
boot_params = rmode;
boot_params_ptr = bp;
/*
* Check if LA57 is desired and supported.
@@ -129,12 +124,22 @@ struct paging_config paging_prepare(void *rmode)
!cmdline_find_option_bool("no5lvl") &&
native_cpuid_eax(0) >= 7 &&
(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) {
paging_config.l5_required = 1;
l5_required = true;
/* Initialize variables for 5-level paging */
__pgtable_l5_enabled = 1;
pgdir_shift = 48;
ptrs_per_p4d = 512;
}
paging_config.trampoline_start = find_trampoline_placement();
/*
* The trampoline will not be used if the paging mode is already set to
* the desired one.
*/
if (l5_required == !!(native_read_cr4() & X86_CR4_LA57))
return;
trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
trampoline_32bit = (unsigned long *)find_trampoline_placement();
/* Preserve trampoline memory */
memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE);
@@ -143,32 +148,32 @@ struct paging_config paging_prepare(void *rmode)
memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
/* Copy trampoline code in place */
memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
toggle_la57 = memcpy(trampoline_32bit +
TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
/*
* Avoid the need for a stack in the 32-bit trampoline code, by using
* LJMP rather than LRET to return back to long mode. LJMP takes an
* immediate absolute address, which needs to be adjusted based on the
* placement of the trampoline.
*/
*(u32 *)((u8 *)toggle_la57 + trampoline_ljmp_imm_offset) +=
(unsigned long)toggle_la57;
/*
* The code below prepares page table in trampoline memory.
*
* The new page table will be used by trampoline code for switching
* from 4- to 5-level paging or vice versa.
*
* If switching is not required, the page table is unused: trampoline
* code wouldn't touch CR3.
*/
/*
* We are not going to use the page table in trampoline memory if we
* are already in the desired paging mode.
*/
if (paging_config.l5_required == !!(native_read_cr4() & X86_CR4_LA57))
goto out;
if (paging_config.l5_required) {
if (l5_required) {
/*
* For 4- to 5-level paging transition, set up current CR3 as
* the first and the only entry in a new top-level page table.
*/
trampoline_32bit[TRAMPOLINE_32BIT_PGTABLE_OFFSET] = __native_read_cr3() | _PAGE_TABLE_NOENC;
*trampoline_32bit = __native_read_cr3() | _PAGE_TABLE_NOENC;
} else {
unsigned long src;
@@ -181,38 +186,17 @@ struct paging_config paging_prepare(void *rmode)
* may be above 4G.
*/
src = *(unsigned long *)__native_read_cr3() & PAGE_MASK;
memcpy(trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long),
(void *)src, PAGE_SIZE);
memcpy(trampoline_32bit, (void *)src, PAGE_SIZE);
}
out:
return paging_config;
}
void cleanup_trampoline(void *pgtable)
{
void *trampoline_pgtable;
trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long);
toggle_la57(trampoline_32bit);
/*
* Move the top level page table out of trampoline memory,
* if it's there.
* Move the top level page table out of trampoline memory.
*/
if ((void *)__native_read_cr3() == trampoline_pgtable) {
memcpy(pgtable, trampoline_pgtable, PAGE_SIZE);
native_write_cr3((unsigned long)pgtable);
}
memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
native_write_cr3((unsigned long)pgtable);
/* Restore trampoline memory */
memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
/* Initialize variables for 5-level paging */
#ifdef CONFIG_X86_5LEVEL
if (__read_cr4() & X86_CR4_LA57) {
__pgtable_l5_enabled = 1;
pgdir_shift = 48;
ptrs_per_p4d = 512;
}
#endif
}
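/*
 * Editorial flow summary of configure_5level_paging() above: decide
 * whether 5-level paging is wanted, return early if CR4.LA57 already
 * matches, otherwise copy trampoline_32bit_src into low memory, patch
 * the LJMP immediate to an absolute address, prepare the trampoline
 * page table, call toggle_la57(), then move the top-level page table
 * out of trampoline memory and restore the saved trampoline contents.
 */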

View File

@@ -327,20 +327,25 @@ static void enforce_vmpl0(void)
*/
#define SNP_FEATURES_PRESENT (0)
u64 snp_get_unsupported_features(u64 status)
{
if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
return 0;
return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
}
void snp_check_features(void)
{
u64 unsupported;
if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
* Terminate the boot if hypervisor has enabled any feature lacking
* guest side implementation. Pass on the unsupported features mask through
* EXIT_INFO_2 of the GHCB protocol so that those features can be reported
* as part of the guest boot failure.
*/
unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
unsupported = snp_get_unsupported_features(sev_status);
if (unsupported) {
if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
@@ -350,10 +355,45 @@ void snp_check_features(void)
}
}
void sev_enable(struct boot_params *bp)
/*
* sev_check_cpu_support - Check for SEV support in the CPU capabilities
*
* Returns < 0 if SEV is not supported, otherwise the position of the
* encryption bit in the page table descriptors.
*/
static int sev_check_cpu_support(void)
{
unsigned int eax, ebx, ecx, edx;
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return -ENODEV;
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
return -ENODEV;
return ebx & 0x3f;
}
void sev_enable(struct boot_params *bp)
{
struct msr m;
int bitpos;
bool snp;
/*
@@ -373,26 +413,7 @@ void sev_enable(struct boot_params *bp)
* which is good enough.
*/
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
/*
* Check for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
if (sev_check_cpu_support() < 0)
return;
/*
@@ -403,26 +424,8 @@ void sev_enable(struct boot_params *bp)
/* Now repeat the checks with the SNP CPUID table. */
/* Recheck the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
return;
/*
* Recheck for the SME/SEV feature:
* CPUID Fn8000_001F[EAX]
* - Bit 0 - Secure Memory Encryption support
* - Bit 1 - Secure Encrypted Virtualization support
* CPUID Fn8000_001F[EBX]
* - Bits 5:0 - Pagetable bit position used to indicate encryption
*/
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1))) {
bitpos = sev_check_cpu_support();
if (bitpos < 0) {
if (snp)
error("SEV-SNP support indicated by CC blob, but not CPUID.");
return;
@@ -454,7 +457,24 @@ void sev_enable(struct boot_params *bp)
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
sme_me_mask = BIT_ULL(ebx & 0x3f);
sme_me_mask = BIT_ULL(bitpos);
}
/*
* sev_get_status - Retrieve the SEV status mask
*
* Returns 0 if the CPU is not SEV capable, otherwise the value of the
* AMD64_SEV MSR.
*/
u64 sev_get_status(void)
{
struct msr m;
if (sev_check_cpu_support() < 0)
return 0;
boot_rdmsr(MSR_AMD64_SEV, &m);
return m.q;
}
/* Search for Confidential Computing blob in the EFI config table. */
@@ -545,7 +565,7 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
* accessed after switchover.
*/
if (sev_snp_enabled()) {
unsigned long cc_info_pa = boot_params->cc_blob_address;
unsigned long cc_info_pa = boot_params_ptr->cc_blob_address;
struct cc_blob_sev_info *cc_info;
kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));

View File

@@ -406,7 +406,7 @@ xloadflags:
# define XLF1 0
#endif
#ifdef CONFIG_EFI_STUB
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
# ifdef CONFIG_EFI_MIXED
# define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
# else

View File

@@ -290,6 +290,7 @@ static void efi_stub_entry_update(void)
{
unsigned long addr = efi32_stub_entry;
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
#ifdef CONFIG_X86_64
/* Yes, this is really how we defined it :( */
addr = efi64_stub_entry - 0x200;
@@ -298,6 +299,7 @@ static void efi_stub_entry_update(void)
#ifdef CONFIG_EFI_MIXED
if (efi32_stub_entry != addr)
die("32-bit and 64-bit EFI entry points do not match\n");
#endif
#endif
put_unaligned_le32(addr, &buf[0x264]);
}

View File

@@ -6,6 +6,9 @@
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
#include <asm/cache.h>
.pushsection .noinstr.text, "ax"
@@ -20,3 +23,23 @@ SYM_FUNC_END(entry_ibpb)
EXPORT_SYMBOL_GPL(entry_ibpb);
.popsection
/*
* Define the VERW operand that is disguised as entry code so that
* it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched.
*/
.pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel)
UNWIND_HINT_EMPTY
ANNOTATE_NOENDBR
.word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel);
/* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel);
.popsection

View File

@@ -912,6 +912,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
BUG_IF_WRONG_CR3 no_user_check=1
popfl
popl %eax
CLEAR_CPU_BUFFERS
/*
* Return back to the vDSO, which will pop ecx and edx.
@@ -981,6 +982,7 @@ restore_all_switch_stack:
/* Restore user state */
RESTORE_REGS pop=4 # skip orig_eax/error_code
CLEAR_CPU_BUFFERS
.Lirq_return:
/*
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
@@ -1173,6 +1175,7 @@ SYM_CODE_START(asm_exc_nmi)
/* Not on SYSENTER stack. */
call exc_nmi
CLEAR_CPU_BUFFERS
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:

View File

@@ -223,6 +223,7 @@ syscall_return_via_sysret:
SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
swapgs
CLEAR_CPU_BUFFERS
sysretq
SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
@@ -656,6 +657,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
/* Restore RDI. */
popq %rdi
swapgs
CLEAR_CPU_BUFFERS
jmp .Lnative_iret
@@ -767,6 +769,8 @@ native_irq_return_ldt:
*/
popq %rax /* Restore user RAX */
CLEAR_CPU_BUFFERS
/*
* RSP now points to an ordinary IRET frame, except that the page
* is read-only and RSP[31:16] are preloaded with the userspace
@@ -1493,6 +1497,12 @@ nmi_restore:
std
movq $0, 5*8(%rsp) /* clear "NMI executing" */
/*
* Skip CLEAR_CPU_BUFFERS here, since it only helps in rare cases like
* NMI in kernel after user state is restored. For an unprivileged user
* these conditions are hard to meet.
*/
/*
* iretq reads the "iret" frame and exits the NMI stack in a
* single instruction. We are returning to kernel mode, so this
@@ -1511,6 +1521,7 @@ SYM_CODE_START(ignore_sysret)
UNWIND_HINT_EMPTY
ENDBR
mov $-ENOSYS, %eax
CLEAR_CPU_BUFFERS
sysretl
SYM_CODE_END(ignore_sysret)
#endif

View File

@@ -272,6 +272,7 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_unsafe_stack, SYM_L_GLOBAL)
xorl %r9d, %r9d
xorl %r10d, %r10d
swapgs
CLEAR_CPU_BUFFERS
sysretl
SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL)
ANNOTATE_NOENDBR

View File

@@ -79,4 +79,14 @@
# define BOOT_STACK_SIZE 0x1000
#endif
#ifndef __ASSEMBLY__
extern unsigned int output_len;
extern const unsigned long kernel_total_size;
unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
void (*error)(char *x));
extern struct boot_params *boot_params_ptr;
#endif
#endif /* _ASM_X86_BOOT_H */

View File

@@ -304,7 +304,7 @@
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_CLEAR_CPU_BUF (11*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */

View File

@@ -88,6 +88,8 @@ static inline void efi_fpu_end(void)
}
#ifdef CONFIG_X86_32
#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
#define arch_efi_call_virt_setup() \
({ \
efi_fpu_begin(); \
@@ -101,8 +103,7 @@ static inline void efi_fpu_end(void)
})
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
extern asmlinkage u64 __efi_call(void *fp, ...);
@@ -214,6 +215,8 @@ efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
#ifdef CONFIG_EFI_MIXED
#define EFI_ALLOC_LIMIT (efi_is_64bit() ? ULONG_MAX : U32_MAX)
#define ARCH_HAS_EFISTUB_WRAPPERS
static inline bool efi_is_64bit(void)
@@ -325,6 +328,13 @@ static inline u32 efi64_convert_status(efi_status_t status)
#define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \
(__efi64_split(phys), __efi64_split(size), __efi64_split(flags))
/* Memory Attribute Protocol */
#define __efi64_argmap_set_memory_attributes(protocol, phys, size, flags) \
((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
#define __efi64_argmap_clear_memory_attributes(protocol, phys, size, flags) \
((protocol), __efi64_split(phys), __efi64_split(size), __efi64_split(flags))
/*
* The macros below handle the plumbing for the argument mapping. To add a
* mapping for a specific EFI method, simply define a macro

View File

@@ -91,7 +91,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
static __always_inline void arch_exit_to_user_mode(void)
{
mds_user_clear_cpu_buffers();
amd_clear_divider();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode

View File

@@ -194,6 +194,19 @@
#endif
.endm
/*
* Macro to execute the VERW instruction, which mitigates transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded the VERW
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
*
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro CLEAR_CPU_BUFFERS
ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
verw _ASM_RIP(mds_verw_sel)
.Lskip_verw_\@:
.endm
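A hedged C equivalent of what this macro emits, mirroring the mds_clear_cpu_buffers() helper further down in this header (sketch only, not a drop-in replacement for the asm macro):

	static __always_inline void clear_cpu_buffers_sketch(void)
	{
		static const u16 ds = __KERNEL_DS;

		/* The memory-operand form of VERW clears CPU buffers on CPUs
		 * with the updated microcode; only CFLAGS.ZF is clobbered. */
		asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
	}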
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@@ -368,13 +381,14 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
extern u16 mds_verw_sel;
#include <asm/segment.h>
/**
@@ -400,17 +414,6 @@ static __always_inline void mds_clear_cpu_buffers(void)
asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}
/**
* mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
*
* Clear CPU buffers if the corresponding static key is enabled
*/
static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
}
/**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
*

View File

@@ -157,6 +157,7 @@ static __always_inline void sev_es_nmi_complete(void)
__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
extern void sev_enable(struct boot_params *bp);
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
@@ -202,12 +203,15 @@ void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
u64 snp_get_unsupported_features(u64 status);
u64 sev_get_status(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
static inline void sev_enable(struct boot_params *bp) { }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
@@ -225,6 +229,9 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
{
return -ENOTTY;
}
static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
static inline u64 sev_get_status(void) { return 0; }
#endif
#endif

View File

@@ -110,9 +110,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -251,7 +248,7 @@ static void __init mds_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
(mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -355,7 +352,7 @@ static void __init taa_select_mitigation(void)
* For guests that can't determine whether the correct microcode is
* present on host, enable the mitigation for UCODE_NEEDED as well.
*/
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
if (taa_nosmt || cpu_mitigations_auto_nosmt())
cpu_smt_disable(false);
@@ -423,7 +420,7 @@ static void __init mmio_select_mitigation(void)
*/
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM)))
static_branch_enable(&mds_user_clear);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
static_branch_enable(&mmio_stale_data_clear);
@@ -483,12 +480,12 @@ static void __init md_clear_update_mitigation(void)
if (cpu_mitigations_off())
return;
if (!static_key_enabled(&mds_user_clear))
if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
goto out;
/*
* mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
* mitigation, if necessary.
* X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
* Stale Data mitigation, if necessary.
*/
if (mds_mitigation == MDS_MITIGATION_OFF &&
boot_cpu_has_bug(X86_BUG_MDS)) {

View File

@@ -216,6 +216,90 @@ int intel_cpu_collect_info(struct ucode_cpu_info *uci)
}
EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme_early(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
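/*
 * Worked example (illustrative MSR value): tme_activate =
 * 0x0001000400000003 gives LOCKED=1, ENABLED=1, POLICY=0 (AES-XTS-128),
 * CRYPTO_ALGS bit 0 set (AES-XTS-128) and KEYID_BITS=4, so
 * nr_keyids = (1 << 4) - 1 = 15 and x86_phys_bits drops by 4
 * (e.g. 46 -> 42).
 */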
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -367,6 +451,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*/
if (detect_extended_topology_early(c) < 0)
detect_ht_early(c);
/*
* Adjust the number of physical bits early because it affects the
* valid bits of the MTRR mask registers.
*/
if (cpu_has(c, X86_FEATURE_TME))
detect_tme_early(c);
}
static void bsp_init_intel(struct cpuinfo_x86 *c)
@@ -527,90 +618,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
#define MSR_IA32_TME_ACTIVATE 0x982
/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x) (x & 0x1)
#define TME_ACTIVATE_ENABLED(x) (x & 0x2)
#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128 0
#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED 0
#define MKTME_DISABLED 1
#define MKTME_UNINITIALIZED 2
static int mktme_status = MKTME_UNINITIALIZED;
static void detect_tme(struct cpuinfo_x86 *c)
{
u64 tme_activate, tme_policy, tme_crypto_algs;
int keyid_bits = 0, nr_keyids = 0;
static u64 tme_activate_cpu0 = 0;
rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
if (mktme_status != MKTME_UNINITIALIZED) {
if (tme_activate != tme_activate_cpu0) {
/* Broken BIOS? */
pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
pr_err_once("x86/tme: MKTME is not usable\n");
mktme_status = MKTME_DISABLED;
/* Proceed. We may need to exclude bits from x86_phys_bits. */
}
} else {
tme_activate_cpu0 = tme_activate;
}
if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
pr_info_once("x86/tme: not enabled by BIOS\n");
mktme_status = MKTME_DISABLED;
return;
}
if (mktme_status != MKTME_UNINITIALIZED)
goto detect_keyid_bits;
pr_info("x86/tme: enabled by BIOS\n");
tme_policy = TME_ACTIVATE_POLICY(tme_activate);
if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
tme_crypto_algs);
mktme_status = MKTME_DISABLED;
}
detect_keyid_bits:
keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
nr_keyids = (1UL << keyid_bits) - 1;
if (nr_keyids) {
pr_info_once("x86/mktme: enabled by BIOS\n");
pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
} else {
pr_info_once("x86/mktme: disabled by BIOS\n");
}
if (mktme_status == MKTME_UNINITIALIZED) {
/* MKTME is usable */
mktme_status = MKTME_ENABLED;
}
/*
* KeyID bits effectively lower the number of physical address
* bits. Update cpuinfo_x86::x86_phys_bits accordingly.
*/
c->x86_phys_bits -= keyid_bits;
}
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@@ -747,9 +754,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_ia32_feat_ctl(c);
if (cpu_has(c, X86_FEATURE_TME))
detect_tme(c);
init_intel_misc_features(c);
split_lock_init();

View File

@@ -1017,10 +1017,12 @@ void __init e820__reserve_setup_data(void)
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
/*
* SETUP_EFI and SETUP_IMA are supplied by kexec and do not need
* to be reserved.
* SETUP_EFI, SETUP_IMA and SETUP_RNG_SEED are supplied by
* kexec and do not need to be reserved.
*/
if (data->type != SETUP_EFI && data->type != SETUP_IMA)
if (data->type != SETUP_EFI &&
data->type != SETUP_IMA &&
data->type != SETUP_RNG_SEED)
e820__range_update_kexec(pa_data,
sizeof(*data) + data->len,
E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);

View File

@@ -522,9 +522,6 @@ nmi_restart:
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
if (user_mode(regs))
mds_user_clear_cpu_buffers();
}
#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)

View File

@@ -2,7 +2,10 @@
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H
#define VMX_RUN_VMRESUME (1 << 0)
#define VMX_RUN_SAVE_SPEC_CTRL (1 << 1)
#define VMX_RUN_VMRESUME_SHIFT 0
#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT 1
#define VMX_RUN_VMRESUME BIT(VMX_RUN_VMRESUME_SHIFT)
#define VMX_RUN_SAVE_SPEC_CTRL BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
#endif /* __KVM_X86_VMX_RUN_FLAGS_H */

View File

@@ -106,7 +106,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
testb $VMX_RUN_VMRESUME, %bl
bt $VMX_RUN_VMRESUME_SHIFT, %bx
/* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -128,8 +128,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
/* Load guest RAX. This kills the @regs pointer! */
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Check EFLAGS.ZF from 'testb' above */
jz .Lvmlaunch
/* Clobbers EFLAGS.ZF */
CLEAR_CPU_BUFFERS
/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
jnc .Lvmlaunch
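/*
 * Editorial note: the switch from 'testb' (ZF) to 'bt' (CF) matters
 * because CLEAR_CPU_BUFFERS executes VERW, which clobbers ZF but leaves
 * CF intact, so the VMRESUME-vs-VMLAUNCH decision survives the buffer
 * clearing.
 */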
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"

View File

@@ -407,7 +407,8 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
vmx_fb_clear_ctrl_available;
/*
* If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
@@ -7120,11 +7121,14 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
{
guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
/*
* L1D Flush includes CPU buffer clear to mitigate MDS, but VERW
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mds_user_clear))
mds_clear_cpu_buffers();
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();

View File

@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
@@ -594,27 +594,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver.
*/
rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
if (soc_type == QCA_WCN3988)
rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f);
else
rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);
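/*
 * Worked example (illustrative soc_ver): for soc_ver = 0x00000302,
 * the default computes rom_ver = (0x300 >> 4) | 0x2 = 0x32 (firmware
 * e.g. "qca/crbtfw32.tlv"), while WCN3988 computes
 * rom_ver = (0x300 >> 5) | 0x2 = 0x1a (e.g. "qca/apbtfw1a.tlv").
 */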
if (soc_type == QCA_WCN6750)
qca_send_patch_config_cmd(hdev);
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
if (qca_is_wcn399x(soc_type)) {
switch (soc_type) {
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
snprintf(config.fwname, sizeof(config.fwname),
"qca/crbtfw%02x.tlv", rom_ver);
} else if (soc_type == QCA_QCA6390) {
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
"qca/apbtfw%02x.tlv", rom_ver);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
"qca/htbtfw%02x.tlv", rom_ver);
} else if (soc_type == QCA_WCN6750) {
break;
case QCA_WCN6750:
/* Choose the mbn file by default. If the mbn file is not found,
 * fall back to the tlv file.
 */
config.type = ELF_TYPE_PATCH;
snprintf(config.fwname, sizeof(config.fwname),
"qca/msbtfw%02x.mbn", rom_ver);
} else {
break;
case QCA_WCN6855:
snprintf(config.fwname, sizeof(config.fwname),
"qca/hpbtfw%02x.tlv", rom_ver);
break;
case QCA_WCN7850:
snprintf(config.fwname, sizeof(config.fwname),
"qca/hmtbtfw%02x.tlv", rom_ver);
break;
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/rampatch_%08x.bin", soc_ver);
}
@@ -630,27 +651,48 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
if (firmware_name)
if (firmware_name) {
snprintf(config.fwname, sizeof(config.fwname),
"qca/%s", firmware_name);
else if (qca_is_wcn399x(soc_type)) {
if (ver.soc_id == QCA_WCN3991_SOC_ID) {
} else {
switch (soc_type) {
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02xu.bin", rom_ver);
} else {
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
}
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02xu.bin", rom_ver);
} else {
"qca/apnv%02x.bin", rom_ver);
break;
case QCA_QCA6390:
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
"qca/htnv%02x.bin", rom_ver);
break;
case QCA_WCN6750:
snprintf(config.fwname, sizeof(config.fwname),
"qca/msnv%02x.bin", rom_ver);
break;
case QCA_WCN6855:
snprintf(config.fwname, sizeof(config.fwname),
"qca/hpnv%02x.bin", rom_ver);
break;
case QCA_WCN7850:
snprintf(config.fwname, sizeof(config.fwname),
"qca/hmtnv%02x.bin", rom_ver);
break;
default:
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
}
}
else if (soc_type == QCA_QCA6390)
snprintf(config.fwname, sizeof(config.fwname),
"qca/htnv%02x.bin", rom_ver);
else if (soc_type == QCA_WCN6750)
snprintf(config.fwname, sizeof(config.fwname),
"qca/msnv%02x.bin", rom_ver);
else
snprintf(config.fwname, sizeof(config.fwname),
"qca/nvm_%08x.bin", soc_ver);
err = qca_download_firmware(hdev, &config, soc_type, rom_ver);
if (err < 0) {
@@ -658,16 +700,25 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
if (soc_type >= QCA_WCN3991) {
switch (soc_type) {
case QCA_WCN3991:
case QCA_QCA6390:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
err = qca_disable_soc_logging(hdev);
if (err < 0)
return err;
break;
default:
break;
}
/* WCN399x and WCN6750 support the Microsoft vendor extension with 0xFD70 as the
* VsMsftOpCode.
*/
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
@@ -685,11 +736,18 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err;
}
if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) {
switch (soc_type) {
case QCA_WCN3991:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
/* get fw build info */
err = qca_read_fw_build_info(hdev);
if (err < 0)
return err;
break;
default:
break;
}
bt_dev_info(hdev, "QCA setup on UART is completed");

View File

@@ -142,11 +142,14 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
QCA_WCN3991,
QCA_QCA6390,
QCA_WCN6750,
QCA_WCN6855,
QCA_WCN7850,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -159,16 +162,6 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
enum qca_btsoc_type);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3991 ||
soc_type == QCA_WCN3998;
}
static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
{
return soc_type == QCA_WCN6750;
}
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -196,16 +189,6 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
return false;
}
static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type)
{
return false;
}
static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
return -EOPNOTSUPP;

View File

@@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -606,9 +607,18 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qca_is_wcn399x(qcadev->btsoc_type) ||
qca_is_wcn6750(qcadev->btsoc_type))
switch (qcadev->btsoc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
hu->init_speed = qcadev->init_speed;
break;
default:
break;
}
if (qcadev->oper_speed)
hu->oper_speed = qcadev->oper_speed;
@@ -1314,11 +1324,20 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
/* Give the controller time to process the request */
if (qca_is_wcn399x(qca_soc_type(hu)) ||
qca_is_wcn6750(qca_soc_type(hu)))
switch (qca_soc_type(hu)) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
usleep_range(1000, 10000);
else
break;
default:
msleep(300);
}
return 0;
}
@@ -1391,12 +1410,20 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
if (qca_is_wcn399x(qca_soc_type(hu)) ||
qca_is_wcn6750(qca_soc_type(hu))) {
switch (qca_soc_type(hu)) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
} else {
break;
default:
if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1425,13 +1452,29 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
/* Disable flow control for wcn3990 to deassert RTS while
* changing the baudrate of chip and host.
*/
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type))
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
hci_uart_set_flow_control(hu, true);
break;
if (soc_type == QCA_WCN3990) {
default:
break;
}
switch (soc_type) {
case QCA_WCN3990:
reinit_completion(&qca->drop_ev_comp);
set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
break;
default:
break;
}
qca_baudrate = qca_get_baudrate_value(speed);
@@ -1443,11 +1486,23 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
host_set_baudrate(hu, speed);
error:
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type))
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
hci_uart_set_flow_control(hu, false);
break;
if (soc_type == QCA_WCN3990) {
default:
break;
}
switch (soc_type) {
case QCA_WCN3990:
/* Wait for the controller to send the vendor event
* for the baudrate change command.
*/
@@ -1459,6 +1514,10 @@ error:
}
clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
break;
default:
break;
}
}
@@ -1620,12 +1679,20 @@ static int qca_regulator_init(struct hci_uart *hu)
}
}
if (qca_is_wcn399x(soc_type)) {
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
/* Forcefully enable wcn399x to enter in to boot mode. */
host_set_baudrate(hu, 2400);
ret = qca_send_power_pulse(hu, false);
if (ret)
return ret;
break;
default:
break;
}
/* For wcn6750 need to enable gpio bt_en */
@@ -1642,10 +1709,18 @@ static int qca_regulator_init(struct hci_uart *hu)
qca_set_speed(hu, QCA_INIT_SPEED);
if (qca_is_wcn399x(soc_type)) {
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
ret = qca_send_power_pulse(hu, true);
if (ret)
return ret;
break;
default:
break;
}
/* Now the device is in ready state to communicate with host.
@@ -1679,10 +1754,18 @@ static int qca_power_on(struct hci_dev *hdev)
if (!hu->serdev)
return 0;
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
ret = qca_regulator_init(hu);
} else {
break;
default:
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bt_en) {
gpiod_set_value_cansleep(qcadev->bt_en, 1);
@@ -1705,6 +1788,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
const char *soc_name;
ret = qca_check_speeds(hu);
if (ret)
@@ -1719,9 +1803,30 @@ static int qca_setup(struct hci_uart *hu)
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
bt_dev_info(hdev, "setting up %s",
qca_is_wcn399x(soc_type) ? "wcn399x" :
(soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390");
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
soc_name = "wcn399x";
break;
case QCA_WCN6750:
soc_name = "wcn6750";
break;
case QCA_WCN6855:
soc_name = "wcn6855";
break;
case QCA_WCN7850:
soc_name = "wcn7850";
break;
default:
soc_name = "ROME/QCA6390";
}
bt_dev_info(hdev, "setting up %s", soc_name);
qca->memdump_state = QCA_MEMDUMP_IDLE;
@@ -1732,15 +1837,33 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
/* Set BDA quirk bit for reading BDA value from fwnode property
* only if that property exists in DT.
*/
if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
} else {
bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
}
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
goto out;
} else {
break;
default:
qca_set_speed(hu, QCA_INIT_SPEED);
}
@@ -1754,8 +1877,17 @@ retry:
qca_baudrate = qca_get_baudrate_value(speed);
}
if (!(qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type))) {
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
break;
default:
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1824,7 +1956,18 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
static const struct qca_device_data qca_soc_data_wcn3990 = {
static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
.soc_type = QCA_WCN3988,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
{ "vddxo", 80000 },
{ "vddrf", 300000 },
{ "vddch0", 450000 },
},
.num_vregs = 4,
};
static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1835,7 +1978,7 @@ static const struct qca_device_data qca_soc_data_wcn3990 = {
.num_vregs = 4,
};
static const struct qca_device_data qca_soc_data_wcn3991 = {
static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = {
.soc_type = QCA_WCN3991,
.vregs = (struct qca_vreg []) {
{ "vddio", 15000 },
@@ -1847,7 +1990,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = {
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static const struct qca_device_data qca_soc_data_wcn3998 = {
static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = {
.soc_type = QCA_WCN3998,
.vregs = (struct qca_vreg []) {
{ "vddio", 10000 },
@@ -1858,13 +2001,13 @@ static const struct qca_device_data qca_soc_data_wcn3998 = {
.num_vregs = 4,
};
static const struct qca_device_data qca_soc_data_qca6390 = {
static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = {
.soc_type = QCA_QCA6390,
.num_vregs = 0,
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static const struct qca_device_data qca_soc_data_wcn6750 = {
static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = {
.soc_type = QCA_WCN6750,
.vregs = (struct qca_vreg []) {
{ "vddio", 5000 },
@@ -1881,6 +2024,34 @@ static const struct qca_device_data qca_soc_data_wcn6750 = {
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static const struct qca_device_data qca_soc_data_wcn6855 = {
.soc_type = QCA_WCN6855,
.vregs = (struct qca_vreg []) {
{ "vddio", 5000 },
{ "vddbtcxmx", 126000 },
{ "vddrfacmn", 12500 },
{ "vddrfa0p8", 102000 },
{ "vddrfa1p7", 302000 },
{ "vddrfa1p2", 257000 },
},
.num_vregs = 6,
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = {
.soc_type = QCA_WCN7850,
.vregs = (struct qca_vreg []) {
{ "vddio", 5000 },
{ "vddaon", 26000 },
{ "vdddig", 126000 },
{ "vddrfa0p8", 102000 },
{ "vddrfa1p2", 257000 },
{ "vddrfa1p9", 302000 },
},
.num_vregs = 6,
.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
};
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_serdev *qcadev;
@@ -1906,11 +2077,18 @@ static void qca_power_shutdown(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qca_is_wcn399x(soc_type)) {
switch (soc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
host_set_baudrate(hu, 2400);
qca_send_power_pulse(hu, false);
qca_regulator_disable(qcadev);
} else if (soc_type == QCA_WCN6750) {
break;
case QCA_WCN6750:
case QCA_WCN6855:
gpiod_set_value_cansleep(qcadev->bt_en, 0);
msleep(100);
qca_regulator_disable(qcadev);
@@ -1918,7 +2096,9 @@ static void qca_power_shutdown(struct hci_uart *hu)
sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl);
bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state);
}
} else if (qcadev->bt_en) {
break;
default:
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
@@ -2043,10 +2223,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");
if (data &&
(qca_is_wcn399x(data->soc_type) ||
qca_is_wcn6750(data->soc_type))) {
if (data)
qcadev->btsoc_type = data->soc_type;
else
qcadev->btsoc_type = QCA_ROME;
switch (qcadev->btsoc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
@@ -2065,14 +2254,19 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) {
if (IS_ERR_OR_NULL(qcadev->bt_en) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
power_ctrl_enabled = false;
}
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750)
if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
@@ -2086,12 +2280,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
BT_ERR("wcn3990 serdev registration failed");
return err;
}
} else {
if (data)
qcadev->btsoc_type = data->soc_type;
else
qcadev->btsoc_type = QCA_ROME;
break;
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR_OR_NULL(qcadev->bt_en)) {
@@ -2147,12 +2338,24 @@ static void qca_serdev_remove(struct serdev_device *serdev)
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
struct qca_power *power = qcadev->bt_power;
if ((qca_is_wcn399x(qcadev->btsoc_type) ||
qca_is_wcn6750(qcadev->btsoc_type)) &&
power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
switch (qcadev->btsoc_type) {
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
if (power->vregs_on) {
qca_power_shutdown(&qcadev->serdev_hu);
break;
}
fallthrough;
default:
if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
}
hci_uart_unregister_device(&qcadev->serdev_hu);
}
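In the remove path above, regulator-backed SoCs power down through qca_power_shutdown() only when vregs_on is set; otherwise execution falls through to the shared clock teardown. A small sketch of that guarded-break-plus-fallthrough idiom (all names hypothetical), which keeps the common cleanup in one place:

#include <stdbool.h>
#include <stdio.h>

enum soc { SOC_WCN6855, SOC_OTHER };

static void remove_device(enum soc soc, bool vregs_on, bool have_clk)
{
	switch (soc) {
	case SOC_WCN6855:
		if (vregs_on) {
			printf("regulator shutdown\n");
			break;		/* regulators handled, skip clock path */
		}
		/* fall through: regulators never enabled, clean up like others */
	default:
		if (have_clk)
			printf("disable suspend clock\n");
	}
}

int main(void)
{
	remove_device(SOC_WCN6855, false, true);	/* prints the clock teardown */
	return 0;
}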
@@ -2329,10 +2532,13 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
{ .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750},
{ .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855},
{ .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);

View File

@@ -21,24 +21,24 @@
#define MISC_CLK_ENB 0x48
#define OSC_CTRL 0x50
#define OSC_CTRL_OSC_FREQ_MASK (3<<30)
#define OSC_CTRL_OSC_FREQ_13MHZ (0<<30)
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3<<30)
#define OSC_CTRL_MASK (0x3f2 | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_CTRL_OSC_FREQ_MASK (3u<<30)
#define OSC_CTRL_OSC_FREQ_13MHZ (0u<<30)
#define OSC_CTRL_OSC_FREQ_19_2MHZ (1u<<30)
#define OSC_CTRL_OSC_FREQ_12MHZ (2u<<30)
#define OSC_CTRL_OSC_FREQ_26MHZ (3u<<30)
#define OSC_CTRL_MASK (0x3f2u | OSC_CTRL_OSC_FREQ_MASK)
#define OSC_CTRL_PLL_REF_DIV_MASK (3<<28)
#define OSC_CTRL_PLL_REF_DIV_1 (0<<28)
#define OSC_CTRL_PLL_REF_DIV_2 (1<<28)
#define OSC_CTRL_PLL_REF_DIV_4 (2<<28)
#define OSC_CTRL_PLL_REF_DIV_MASK (3u<<28)
#define OSC_CTRL_PLL_REF_DIV_1 (0u<<28)
#define OSC_CTRL_PLL_REF_DIV_2 (1u<<28)
#define OSC_CTRL_PLL_REF_DIV_4 (2u<<28)
#define OSC_FREQ_DET 0x58
#define OSC_FREQ_DET_TRIG (1<<31)
#define OSC_FREQ_DET_TRIG (1u<<31)
#define OSC_FREQ_DET_STATUS 0x5c
#define OSC_FREQ_DET_BUSY (1<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFF
#define OSC_FREQ_DET_BUSY (1u<<31)
#define OSC_FREQ_DET_CNT_MASK 0xFFFFu
#define TEGRA20_CLK_PERIPH_BANKS 3
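The macro changes above add a u suffix because 1<<31 shifts a signed int into its sign bit, which is undefined behavior in C; 1u<<31 performs the shift on an unsigned operand and also compares cleanly against u32 register values. A tiny standalone illustration:

#include <stdio.h>

#define DET_TRIG_BAD  (1 << 31)		/* UB: shift into the sign bit of int */
#define DET_TRIG_GOOD (1u << 31)	/* well-defined: unsigned shift */

int main(void)
{
	unsigned int reg = 0x80000000u;	/* e.g. a busy/trigger status register */

	if (reg & DET_TRIG_GOOD)
		printf("trigger bit set\n");
	return 0;
}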

View File

@@ -2952,6 +2952,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
if (min_pstate > cpu->max_perf_ratio)
min_pstate = cpu->max_perf_ratio;
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;

View File

@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -384,7 +385,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
FSL_QDMA_CMD_RWTTYPE_OFFSET);
FSL_QDMA_CMD_RWTTYPE_OFFSET) |
FSL_QDMA_CMD_PF;
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@@ -1201,10 +1203,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
ret = fsl_qdma_irq_init(pdev, fsl_qdma);
if (ret)
return ret;
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1243,19 +1241,22 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
ret = fsl_qdma_irq_init(pdev, fsl_qdma);
if (ret)
return ret;
ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
return 0;
}
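The reordering above moves fsl_qdma_irq_init() after fsl_qdma_reg_init() and registers the DMA device last, so an interrupt can never fire against uninitialized hardware and a registered device is never left behind when a later step fails. A generic sketch of that probe ordering (all function names hypothetical):

#include <stdio.h>

static int init_registers(void)  { puts("registers ready"); return 0; }
static int request_irqs(void)    { puts("irqs live");       return 0; }
static int register_device(void) { puts("device visible");  return 0; }

/* Order: configure hardware -> enable interrupts -> expose the device. */
static int probe(void)
{
	int ret;

	ret = init_registers();	/* 1: bring the HW to a sane state first */
	if (ret)
		return ret;

	ret = request_irqs();	/* 2: only now may the IRQ handler run safely */
	if (ret)
		return ret;

	return register_device();	/* 3: register last; nothing to unwind */
}

int main(void) { return probe(); }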

View File

@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;

View File

@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
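The one-line capsule-loader fix above matters on 32-bit kernels with 64-bit physical addresses, where sizeof(void *) is 4 but sizeof(phys_addr_t) is 8, so allocating by pointer size under-sizes the array of physical addresses. A minimal demonstration of the mismatch, using uint64_t to stand in for phys_addr_t:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* e.g. a 32-bit CPU with PAE/LPAE */

int main(void)
{
	/* On an ILP32 target the first prints 4 and the second 8:
	 * kzalloc(sizeof(void *)) would be half the needed size. */
	printf("sizeof(void *)      = %zu\n", sizeof(void *));
	printf("sizeof(phys_addr_t) = %zu\n", sizeof(phys_addr_t));
	return 0;
}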

View File

@@ -185,8 +185,27 @@ static const struct attribute_group efi_subsys_attr_group = {
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
static bool generic_ops_supported(void)
{
unsigned long name_size;
efi_status_t status;
efi_char16_t name;
efi_guid_t guid;
name_size = sizeof(name);
status = efi.get_next_variable(&name_size, &name, &guid);
if (status == EFI_UNSUPPORTED)
return false;
return true;
}
static int generic_ops_register(void)
{
if (!generic_ops_supported())
return 0;
generic_ops.get_variable = efi.get_variable;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -200,6 +219,9 @@ static int generic_ops_register(void)
static void generic_ops_unregister(void)
{
if (!generic_ops.get_variable)
return;
efivars_unregister(&generic_efivars);
}

View File

@@ -84,6 +84,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch-stub.o

View File

@@ -22,12 +22,15 @@
* Return: status code
*/
efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
unsigned long max, unsigned long align)
unsigned long max, unsigned long align,
int memory_type)
{
efi_physical_addr_t alloc_addr;
efi_status_t status;
int slack;
max = min(max, EFI_ALLOC_LIMIT);
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
@@ -36,7 +39,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
slack = align / EFI_PAGE_SIZE - 1;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,
EFI_LOADER_DATA, size / EFI_PAGE_SIZE + slack,
memory_type, size / EFI_PAGE_SIZE + slack,
&alloc_addr);
if (status != EFI_SUCCESS)
return status;

View File

@@ -180,7 +180,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
* locate the kernel at a randomized offset in physical memory.
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed);
reserve_addr, phys_seed,
EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
@@ -190,10 +191,11 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (status != EFI_SUCCESS) {
if (!check_image_region((u64)_text, kernel_memsize)) {
efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n");
} else if (IS_ALIGNED((u64)_text, min_kimg_align)) {
} else if (IS_ALIGNED((u64)_text, min_kimg_align) &&
(u64)_end < EFI_ALLOC_LIMIT) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the alignment is suitable.
* UEFI PE/COFF loader if the placement is suitable.
*/
*image_addr = (u64)_text;
*reserve_size = 0;
@@ -201,7 +203,8 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
ULONG_MAX, min_kimg_align);
ULONG_MAX, min_kimg_align,
EFI_LOADER_CODE);
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");

View File

@@ -216,6 +216,8 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
} else if (!strcmp(param, "noinitrd")) {
efi_noinitrd = true;
} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
efi_no5lvl = true;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
efi_novamap |= parse_option_str(val, "novamap");

View File

@@ -29,6 +29,11 @@
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
#ifndef EFI_ALLOC_LIMIT
#define EFI_ALLOC_LIMIT ULONG_MAX
#endif
extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
extern int efi_loglevel;
@@ -415,6 +420,26 @@ union efi_dxe_services_table {
} mixed_mode;
};
typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
union efi_memory_attribute_protocol {
struct {
efi_status_t (__efiapi *get_memory_attributes)(
efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
efi_status_t (__efiapi *set_memory_attributes)(
efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
efi_status_t (__efiapi *clear_memory_attributes)(
efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
};
struct {
u32 get_memory_attributes;
u32 set_memory_attributes;
u32 clear_memory_attributes;
} mixed_mode;
};
typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
union efi_uga_draw_protocol {
@@ -880,7 +905,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
unsigned long *addr, unsigned long random_seed,
int memory_type, unsigned long alloc_min,
unsigned long alloc_max);
efi_status_t efi_random_get_seed(void);
@@ -907,7 +934,8 @@ efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max);
efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
unsigned long max, unsigned long align);
unsigned long max, unsigned long align,
int memory_type);
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min);

View File

@@ -89,9 +89,12 @@ efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
efi_physical_addr_t alloc_addr;
efi_status_t status;
max = min(max, EFI_ALLOC_LIMIT);
if (EFI_ALLOC_ALIGN > EFI_PAGE_SIZE)
return efi_allocate_pages_aligned(size, addr, max,
EFI_ALLOC_ALIGN);
EFI_ALLOC_ALIGN,
EFI_LOADER_DATA);
alloc_addr = ALIGN_DOWN(max + 1, EFI_ALLOC_ALIGN) - 1;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_MAX_ADDRESS,

View File

@@ -16,7 +16,8 @@
*/
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
unsigned long align_shift)
unsigned long align_shift,
u64 alloc_min, u64 alloc_max)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@@ -29,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
(u64)ULONG_MAX);
alloc_max);
if (region_end < size)
return 0;
first_slot = round_up(md->phys_addr, align);
first_slot = round_up(max(md->phys_addr, alloc_min), align);
last_slot = round_down(region_end - size + 1, align);
if (first_slot > last_slot)
@@ -53,7 +54,10 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
efi_status_t efi_random_alloc(unsigned long size,
unsigned long align,
unsigned long *addr,
unsigned long random_seed)
unsigned long random_seed,
int memory_type,
unsigned long alloc_min,
unsigned long alloc_max)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@@ -75,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
slots = get_entry_num_slots(md, size, ilog2(align));
slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
alloc_max);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
@@ -118,7 +123,7 @@ efi_status_t efi_random_alloc(unsigned long size,
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
EFI_LOADER_DATA, pages, &target);
memory_type, pages, &target);
if (status == EFI_SUCCESS)
*addr = target;
break;
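get_entry_num_slots() above counts how many aligned placements of size bytes fit in a memory descriptor once it is clipped to [alloc_min, alloc_max]. The same arithmetic as a standalone sketch on plain integers (descriptor types dropped, clamping expressed only through alloc_max):

#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

/* Number of align-spaced start addresses for `size` bytes inside
 * [max(base, alloc_min), min(base + len - 1, alloc_max)]. */
static uint64_t num_slots(uint64_t base, uint64_t len, uint64_t size,
			  uint64_t align, uint64_t alloc_min, uint64_t alloc_max)
{
	uint64_t region_end = base + len - 1;
	uint64_t first, last;

	if (region_end > alloc_max)
		region_end = alloc_max;
	if (region_end < size)
		return 0;

	first = ROUND_UP(base > alloc_min ? base : alloc_min, align);
	last  = ROUND_DOWN(region_end - size + 1, align);
	return first > last ? 0 : (last - first) / align + 1;
}

int main(void)
{
	/* 16 MiB region at 1 MiB, 2 MiB allocation, 2 MiB alignment -> 7 slots */
	printf("%llu\n", (unsigned long long)
	       num_slots(0x100000, 16 << 20, 2 << 20, 2 << 20, 0, UINT64_MAX));
	return 0;
}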

View File

@@ -0,0 +1,95 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/efi.h>
#include <asm/boot.h>
#include <asm/desc.h>
#include <asm/efi.h>
#include "efistub.h"
#include "x86-stub.h"
bool efi_no5lvl;
static void (*la57_toggle)(void *cr3);
static const struct desc_struct gdt[] = {
[GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
[GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
};
/*
* Enabling (or disabling) 5 level paging is tricky, because it can only be
* done from 32-bit mode with paging disabled. This means not only that the
* code itself must be running from 32-bit addressable physical memory, but
* also that the root page table must be 32-bit addressable, as programming
* a 64-bit value into CR3 when running in 32-bit mode is not supported.
*/
efi_status_t efi_setup_5level_paging(void)
{
u8 tmpl_size = (u8 *)&trampoline_ljmp_imm_offset - (u8 *)&trampoline_32bit_src;
efi_status_t status;
u8 *la57_code;
if (!efi_is_64bit())
return EFI_SUCCESS;
/* check for 5 level paging support */
if (native_cpuid_eax(0) < 7 ||
!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
return EFI_SUCCESS;
/* allocate some 32-bit addressable memory for code and a page table */
status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
U32_MAX);
if (status != EFI_SUCCESS)
return status;
la57_toggle = memcpy(la57_code, trampoline_32bit_src, tmpl_size);
memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
/*
* To avoid the need to allocate a 32-bit addressable stack, the
* trampoline uses a LJMP instruction to switch back to long mode.
* LJMP takes an absolute destination address, which needs to be
* fixed up at runtime.
*/
*(u32 *)&la57_code[trampoline_ljmp_imm_offset] += (unsigned long)la57_code;
efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
return EFI_SUCCESS;
}
void efi_5level_switch(void)
{
bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
u64 *cr3 = (u64 *)__native_read_cr3();
u64 *new_cr3;
if (!la57_toggle || !need_toggle)
return;
if (!have_la57) {
/*
* 5 level paging will be enabled, so a root level page needs
* to be allocated from the 32-bit addressable physical region,
* with its first entry referring to the existing hierarchy.
*/
new_cr3 = memset(pgt, 0, PAGE_SIZE);
new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
} else {
/* take the new root table pointer from the current entry #0 */
new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
/* copy the new root table if it is not 32-bit addressable */
if ((u64)new_cr3 > U32_MAX)
new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
}
native_load_gdt(&(struct desc_ptr){ sizeof(gdt) - 1, (u64)gdt });
la57_toggle(new_cr3);
}
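efi_5level_switch() above acts only when the configured and the firmware-provided CR4.LA57 states disagree, and any root table it hands to the 32-bit trampoline must sit below 4 GiB because 32-bit mode cannot load a 64-bit CR3. The two decisions, reduced to a checkable user-space sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toggle only when desired and current CR4.LA57 disagree. */
static bool need_toggle(bool want_la57, bool have_la57)
{
	return want_la57 ^ have_la57;
}

/* 32-bit mode cannot program a 64-bit CR3, so a root table living
 * above 4 GiB must first be copied to 32-bit addressable memory. */
static bool root_needs_copy(uint64_t new_cr3)
{
	return new_cr3 > UINT32_MAX;
}

int main(void)
{
	printf("toggle: %d, copy: %d\n",
	       need_toggle(true, false),		/* 1: must switch */
	       root_needs_copy(0x100000000ull));	/* 1: table at 4 GiB */
	return 0;
}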

View File

@@ -15,16 +15,16 @@
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/boot.h>
#include <asm/kaslr.h>
#include <asm/sev.h>
#include "efistub.h"
/* Maximum physical address for 64-bit kernel with 4-level paging */
#define MAXMEM_X86_64_4LEVEL (1ull << 46)
#include "x86-stub.h"
const efi_system_table_t *efi_system_table;
const efi_dxe_services_table_t *efi_dxe_table;
extern u32 image_offset;
static efi_loaded_image_t *image = NULL;
static efi_memory_attribute_protocol_t *memattr;
static efi_status_t
preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
@@ -212,8 +212,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
static void
adjust_memory_range_protection(unsigned long start, unsigned long size)
efi_status_t efi_adjust_memory_range_protection(unsigned long start,
unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@@ -221,12 +221,22 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
if (efi_dxe_table == NULL)
return;
rounded_start = rounddown(start, EFI_PAGE_SIZE);
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
status = efi_call_proto(memattr, clear_memory_attributes,
rounded_start,
rounded_end - rounded_start,
EFI_MEMORY_XP);
if (status != EFI_SUCCESS)
efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
return status;
}
if (efi_dxe_table == NULL)
return EFI_SUCCESS;
/*
* Don't modify memory region attributes, they are
* already suitable, to lower the possibility to
@@ -238,7 +248,7 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
if (status != EFI_SUCCESS)
return;
break;
next = desc.base_address + desc.length;
@@ -263,69 +273,26 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unprotect_start,
unprotect_start + unprotect_size,
status);
break;
}
}
return EFI_SUCCESS;
}
/*
* Trampoline takes 2 pages and can be loaded in first megabyte of memory
* with its end placed between 128k and 640k where BIOS might start.
* (see arch/x86/boot/compressed/pgtable_64.c)
*
* We cannot find exact trampoline placement since memory map
* can be modified by UEFI, and it can alter the computed address.
*/
#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024)
#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024)
void startup_32(struct boot_params *boot_params);
static void
setup_memory_protection(unsigned long image_base, unsigned long image_size)
static efi_char16_t *efistub_fw_vendor(void)
{
/*
* Allow execution of possible trampoline used
* for switching between 4- and 5-level page tables
* and relocated kernel image.
*/
unsigned long vendor = efi_table_attr(efi_system_table, fw_vendor);
adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE,
TRAMPOLINE_PLACEMENT_SIZE);
#ifdef CONFIG_64BIT
if (image_base != (unsigned long)startup_32)
adjust_memory_range_protection(image_base, image_size);
#else
/*
* Clear protection flags on a whole range of possible
* addresses used for KASLR. We don't need to do that
* on x86_64, since KASLR/extraction is performed after
* dedicated identity page tables are built and we only
* need to remove possible protection on relocated image
* itself disregarding further relocations.
*/
adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
#endif
return (efi_char16_t *)vendor;
}
static const efi_char16_t apple[] = L"Apple";
static void setup_quirks(struct boot_params *boot_params,
unsigned long image_base,
unsigned long image_size)
static void setup_quirks(struct boot_params *boot_params)
{
efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
efi_table_attr(efi_system_table, fw_vendor);
if (!memcmp(fw_vendor, apple, sizeof(apple))) {
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
retrieve_apple_device_properties(boot_params);
}
if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES))
setup_memory_protection(image_base, image_size);
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES) &&
!memcmp(efistub_fw_vendor(), apple, sizeof(apple)))
retrieve_apple_device_properties(boot_params);
}
/*
@@ -478,7 +445,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
}
image_base = efi_table_attr(image, image_base);
image_offset = (void *)startup_32 - image_base;
status = efi_allocate_pages(sizeof(struct boot_params),
(unsigned long *)&boot_params, ULONG_MAX);
@@ -760,85 +726,139 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
return EFI_SUCCESS;
}
/*
* On success, we return the address of startup_32, which has potentially been
* relocated by efi_relocate_kernel.
* On failure, we exit to the firmware via efi_exit instead of returning.
*/
asmlinkage unsigned long efi_main(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
static bool have_unsupported_snp_features(void)
{
unsigned long bzimage_addr = (unsigned long)startup_32;
unsigned long buffer_start, buffer_end;
u64 unsupported;
unsupported = snp_get_unsupported_features(sev_get_status());
if (unsupported) {
efi_err("Unsupported SEV-SNP features detected: 0x%llx\n",
unsupported);
return true;
}
return false;
}
static void efi_get_seed(void *seed, int size)
{
efi_get_random_bytes(size, seed);
/*
* This only updates seed[0] when running on 32-bit, but in that case,
* seed[1] is not used anyway, as there is no virtual KASLR on 32-bit.
*/
*(unsigned long *)seed ^= kaslr_get_random_long("EFI");
}
static void error(char *str)
{
efi_warn("Decompression failed: %s\n", str);
}
static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
unsigned long addr, alloc_size, entry;
efi_status_t status;
u32 seed[2] = {};
/* determine the required size of the allocation */
alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
MIN_KERNEL_ALIGN);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
static const efi_char16_t ami[] = L"American Megatrends";
efi_get_seed(seed, sizeof(seed));
virt_addr += (range * seed[1]) >> 32;
virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
/*
* Older Dell systems with AMI UEFI firmware v2.0 may hang
* while decompressing the kernel if physical address
* randomization is enabled.
*
* https://bugzilla.kernel.org/show_bug.cgi?id=218173
*/
if (efi_system_table->hdr.revision <= EFI_2_00_SYSTEM_TABLE_REVISION &&
!memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
seed[0] = 0;
}
boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
}
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
seed[0], EFI_LOADER_CODE,
LOAD_PHYSICAL_ADDR,
EFI_X86_KERNEL_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
return status;
entry = decompress_kernel((void *)addr, virt_addr, error);
if (entry == ULONG_MAX) {
efi_free(alloc_size, addr);
return EFI_LOAD_ERROR;
}
*kernel_entry = addr + entry;
return efi_adjust_memory_range_protection(addr, kernel_total_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
struct boot_params *boot_params)
{
/* enter decompressed kernel with boot_params pointer in RSI/ESI */
asm("jmp *%0"::"r"(kernel_addr), "S"(boot_params));
unreachable();
}
/*
* On success, this routine will jump to the relocated image directly and never
* return. On failure, it will exit to the firmware via efi_exit() instead of
* returning.
*/
void __noreturn efi_stub_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
struct setup_header *hdr = &boot_params->hdr;
const struct linux_efi_initrd *initrd = NULL;
unsigned long kernel_entry;
efi_status_t status;
boot_params_ptr = boot_params;
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
if (efi_dxe_table &&
efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
efi_warn("Ignoring DXE services table: invalid signature\n");
efi_dxe_table = NULL;
if (have_unsupported_snp_features())
efi_exit(handle, EFI_UNSUPPORTED);
if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
if (efi_dxe_table &&
efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
efi_warn("Ignoring DXE services table: invalid signature\n");
efi_dxe_table = NULL;
}
}
/*
* If the kernel isn't already loaded at a suitable address,
* relocate it.
*
* It must be loaded above LOAD_PHYSICAL_ADDR.
*
* The maximum address for 64-bit is 1 << 46 for 4-level paging. This
* is defined as the macro MAXMEM, but unfortunately that is not a
* compile-time constant if 5-level paging is configured, so we instead
* define our own macro for use here.
*
* For 32-bit, the maximum address is complicated to figure out, for
* now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
* KASLR uses.
*
* Also relocate it if image_offset is zero, i.e. the kernel wasn't
* loaded by LoadImage, but rather by a bootloader that called the
* handover entry. The reason we must always relocate in this case is
* to handle the case of systemd-boot booting a unified kernel image,
* which is a PE executable that contains the bzImage and an initrd as
* COFF sections. The initrd section is placed after the bzImage
* without ensuring that there are at least init_size bytes available
* for the bzImage, and thus the compressed kernel's startup code may
* overwrite the initrd unless it is moved out of the way.
*/
/* grab the memory attributes protocol if it exists */
efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
buffer_start = ALIGN(bzimage_addr - image_offset,
hdr->kernel_alignment);
buffer_end = buffer_start + hdr->init_size;
if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
(IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
(IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
(image_offset == 0)) {
extern char _bss[];
status = efi_relocate_kernel(&bzimage_addr,
(unsigned long)_bss - bzimage_addr,
hdr->init_size,
hdr->pref_address,
hdr->kernel_alignment,
LOAD_PHYSICAL_ADDR);
if (status != EFI_SUCCESS) {
efi_err("efi_relocate_kernel() failed!\n");
goto fail;
}
/*
* Now that we've copied the kernel elsewhere, we no longer
* have a set up block before startup_32(), so reset image_offset
* to zero in case it was set earlier.
*/
image_offset = 0;
status = efi_setup_5level_paging();
if (status != EFI_SUCCESS) {
efi_err("efi_setup_5level_paging() failed!\n");
goto fail;
}
#ifdef CONFIG_CMDLINE_BOOL
@@ -858,6 +878,12 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
}
}
status = efi_decompress_kernel(&kernel_entry);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
goto fail;
}
/*
* At this point, an initrd may already have been loaded by the
* bootloader and passed via bootparams. We permit an initrd loaded
@@ -897,7 +923,7 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
setup_efi_pci(boot_params);
setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
setup_quirks(boot_params);
status = exit_boot(boot_params, handle);
if (status != EFI_SUCCESS) {
@@ -905,9 +931,38 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
goto fail;
}
return bzimage_addr;
/*
* Call the SEV init code while still running with the firmware's
* GDT/IDT, so #VC exceptions will be handled by EFI.
*/
sev_enable(boot_params);
efi_5level_switch();
enter_kernel(kernel_entry, boot_params);
fail:
efi_err("efi_main() failed!\n");
efi_err("efi_stub_entry() failed!\n");
efi_exit(handle, status);
}
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
extern char _bss[], _ebss[];
memset(_bss, 0, _ebss - _bss);
efi_stub_entry(handle, sys_table_arg, boot_params);
}
#ifndef CONFIG_EFI_MIXED
extern __alias(efi_handover_entry)
void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params);
extern __alias(efi_handover_entry)
void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params);
#endif
#endif

View File

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/efi.h>
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
efi_status_t efi_adjust_memory_range_protection(unsigned long start,
unsigned long size);
#ifdef CONFIG_X86_64
efi_status_t efi_setup_5level_paging(void);
void efi_5level_switch(void);
#else
static inline efi_status_t efi_setup_5level_paging(void) { return EFI_SUCCESS; }
static inline void efi_5level_switch(void) {}
#endif

View File

@@ -66,19 +66,28 @@ int efivars_register(struct efivars *efivars,
const struct efivar_operations *ops,
struct kobject *kobject)
{
int rv;
if (down_interruptible(&efivars_lock))
return -EINTR;
if (__efivars) {
pr_warn("efivars already registered\n");
rv = -EBUSY;
goto out;
}
efivars->ops = ops;
efivars->kobject = kobject;
__efivars = efivars;
pr_info("Registered efivars operations\n");
rv = 0;
out:
up(&efivars_lock);
return 0;
return rv;
}
EXPORT_SYMBOL_GPL(efivars_register);
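The efivars_register() fix above is the classic "error path sets rv but the function still ends in return 0" bug: once a function grows a goto-out label, the final return must hand back the tracked status. A condensed sketch of the before/after behavior (-16 stands in for -EBUSY):

#include <stdio.h>

static int registered;

static int register_ops(void)
{
	int rv;

	if (registered) {
		rv = -16;	/* refuse double registration */
		goto out;
	}
	registered = 1;
	rv = 0;
out:
	return rv;	/* the bug was a hard-coded `return 0;` here, masking -16 */
}

int main(void)
{
	int first = register_ops();
	int second = register_ops();

	printf("%d %d\n", first, second);	/* prints: 0 -16 */
	return 0;
}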

View File

@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
gpiod_set_value_cansleep(chip->gpiod_oe, 1);
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
gpiod_set_value_cansleep(chip->gpiod_oe, 1);
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;

View File

@@ -784,11 +784,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
goto err_remove_acpi_chip;
goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
goto err_remove_acpi_chip;
goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@@ -813,13 +813,13 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
err_remove_acpi_chip:
acpi_gpiochip_remove(gc);
err_remove_of_chip:
err_free_hogs:
gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
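The gpiolib hunk above reorders the unwind labels so cleanup runs strictly in reverse order of setup, with hogs freed before the ACPI teardown that may still reference them. The general rule, as a small sketch with stub setup/teardown pairs:

#include <stdio.h>

static int  setup_a(void) { puts("+a"); return 0; }
static void undo_a(void)  { puts("-a"); }
static int  setup_b(void) { puts("+b"); return 0; }
static void undo_b(void)  { puts("-b"); }
static int  setup_c(void) { puts("+c"); return -1; }	/* simulate failure */

/* Each label undoes exactly the steps that completed, newest first. */
static int init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;
	ret = setup_b();
	if (ret)
		goto err_undo_a;
	ret = setup_c();
	if (ret)
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void) { return init() ? 1 : 0; }	/* prints +a +b +c -b -a */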

View File

@@ -51,8 +51,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
frame_warn_flag := -Wframe-larger-than=3072
else
frame_warn_flag := -Wframe-larger-than=2048
endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)

View File

@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
static int si_set_temperature_range(struct amdgpu_device *adev)
{
int ret;
ret = si_thermal_enable_alert(adev, false);
if (ret)
return ret;
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(adev, true);
if (ret)
return ret;
return ret;
}
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->pm.dpm_enabled)
return 0;
ret = si_set_temperature_range(adev);
if (ret)
return ret;
#if 0 //TODO ?
si_dpm_powergate_uvd(adev, true);
#endif
return 0;
}

View File

@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
if (block_start < start || block_end > end) {
u64 adjusted_start = max(block_start, start);
u64 adjusted_end = min(block_end, end);
if (round_down(adjusted_end + 1, req_size) <=
round_up(adjusted_start, req_size))
continue;
}
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*

View File

@@ -316,32 +316,34 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
goto exit_afbcd;
if (has_components) {
ret = component_bind_all(drm->dev, drm);
ret = component_bind_all(dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all components\n");
/* Do not try to unbind */
has_components = false;
goto exit_afbcd;
}
}
ret = meson_encoder_hdmi_init(priv);
if (ret)
goto unbind_all;
goto exit_afbcd;
ret = meson_plane_create(priv);
if (ret)
goto unbind_all;
goto exit_afbcd;
ret = meson_overlay_create(priv);
if (ret)
goto unbind_all;
goto exit_afbcd;
ret = meson_crtc_create(priv);
if (ret)
goto unbind_all;
goto exit_afbcd;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
goto unbind_all;
goto exit_afbcd;
drm_mode_config_reset(drm);
@@ -359,15 +361,18 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq:
free_irq(priv->vsync_irq, drm);
unbind_all:
if (has_components)
component_unbind_all(drm->dev, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
free_drm:
drm_dev_put(drm);
meson_encoder_hdmi_remove(priv);
meson_encoder_cvbs_remove(priv);
if (has_components)
component_unbind_all(dev, drm);
return ret;
}

View File

@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}

View File

@@ -472,6 +472,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}

View File

@@ -1252,9 +1252,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
/*
* Only take over from a potential firmware framebuffer if any CRTCs
* have been registered. This must not be a fatal error because there
* are other accelerators that are exposed via this driver.
*
* Another case where this happens is on Tegra234 where the display
* hardware is no longer part of the host1x complex, so this driver
* will not expose any modesetting features.
*/
if (drm->mode_config.num_crtc > 0) {
err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
if (err < 0)
goto hub;
} else {
/*
* Indicate to userspace that this doesn't expose any display
* capabilities.
*/
drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
}
err = tegra_drm_fb_init(drm);
if (err < 0)

View File

@@ -16,7 +16,7 @@
#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
#include <trace/events/rdma.h>
#include <trace/misc/rdma.h>
/*
* enum ib_cm_state, from include/rdma/ib_cm.h

View File

@@ -3547,121 +3547,6 @@ err:
return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
struct sockaddr_storage zero_sock = {};
if (src_addr && src_addr->sa_family)
return rdma_bind_addr(id, src_addr);
/*
* When the src_addr is not specified, automatically supply an any addr
*/
zero_sock.ss_family = dst_addr->sa_family;
if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 =
(struct sockaddr_in6 *)&zero_sock;
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *)dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
id->route.addr.dev_addr.bound_dev_if =
dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *)&zero_sock)->sib_pkey =
((struct sockaddr_ib *)dst_addr)->sib_pkey;
}
return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
}
/*
* If required, resolve the source address for bind and leave the id_priv in
* state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
* calls made by ULP, a previously bound ID will not be re-bound and src_addr is
* ignored.
*/
static int resolve_prepare_src(struct rdma_id_private *id_priv,
struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
int ret;
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
/* For a well behaved ULP state will be RDMA_CM_IDLE */
ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
if (ret)
goto err_dst;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
RDMA_CM_ADDR_QUERY))) {
ret = -EINVAL;
goto err_dst;
}
}
if (cma_family(id_priv) != dst_addr->sa_family) {
ret = -EINVAL;
goto err_state;
}
return 0;
err_state:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
err_dst:
memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return ret;
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
int ret;
ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
if (ret)
return ret;
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
/*
* The FSM can return back to RDMA_CM_ADDR_BOUND after
* rdma_resolve_ip() is called, eg through the error
* path in addr_handler(). If this happens the existing
* request must be canceled before issuing a new one.
* Since canceling a request is a bit slow and this
* oddball path is rare, keep track once a request has
* been issued. The track turns out to be a permanent
* state since this is the only cancel as it is
* immediately before rdma_resolve_ip().
*/
if (id_priv->used_resolve_ip)
rdma_addr_cancel(&id->route.addr.dev_addr);
else
id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
false, id_priv);
}
}
if (ret)
goto err;
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
struct rdma_id_private *id_priv;
@@ -4064,27 +3949,26 @@ err:
}
EXPORT_SYMBOL(rdma_listen);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
struct sockaddr *addr, const struct sockaddr *daddr)
{
struct rdma_id_private *id_priv;
struct sockaddr *id_daddr;
int ret;
struct sockaddr *daddr;
if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
addr->sa_family != AF_IB)
return -EAFNOSUPPORT;
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL;
ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
if (ret)
goto err1;
memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
if (ret)
goto err1;
@@ -4104,8 +3988,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
}
#endif
}
daddr = cma_dst_addr(id_priv);
daddr->sa_family = addr->sa_family;
id_daddr = cma_dst_addr(id_priv);
if (daddr != id_daddr)
memcpy(id_daddr, daddr, rdma_addr_size(addr));
id_daddr->sa_family = addr->sa_family;
ret = cma_get_port(id_priv);
if (ret)
@@ -4121,6 +4007,129 @@ err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
struct sockaddr_storage zero_sock = {};
if (src_addr && src_addr->sa_family)
return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
/*
* When the src_addr is not specified, automatically supply an any addr
*/
zero_sock.ss_family = dst_addr->sa_family;
if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 =
(struct sockaddr_in6 *)&zero_sock;
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *)dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
id->route.addr.dev_addr.bound_dev_if =
dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *)&zero_sock)->sib_pkey =
((struct sockaddr_ib *)dst_addr)->sib_pkey;
}
return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
}
/*
* If required, resolve the source address for bind and leave the id_priv in
* state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
* calls made by ULP, a previously bound ID will not be re-bound and src_addr is
* ignored.
*/
static int resolve_prepare_src(struct rdma_id_private *id_priv,
struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
int ret;
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
/* For a well behaved ULP state will be RDMA_CM_IDLE */
ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
if (ret)
return ret;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
RDMA_CM_ADDR_QUERY)))
return -EINVAL;
} else {
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
}
if (cma_family(id_priv) != dst_addr->sa_family) {
ret = -EINVAL;
goto err_state;
}
return 0;
err_state:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
int ret;
ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
if (ret)
return ret;
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
/*
* The FSM can return back to RDMA_CM_ADDR_BOUND after
* rdma_resolve_ip() is called, eg through the error
* path in addr_handler(). If this happens the existing
* request must be canceled before issuing a new one.
* Since canceling a request is a bit slow and this
* oddball path is rare, keep track once a request has
* been issued. The track turns out to be a permanent
* state since this is the only cancel as it is
* immediately before rdma_resolve_ip().
*/
if (id_priv->used_resolve_ip)
rdma_addr_cancel(&id->route.addr.dev_addr);
else
id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
false, id_priv);
}
}
if (ret)
goto err;
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)

View File

@@ -15,7 +15,7 @@
#define _TRACE_RDMA_CMA_H
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>
#include <trace/misc/rdma.h>
DECLARE_EVENT_CLASS(cma_fsm_class,

View File

@@ -131,6 +131,11 @@ struct ib_umad_packet {
struct ib_user_mad mad;
};
struct ib_rmpp_mad_hdr {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
} __packed;
#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>
@@ -494,11 +499,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_umad_file *file = filp->private_data;
struct ib_rmpp_mad_hdr *rmpp_mad_hdr;
struct ib_umad_packet *packet;
struct ib_mad_agent *agent;
struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
struct ib_rmpp_mad *rmpp_mad;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
u8 base_version;
@@ -506,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
return -EINVAL;
packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
packet = kzalloc(sizeof(*packet) + IB_MGMT_RMPP_HDR, GFP_KERNEL);
if (!packet)
return -ENOMEM;
@@ -560,13 +565,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err_up;
}
rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
rmpp_mad_hdr = (struct ib_rmpp_mad_hdr *)packet->mad.data;
hdr_len = ib_get_mad_data_offset(rmpp_mad_hdr->mad_hdr.mgmt_class);
if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
if (ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
&& ib_mad_kernel_rmpp_agent(agent)) {
copy_offset = IB_MGMT_RMPP_HDR;
rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
rmpp_active = ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) &
IB_MGMT_RMPP_FLAG_ACTIVE;
} else {
copy_offset = IB_MGMT_MAD_HDR;
@@ -615,12 +620,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff));
rmpp_mad->mad_hdr.tid = *tid;
rmpp_mad_hdr->mad_hdr.tid = *tid;
}
if (!ib_mad_kernel_rmpp_agent(agent)
&& ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
&& (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
&& ib_is_mad_class_rmpp(rmpp_mad_hdr->mad_hdr.mgmt_class)
&& (ib_get_rmpp_flags(&rmpp_mad_hdr->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
spin_lock_irq(&file->send_lock);
list_add_tail(&packet->list, &file->send_list);
spin_unlock_irq(&file->send_lock);

View File

@@ -564,6 +564,9 @@ struct xboxone_init_packet {
#define GIP_MOTOR_LT BIT(3)
#define GIP_MOTOR_ALL (GIP_MOTOR_R | GIP_MOTOR_L | GIP_MOTOR_RT | GIP_MOTOR_LT)
#define GIP_WIRED_INTF_DATA 0
#define GIP_WIRED_INTF_AUDIO 1
/*
* This packet is required for all Xbox One pads with 2015
* or later firmware installed (or present from the factory).
@@ -2008,7 +2011,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
}
if (xpad->xtype == XTYPE_XBOXONE &&
intf->cur_altsetting->desc.bInterfaceNumber != 0) {
intf->cur_altsetting->desc.bInterfaceNumber != GIP_WIRED_INTF_DATA) {
/*
* The Xbox One controller lists three interfaces all with the
* same interface class, subclass and protocol. Differentiate by

View File

@@ -29,7 +29,6 @@ static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static DEFINE_MUTEX(icc_bw_lock);
static struct dentry *icc_debugfs_dir;
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
@@ -636,7 +635,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
mutex_lock(&icc_bw_lock);
mutex_lock(&icc_lock);
old_avg = path->reqs[0].avg_bw;
old_peak = path->reqs[0].peak_bw;
@@ -668,7 +667,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
apply_constraints(path);
}
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
trace_icc_set_bw_end(path, ret);
@@ -971,7 +970,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
return;
mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
node->provider = provider;
list_add_tail(&node->node_list, &provider->nodes);
@@ -997,7 +995,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = 0;
node->peak_bw = 0;
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1137,7 +1134,6 @@ void icc_sync_state(struct device *dev)
return;
mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
synced_state = true;
list_for_each_entry(p, &icc_providers, provider_list) {
dev_dbg(p->dev, "interconnect provider is in synced state\n");
@@ -1150,21 +1146,13 @@ void icc_sync_state(struct device *dev)
}
}
}
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
static int __init icc_init(void)
{
struct device_node *root;
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&icc_bw_lock);
fs_reclaim_release(GFP_KERNEL);
root = of_find_node_by_path("/");
struct device_node *root = of_find_node_by_path("/");
providers_count = of_count_icc_providers(root);
of_node_put(root);

View File

@@ -152,6 +152,18 @@ static void queue_inc_cons(struct arm_smmu_ll_queue *q)
q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
}
static void queue_sync_cons_ovf(struct arm_smmu_queue *q)
{
struct arm_smmu_ll_queue *llq = &q->llq;
if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
return;
llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
Q_IDX(llq, llq->cons);
queue_sync_cons_out(q);
}
static int queue_sync_prod_in(struct arm_smmu_queue *q)
{
u32 prod;
@@ -1583,8 +1595,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
Q_IDX(llq, llq->cons);
queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
@@ -1642,9 +1653,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
} while (!queue_empty(llq));
/* Sync our overflow flag, as we believe we're up to speed */
llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
Q_IDX(llq, llq->cons);
queue_sync_cons_out(q);
queue_sync_cons_ovf(q);
return IRQ_HANDLED;
}
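The new queue_sync_cons_ovf() helper above folds the duplicated "copy the producer's overflow flag into the consumer pointer" sequence out of the event and PRI queue threads. A simplified model of that flag copy, with made-up bit positions standing in for the driver's Q_OVF/Q_WRP/Q_IDX layout:

#include <stdint.h>
#include <stdio.h>

#define Q_OVF(p)  ((p) & (1u << 31))	/* overflow flag (illustrative layout) */
#define Q_WRP(p)  ((p) & (1u << 15))	/* wrap bit */
#define Q_IDX(p)  ((p) & 0x7fffu)	/* ring index */

/* Adopt the producer's overflow flag while keeping our wrap + index. */
static uint32_t sync_cons_ovf(uint32_t prod, uint32_t cons)
{
	if (Q_OVF(prod) == Q_OVF(cons))
		return cons;		/* already in sync, nothing to write back */
	return Q_OVF(prod) | Q_WRP(cons) | Q_IDX(cons);
}

int main(void)
{
	uint32_t cons = sync_cons_ovf(0x80000005u, 0x00008003u);

	printf("cons = 0x%08x\n", cons);	/* 0x80008003 */
	return 0;
}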

View File

@@ -267,12 +267,26 @@ static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
{
unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
unsigned int last_s2cr;
u32 reg;
u32 smr;
int i;
/*
* Some platforms support more than the Arm SMMU architected maximum of
* 128 stream matching groups. For unknown reasons, the additional
* groups don't exhibit the same behavior as the architected registers,
* so limit the groups to 128 until the behavior is fixed for the other
* groups.
*/
if (smmu->num_mapping_groups > 128) {
dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
smmu->num_mapping_groups = 128;
}
last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
/*
* With some firmware versions writes to S2CR of type FAULT are
* ignored, and writing BYPASS will end up written as FAULT in the

View File

@@ -152,13 +152,6 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
return &dom->domain;
}
static void sprd_iommu_domain_free(struct iommu_domain *domain)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
kfree(dom);
}
static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
{
struct sprd_iommu_device *sdev = dom->sdev;
@@ -231,6 +224,28 @@ static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
}
static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
{
size_t pgt_size;
/* Nothing need to do if the domain hasn't been attached */
if (!dom->sdev)
return;
pgt_size = sprd_iommu_pgt_size(&dom->domain);
dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
dom->sdev = NULL;
sprd_iommu_hw_en(dom->sdev, false);
}
static void sprd_iommu_domain_free(struct iommu_domain *domain)
{
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
sprd_iommu_cleanup(dom);
kfree(dom);
}
static int sprd_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{

View File

@@ -1007,10 +1007,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;

View File

@@ -200,6 +200,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
host->dma_in_progress = true;
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@@ -238,9 +240,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
static void sdmmc_idma_error(struct mmci_host *host)
{
struct mmc_data *data = host->data;
struct sdmmc_idma *idma = host->dma_priv;
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
data->host_cookie = 0;
if (!idma->use_bounce_buffer)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
if (!dma_inprogress(host))
return;
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -567,6 +590,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
.dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,

View File

@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
static int xenon_check_stability_internal_clk(struct sdhci_host *host)
{
u32 reg;
int err;
err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
if (err)
dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
return err;
}
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
int ret = xenon_check_stability_internal_clk(host);
if (ret)
return ret;
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
/* wait for host eMMC PHY init completes */
udelay(wait);
reg = sdhci_readl(host, phy_regs->timing_adj);
reg &= XENON_PHY_INITIALIZAION;
if (reg) {
/*
* AC5X spec says bit must be polled until zero.
* We see cases in which timeout can take longer
* than the standard calculation on AC5X, which is
* expected following the spec comment above.
* According to the spec, we must wait as long as
* it takes for that bit to toggle on AC5X.
* Cap that with 100 delay loops so we won't get
* stuck here forever:
*/
ret = read_poll_timeout(sdhci_readl, reg,
!(reg & XENON_PHY_INITIALIZAION),
wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
false, host, phy_regs->timing_adj);
if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
wait);
return -ETIMEDOUT;
}
wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
return 0;
return ret;
}
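read_poll_timeout() as used above repeatedly reads a register, sleeping between reads, until a condition holds or the timeout budget is spent. For reference, an equivalent open-coded loop in user space (simplified: the kernel helper also performs one final read after the deadline before giving up):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static unsigned int fake_reg_reads;

/* Stand-in for sdhci_readl(): the "busy" bit clears after a few reads. */
static unsigned int read_reg(void)
{
	return ++fake_reg_reads < 4 ? 0x1u : 0x0u;
}

/* Poll until (reg & busy) == 0 or ~timeout_us elapses; 0 on success. */
static int poll_clear(unsigned int busy, unsigned long sleep_us,
		      unsigned long timeout_us)
{
	unsigned long waited = 0;
	struct timespec ts = { 0, (long)sleep_us * 1000 };

	for (;;) {
		unsigned int reg = read_reg();

		if (!(reg & busy))
			return 0;
		if (waited >= timeout_us)
			return -62;	/* -ETIMEDOUT */
		nanosleep(&ts, NULL);
		waited += sleep_us;
	}
}

int main(void)
{
	printf("%d\n", poll_clear(0x1u, 100, 10000));	/* prints 0 */
	return 0;
}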
#define ARMADA_3700_SOC_PAD_1_8V 0x1

View File

@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
 {
 	u8 status2;
 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
-						      &status2);
+						      spinand->scratchbuf);
 	int ret;
 
 	switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
 		 * report the maximum of 4 in this case
 		 */
 		/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+		status2 = *(spinand->scratchbuf);
 		return ((status & STATUS_ECC_MASK) >> 2) |
 		       ((status2 & STATUS_ECC_MASK) >> 4);
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
 {
 	u8 status2;
 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
-						      &status2);
+						      spinand->scratchbuf);
 	int ret;
 
 	switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
 		 * 1 ... 4 bits are flipped (and corrected)
 		 */
 		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+		status2 = *(spinand->scratchbuf);
 		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
 	case STATUS_ECC_UNCOR_ERROR:

@@ -84,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
 
 config JME
@@ -189,6 +188,7 @@ source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/vertexcom/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"


@@ -976,7 +976,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 	igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
 
 	/* adjust timestamp for the TX latency based on link speed */
-	if (adapter->hw.mac.type == e1000_i210) {
+	if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
 		switch (adapter->link_speed) {
 		case SPEED_10:
 			adjust = IGB_I210_TX_LATENCY_10;
@@ -1022,6 +1022,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 			ktime_t *timestamp)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
 	struct skb_shared_hwtstamps ts;
 	__le64 *regval = (__le64 *)va;
 	int adjust = 0;
@@ -1041,7 +1042,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 	igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
 	/* adjust timestamp for the RX latency based on link speed */
-	if (adapter->hw.mac.type == e1000_i210) {
+	if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
 		switch (adapter->link_speed) {
 		case SPEED_10:
 			adjust = IGB_I210_RX_LATENCY_10;


@@ -52,8 +52,10 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
 	max_regions = max_tcam_regions;
 
 	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
-	if (!tcam->used_regions)
-		return -ENOMEM;
+	if (!tcam->used_regions) {
+		err = -ENOMEM;
+		goto err_alloc_used_regions;
+	}
 	tcam->max_regions = max_regions;
 
 	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
@@ -78,6 +80,8 @@ err_tcam_init:
 	bitmap_free(tcam->used_groups);
 err_alloc_used_groups:
 	bitmap_free(tcam->used_regions);
+err_alloc_used_regions:
+	mutex_destroy(&tcam->lock);
 	return err;
 }
 
@@ -86,10 +90,10 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
-	mutex_destroy(&tcam->lock);
 	ops->fini(mlxsw_sp, tcam->priv);
 	bitmap_free(tcam->used_groups);
 	bitmap_free(tcam->used_regions);
+	mutex_destroy(&tcam->lock);
 }
 
 int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,


@@ -3900,8 +3900,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
 {
 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
 
-	if (priv->fpe_wq)
+	if (priv->fpe_wq) {
 		destroy_workqueue(priv->fpe_wq);
+		priv->fpe_wq = NULL;
+	}
 
 	netdev_info(priv->dev, "FPE workqueue stop");
 }


@@ -1902,26 +1902,26 @@ static int __init gtp_init(void)
 
 	get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
 
-	err = rtnl_link_register(&gtp_link_ops);
+	err = register_pernet_subsys(&gtp_net_ops);
 	if (err < 0)
 		goto error_out;
 
-	err = register_pernet_subsys(&gtp_net_ops);
+	err = rtnl_link_register(&gtp_link_ops);
 	if (err < 0)
-		goto unreg_rtnl_link;
+		goto unreg_pernet_subsys;
 
 	err = genl_register_family(&gtp_genl_family);
 	if (err < 0)
-		goto unreg_pernet_subsys;
+		goto unreg_rtnl_link;
 
 	pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
 		sizeof(struct pdp_ctx));
 	return 0;
 
-unreg_pernet_subsys:
-	unregister_pernet_subsys(&gtp_net_ops);
 unreg_rtnl_link:
 	rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+	unregister_pernet_subsys(&gtp_net_ops);
 error_out:
 	pr_err("error loading GTP module loaded\n");
 	return err;

@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 				   tun->tfiles[tun->numqueues - 1]);
 		ntfile = rtnl_dereference(tun->tfiles[index]);
 		ntfile->queue_index = index;
+		ntfile->xdp_rxq.queue_index = index;
 		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
 				   NULL);
