Merge branch 'android14-5.15' into 'android14-5.15-lts'

This catches the -lts branch up with all of the recent changes that have
gone into the non-lts branch, INCLUDING the ABI update which we want
here to ensure that we do NOT break any newly added dependent symbols
(and to bring back in the reverts that were required before the ABI
break).

This includes the following commits:

1463dca508 ANDROID: 6/16/2023 KMI update
61d1582c93 UPSTREAM: tipc: check the bearer min mtu properly when setting it by netlink
5b20b206c4 UPSTREAM: tipc: do not update mtu if msg_max is too small in mtu negotiation
b288e3eb9a UPSTREAM: tipc: add tipc_bearer_min_mtu to calculate min mtu
63225f30d6 UPSTREAM: ASoC: fsl_micfil: Fix error handler with pm_runtime_enable
b64f71ac97 UPSTREAM: firmware: arm_sdei: Fix sleep from invalid context BUG
a45af5569a UPSTREAM: uapi/linux/const.h: prefer ISO-friendly __typeof__
18bae38a20 UPSTREAM: sched: Fix DEBUG && !SCHEDSTATS warn
8b4a04dce2 UPSTREAM: sched: Make struct sched_statistics independent of fair sched class
7f1bd76f41 UPSTREAM: platform: Provide a remove callback that returns no value
b529f9de5b ANDROID: GKI: reserve extra arm64 cpucaps for ABI preservation
2ab1955d56 ANDROID: KVM: arm64: Allow setting {P,U}XN in stage-2 PTEs
69e2ba2e16 ANDROID: KVM: arm64: Restrict host-to-hyp MMIO donations
3f060ac3de ANDROID: KVM: arm64: Allow state changes of MMIO pages
57574f0ae2 ANDROID: KVM: arm64: Allow MMIO perm changes from modules
951d15786a ANDROID: KVM: arm64: Don't allocate from handle_host_mem_abort
e609adf5cb ANDROID: KVM: arm64: Donate IOMMU regions to pKVM
1386a01618 ANDROID: KVM: arm64: Map MMIO donation as device at EL2
9debaf482d ANDROID: KVM: arm64: Don't recycle pages from host mem abort
aa4b272b34 ANDROID: KVM: arm64: Pin host stage-2 tables
97877e974b ANDROID: KVM: arm64: Move kvm_pte_follow() to header
76380240a2 ANDROID: KVM: arm64: Pre-populate host stage2
a2b45ad90a ANDROID: KVM: arm64: Fix the host ownership later
d522a07153 ANDROID: KVM: arm64: Don't recycle non-default PTEs
2bad47ce33 ANDROID: KVM: arm64: Introduce kvm_pgtable_stage2_reclaim_leaves
da5b14f0a1 ANDROID: KVM: arm64: Deprecate late pKVM module loading
2c641cfce1 BACKPORT: FROMGIT: usb: core: add sysfs entry for usb device state
61067bd1c2 BACKPORT: usb: xhci: plat: remove error log for failure to get usb-phy
4b219f7fc9 BACKPORT: usb: xhci: plat: Add USB 3.0 phy support
7b23f0d62a UPSTREAM: usb: dwc3: core: add support for realtek SoCs custom's global register start address
2c2c2503ed ANDROID: GKI: Enable CONFIG_RPMSG_CTRL
182ac7a9d9 UPSTREAM: mailbox: mailbox-test: fix a locking issue in mbox_test_message_write()
a6c1ea62c9 UPSTREAM: mailbox: mailbox-test: Fix potential double-free in mbox_test_message_write()
ad90aba4d6 UPSTREAM: net: cdc_ncm: Deal with too low values of dwNtbOutMaxSize
267d3e1f3f ANDROID: set CONFIG_IKHEADERS=m for gki_defconfig.
c1d1130811 UPSTREAM: usb: gadget: uvc: queue empty isoc requests if no video buffer is available
634ea38c4e ANDROID: Update the ABI symbol list
719fc80624 ANDROID: fs: Add vendor hooks for ep_create_wakeup_source & timerfd_create
4742f48a5a BACKPORT: arm64: Enable KCSAN
d2d27f72cf ANDROID: block: Partially revert "Send requeued requests to the I/O scheduler"
cc244e96d7 Revert "ANDROID: block: Warn if a zoned write is about to be reordered"

Change-Id: Ifb116236a7ed04eaf472d088aa36a470eb6b138d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman
Date: 2023-06-14 19:53:42 +00:00
55 changed files with 5247 additions and 4876 deletions


@@ -255,6 +255,16 @@ Description:
which is marked with early_stop has failed to initialize, it will ignore
all future connections until this attribute is clear.
What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/state
Date: June 2023
Contact: Roy Luo <royluo@google.com>
Description:
Indicates current state of the USB device attached to the port.
Valid states are: 'not-attached', 'attached', 'powered',
'reconnecting', 'unauthenticated', 'default', 'addressed',
'configured', and 'suspended'. This file supports poll() to
monitor the state change from user space.
What: /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
Date: May 2013
Contact: Mathias Nyman <mathias.nyman@linux.intel.com>
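Since the new attribute supports poll(), user space can block on it instead of re-reading in a loop. A minimal sketch of a watcher (the device path is illustrative; sysfs wakes pollers with POLLPRI, and the file must be rewound and re-read after every wakeup):

/* Minimal sketch: watch a port's "state" attribute from user space.
 * The sysfs path below is illustrative. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[32];
        struct pollfd pfd;
        ssize_t n;
        int fd = open("/sys/bus/usb/devices/usb1/1-0:1.0/port1/state",
                      O_RDONLY);

        if (fd < 0)
                return 1;

        for (;;) {
                lseek(fd, 0, SEEK_SET);         /* rewind before re-reading */
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("port state: %s", buf);  /* ends with '\n' */
                }
                pfd.fd = fd;
                pfd.events = POLLPRI | POLLERR;
                poll(&pfd, 1, -1);              /* block until state changes */
        }
}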

File diff suppressed because it is too large.


@@ -2129,6 +2129,7 @@
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_early_resume_begin
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_ep_create_wakeup_source
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_mm_compaction_begin
@@ -2141,6 +2142,7 @@
__traceiter_android_vh_si_meminfo
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_sysrq_crash
__traceiter_android_vh_timerfd_create
__traceiter_android_vh_typec_store_partner_src_caps
__traceiter_android_vh_typec_tcpci_override_toggling
__traceiter_android_vh_typec_tcpm_get_timer
@@ -2251,6 +2253,7 @@
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_early_resume_begin
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_ep_create_wakeup_source
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_mm_compaction_begin
@@ -2263,6 +2266,7 @@
__tracepoint_android_vh_si_meminfo
__tracepoint_android_vh_sound_usb_support_cpu_suspend
__tracepoint_android_vh_sysrq_crash
__tracepoint_android_vh_timerfd_create
__tracepoint_android_vh_typec_store_partner_src_caps
__tracepoint_android_vh_typec_tcpci_override_toggling
__tracepoint_android_vh_typec_tcpm_get_timer


@@ -157,6 +157,8 @@ config ARM64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
# Some instrumentation may be unsound, hence EXPERT
select HAVE_ARCH_KCSAN if EXPERT
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS


@@ -17,7 +17,7 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_IKHEADERS=m
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
@@ -569,6 +569,7 @@ CONFIG_MAILBOX=y
CONFIG_REMOTEPROC=y
CONFIG_REMOTEPROC_CDEV=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_QCOM_GENI_SE=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y


@@ -63,17 +63,11 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
/*
* __pkvm_alloc_module_va may temporarily serve as the privileged hcall
* limit when module loading is enabled, see early_pkvm_enable_modules().
*/
__KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va,
__KVM_HOST_SMCCC_FUNC___pkvm_map_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_unmap_module_page,
__KVM_HOST_SMCCC_FUNC___pkvm_init_module,
__KVM_HOST_SMCCC_FUNC___pkvm_register_hcall,
__KVM_HOST_SMCCC_FUNC___pkvm_close_module_registration,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
/* Hypercalls available after pKVM finalisation */


@@ -72,7 +72,10 @@ typedef u64 kvm_pte_t;
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN 1
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN 3
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_XN 2
#define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
@@ -167,6 +170,11 @@ struct kvm_pgtable_mm_ops {
void (*icache_inval_pou)(void *addr, size_t size);
};
static inline kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
/**
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
* @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
@@ -184,7 +192,9 @@ enum kvm_pgtable_stage2_flags {
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
* @KVM_PGTABLE_PROT_NC: Normal non-cacheable attributes.
* @KVM_PGTABLE_PROT_PXN: Privileged execute-never.
* @KVM_PGTABLE_PROT_UXN: Unprivileged execute-never.
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
@@ -197,6 +207,8 @@ enum kvm_pgtable_prot {
KVM_PGTABLE_PROT_DEVICE = BIT(3),
KVM_PGTABLE_PROT_NC = BIT(4),
KVM_PGTABLE_PROT_PXN = BIT(5),
KVM_PGTABLE_PROT_UXN = BIT(6),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
@@ -490,6 +502,21 @@ int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
*/
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_reclaim_leaves() - Attempt to reclaim leaf page-table
* pages by coalescing table entries into
* block mappings.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address from which to reclaim leaves.
* @size: Size of the range.
*
* The offset of @addr within a page is ignored and @size is rounded-up to
* the next page boundary.
*
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
* without TLB invalidation.


@@ -32,6 +32,7 @@ ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
$(CC_FLAGS_LTO)
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n


@@ -93,8 +93,6 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
int __pkvm_iommu_pm_notify(unsigned long dev_id,
enum pkvm_iommu_pm_event event);
int __pkvm_iommu_finalize(int err);
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
phys_addr_t *end);
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
phys_addr_t fault_pa);
void pkvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,


@@ -72,6 +72,8 @@ int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);


@@ -11,13 +11,10 @@ int __pkvm_register_hyp_panic_notifier(void (*cb)(struct kvm_cpu_context *));
enum pkvm_psci_notification;
int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *));
int reset_pkvm_priv_hcall_limit(void);
#ifdef CONFIG_MODULES
int __pkvm_init_module(void *module_init);
int __pkvm_register_hcall(unsigned long hfn_hyp_va);
int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt);
int __pkvm_close_late_module_registration(void);
void __pkvm_close_module_registration(void);
#else
static inline int __pkvm_init_module(void *module_init) { return -EOPNOTSUPP; }
@@ -27,6 +24,5 @@ static inline int handle_host_dynamic_hcall(struct kvm_cpu_context *host_ctxt)
{
return HCALL_UNHANDLED;
}
static inline int __pkvm_close_late_module_registration(void) { return -EOPNOTSUPP; }
static inline void __pkvm_close_module_registration(void) { }
#endif


@@ -18,3 +18,4 @@ $(obj)/hyp.lds: $(src)/hyp.lds.S FORCE
include $(srctree)/arch/arm64/kvm/hyp/nvhe/Makefile.nvhe
obj-y := kvm_nvhe.o
KCSAN_SANITIZE := n


@@ -1212,12 +1212,6 @@ static void handle___pkvm_register_hcall(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_register_hcall(hfn_hyp_va);
}
static void
handle___pkvm_close_module_registration(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_close_late_module_registration();
}
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, pack_hva, host_ctxt, 1);
@@ -1290,13 +1284,11 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__pkvm_alloc_module_va),
HANDLE_FUNC(__pkvm_map_module_page),
HANDLE_FUNC(__pkvm_unmap_module_page),
HANDLE_FUNC(__pkvm_init_module),
HANDLE_FUNC(__pkvm_register_hcall),
HANDLE_FUNC(__pkvm_close_module_registration),
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
@@ -1330,22 +1322,6 @@ static const hcall_t host_hcall[] = {
#endif
};
unsigned long pkvm_priv_hcall_limit __ro_after_init = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
int reset_pkvm_priv_hcall_limit(void)
{
unsigned long *addr;
if (pkvm_priv_hcall_limit == __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize)
return -EACCES;
addr = hyp_fixmap_map(__hyp_pa(&pkvm_priv_hcall_limit));
*addr = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
hyp_fixmap_unmap();
return 0;
}
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
@@ -1365,7 +1341,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
* returns -EPERM after the first call for a given CPU.
*/
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = pkvm_priv_hcall_limit;
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
id -= KVM_HOST_SMCCC_ID(0);


@@ -392,6 +392,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
.id = dev_id,
.ops = drv->ops,
.pa = dev_pa,
.va = hyp_phys_to_virt(dev_pa),
.size = dev_size,
.flags = flags,
};
@@ -421,22 +422,11 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
goto out_free;
}
/*
* Unmap the device's MMIO range from host stage-2. If registration
* is successful, future attempts to re-map will be blocked by
* pkvm_iommu_host_stage2_adjust_range.
*/
ret = host_stage2_unmap_reg_locked(dev_pa, dev_size);
ret = __pkvm_host_donate_hyp_locked(hyp_phys_to_pfn(dev_pa),
PAGE_ALIGN(dev_size) >> PAGE_SHIFT);
if (ret)
goto out_free;
/* Create EL2 mapping for the device. */
ret = __pkvm_create_private_mapping(dev_pa, dev_size,
PAGE_HYP_DEVICE, (unsigned long *)(&dev->va));
if (ret){
goto out_free;
}
/* Register device and prevent host from mapping the MMIO range. */
list_add_tail(&dev->list, &iommu_list);
if (dev->parent)
@@ -466,8 +456,6 @@ int __pkvm_iommu_finalize(int err)
ret = -EPERM;
hyp_spin_unlock(&iommu_registration_lock);
__pkvm_close_late_module_registration();
return ret;
}
@@ -497,39 +485,6 @@ int __pkvm_iommu_pm_notify(unsigned long dev_id, enum pkvm_iommu_pm_event event)
return ret;
}
/*
* Check host memory access against IOMMUs' MMIO regions.
* Returns -EPERM if the address is within the bounds of a registered device.
* Otherwise returns zero and adjusts boundaries of the new mapping to avoid
* MMIO regions of registered IOMMUs.
*/
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
phys_addr_t *end)
{
struct pkvm_iommu *dev;
phys_addr_t new_start = *start;
phys_addr_t new_end = *end;
phys_addr_t dev_start, dev_end;
assert_host_component_locked();
list_for_each_entry(dev, &iommu_list, list) {
dev_start = dev->pa;
dev_end = dev_start + dev->size;
if (addr < dev_start)
new_end = min(new_end, dev_start);
else if (addr >= dev_end)
new_start = max(new_start, dev_end);
else
return -EPERM;
}
*start = new_start;
*end = new_end;
return 0;
}
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
phys_addr_t pa)
{


@@ -79,10 +79,35 @@ static void hyp_unlock_component(void)
hyp_spin_unlock(&pkvm_pgd_lock);
}
static void assert_host_can_alloc(void)
{
/* We can always get back to the host from guest context */
if (read_sysreg(vttbr_el2) != kvm_get_vttbr(&host_mmu.arch.mmu))
return;
/*
* An error code must be returned to EL1 to handle memory allocation
* failures cleanly. That's doable for explicit calls into higher
* ELs, but not so much for other EL2 entry reasons such as mem aborts.
* Thankfully we don't need memory allocation in these cases by
* construction, so let's enforce the invariant.
*/
switch (ESR_ELx_EC(read_sysreg(esr_el2))) {
case ESR_ELx_EC_HVC64:
case ESR_ELx_EC_SMC64:
break;
default:
WARN_ON(1);
}
}
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
void *addr;
assert_host_can_alloc();
addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
hyp_split_page(hyp_virt_to_page(addr));
/*
@@ -97,6 +122,8 @@ static void *host_s2_zalloc_pages_exact(size_t size)
static void *host_s2_zalloc_page(void *pool)
{
assert_host_can_alloc();
return hyp_alloc_pages(pool, 0);
}
@@ -146,6 +173,27 @@ static void prepare_host_vtcr(void)
id_aa64mmfr1_el1_sys_val, phys_shift);
}
static int prepopulate_host_stage2(void)
{
struct memblock_region *reg;
u64 addr = 0;
int i, ret;
for (i = 0; i < hyp_memblock_nr; i++) {
reg = &hyp_memory[i];
ret = host_stage2_idmap_locked(addr, reg->base - addr, PKVM_HOST_MMIO_PROT, false);
if (ret)
return ret;
ret = host_stage2_idmap_locked(reg->base, reg->size, PKVM_HOST_MEM_PROT, false);
if (ret)
return ret;
addr = reg->base + reg->size;
}
return host_stage2_idmap_locked(addr, BIT(host_mmu.pgt.ia_bits) - addr, PKVM_HOST_MMIO_PROT,
false);
}
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
@@ -172,7 +220,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
mmu->pgt = &host_mmu.pgt;
atomic64_set(&mmu->vmid.id, 0);
return 0;
return prepopulate_host_stage2();
}
static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
@@ -398,7 +446,7 @@ int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size)
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, start, size);
ret = kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
if (ret)
return ret;
@@ -466,6 +514,11 @@ static enum kvm_pgtable_prot default_host_prot(bool is_memory)
return is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
}
static enum kvm_pgtable_prot default_hyp_prot(phys_addr_t phys)
{
return addr_is_memory(phys) ? PAGE_HYP : PAGE_HYP_DEVICE;
}
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
@@ -763,22 +816,15 @@ static int host_stage2_idmap(struct kvm_vcpu_fault_info *fault, u64 addr)
}
}
/*
* Adjust against IOMMU devices first. host_stage2_adjust_range() should
* be called last for proper alignment.
*/
if (!is_memory) {
ret = pkvm_iommu_host_stage2_adjust_range(addr, &range.start,
&range.end);
if (ret)
return ret;
}
ret = host_stage2_adjust_range(addr, &range, level);
if (ret)
return ret;
return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
/*
* We're guaranteed not to require memory allocation by construction,
* no need to bother even trying to recycle pages.
*/
return __host_stage2_idmap(range.start, range.end, prot, false);
}
static void (*illegal_abt_notifier)(struct kvm_cpu_context *host_ctxt);
@@ -972,7 +1018,7 @@ static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
if (is_memory && hyp_phys_to_page(addr)->flags & MODULE_OWNED_PAGE)
return PKVM_MODULE_DONT_TOUCH;
if (!addr_is_allowed_memory(addr))
if (is_memory && !addr_is_allowed_memory(addr))
return PKVM_NOPAGE;
if (!kvm_pte_valid(pte) && pte)
@@ -1186,8 +1232,10 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
phys_addr_t phys = hyp_virt_to_phys((void *)addr);
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
if (perms != PAGE_HYP)
if (!addr_is_memory(phys) || perms != prot)
return -EPERM;
if (__hyp_ack_skip_pgtable_check(tx))
@@ -1242,8 +1290,10 @@ static int hyp_complete_donation(u64 addr,
const struct pkvm_mem_transition *tx)
{
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
phys_addr_t phys = hyp_virt_to_phys(start);
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
prot = pkvm_mkstate(prot, PKVM_PAGE_OWNED);
return pkvm_create_mappings_locked(start, end, prot);
}
@@ -1280,7 +1330,7 @@ static int guest_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (perms != KVM_PGTABLE_PROT_RWX)
if (!addr_is_memory(tx->completer.guest.phys) || perms != KVM_PGTABLE_PROT_RWX)
return -EPERM;
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
@@ -1291,6 +1341,9 @@ static int guest_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (!addr_is_memory(tx->completer.guest.phys))
return -EPERM;
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
addr, size, PKVM_NOPAGE);
}
@@ -1776,7 +1829,7 @@ int __pkvm_host_share_hyp(u64 pfn)
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
.completer_prot = default_hyp_prot(host_addr),
};
host_lock_component();
@@ -1873,7 +1926,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
.completer_prot = default_hyp_prot(host_addr),
};
host_lock_component();
@@ -1888,6 +1941,27 @@ int __pkvm_host_unshare_hyp(u64 pfn)
}
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
return ___pkvm_host_donate_hyp(pfn, nr_pages, false);
}
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio)
{
phys_addr_t start = hyp_pfn_to_phys(pfn);
phys_addr_t end = start + (nr_pages << PAGE_SHIFT);
int ret;
if (!accept_mmio && !range_is_memory(start, end))
return -EPERM;
host_lock_component();
ret = __pkvm_host_donate_hyp_locked(pfn, nr_pages);
host_unlock_component();
return ret;
}
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages)
{
int ret;
u64 host_addr = hyp_pfn_to_phys(pfn);
@@ -1908,13 +1982,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
},
};
host_lock_component();
hyp_assert_lock_held(&host_mmu.lock);
hyp_lock_component();
ret = do_donate(&donation);
hyp_unlock_component();
host_unlock_component();
return ret;
}
@@ -1964,15 +2037,19 @@ static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm
return ret;
}
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
KVM_PGTABLE_PROT_NC | \
KVM_PGTABLE_PROT_PXN | \
KVM_PGTABLE_PROT_UXN)
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
{
u64 addr = hyp_pfn_to_phys(pfn);
struct hyp_page *page;
struct hyp_page *page = NULL;
kvm_pte_t pte;
u32 level;
int ret;
if ((prot & KVM_PGTABLE_PROT_RWX) != prot || !addr_is_memory(addr))
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
return -EINVAL;
host_lock_component();
@@ -1980,6 +2057,14 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
if (ret)
goto unlock;
/*
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
* of module-owned MMIO regions hard, so we trust the modules not to
* mess things up.
*/
if (!addr_is_memory(addr))
goto update;
ret = -EPERM;
page = hyp_phys_to_page(addr);
@@ -1994,14 +2079,15 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
goto unlock;
}
if (prot == KVM_PGTABLE_PROT_RWX)
update:
if (prot == default_host_prot(!!page))
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
else if (!prot)
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
else
ret = restrict_host_page_perms(addr, pte, level, prot);
if (ret)
if (ret || !page)
goto unlock;
if (prot != KVM_PGTABLE_PROT_RWX)


@@ -77,13 +77,9 @@ void __pkvm_close_module_registration(void)
*/
}
int __pkvm_close_late_module_registration(void)
static int __pkvm_module_host_donate_hyp(u64 pfn, u64 nr_pages)
{
__pkvm_close_module_registration();
return reset_pkvm_priv_hcall_limit();
/* The fuse is blown! No way back until reset */
return ___pkvm_host_donate_hyp(pfn, nr_pages, true);
}
const struct pkvm_module_ops module_ops = {
@@ -108,7 +104,7 @@ const struct pkvm_module_ops module_ops = {
.register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
.register_psci_notifier = __pkvm_register_psci_notifier,
.register_hyp_panic_notifier = __pkvm_register_hyp_panic_notifier,
.host_donate_hyp = __pkvm_host_donate_hyp,
.host_donate_hyp = __pkvm_module_host_donate_hyp,
.hyp_donate_host = __pkvm_hyp_donate_host,
.host_share_hyp = __pkvm_host_share_hyp,
.host_unshare_hyp = __pkvm_host_unshare_hyp,
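With this indirection every module donation goes through __pkvm_module_host_donate_hyp(), which passes accept_mmio=true and therefore also accepts MMIO pfns. A hedged sketch of a module-side caller (function name and pfn are illustrative):

/* Sketch: a pKVM module donating one page of its device's MMIO region
 * to the hypervisor through the ops table. Memory pfns keep working as
 * before; MMIO pfns are now tolerated by the module-facing wrapper. */
static int demo_take_mmio_page(const struct pkvm_module_ops *ops, u64 mmio_pfn)
{
        return ops->host_donate_hyp(mmio_pfn, 1);
}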


@@ -277,6 +277,29 @@ static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
return 0;
}
static int pin_table_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
struct kvm_pgtable_mm_ops *mm_ops = arg;
kvm_pte_t pte = *ptep;
if (kvm_pte_valid(pte))
mm_ops->get_page(kvm_pte_follow(pte, mm_ops));
return 0;
}
static int pin_host_tables(void)
{
struct kvm_pgtable_walker walker = {
.cb = pin_table_walker,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
.arg = &host_mmu.mm_ops,
};
return kvm_pgtable_walk(&host_mmu.pgt, 0, BIT(host_mmu.pgt.ia_bits), &walker);
}
static int fix_host_ownership(void)
{
struct kvm_pgtable_walker walker = {
@@ -357,10 +380,6 @@ void __noreturn __pkvm_init_finalise(void)
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
ret = fix_host_ownership();
if (ret)
goto out;
ret = fix_hyp_pgtable_refcnt();
if (ret)
goto out;
@@ -369,10 +388,18 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
ret = fix_host_ownership();
if (ret)
goto out;
ret = unmap_protected_regions();
if (ret)
goto out;
ret = pin_host_tables();
if (ret)
goto out;
ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;


@@ -76,11 +76,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
static void kvm_clear_pte(kvm_pte_t *ptep)
{
WRITE_ONCE(*ptep, 0);
@@ -281,7 +276,8 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
kvm_pte_t attr;
u32 mtype;
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc))
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc) ||
(prot & (KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN)))
return -EINVAL;
if (device)
@@ -570,16 +566,15 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
kvm_pte_t *ptep)
{
u64 exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
bool nc = prot & KVM_PGTABLE_PROT_NC;
enum kvm_pgtable_prot exec_prot;
kvm_pte_t attr;
if (device && nc)
return -EINVAL;
if (device)
attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
else if (nc)
@@ -587,11 +582,23 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
else
attr = KVM_S2_MEMATTR(pgt, NORMAL);
if (!(prot & KVM_PGTABLE_PROT_X))
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
else if (device)
return -EINVAL;
exec_prot = prot & (KVM_PGTABLE_PROT_X | KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN);
switch(exec_prot) {
case KVM_PGTABLE_PROT_X:
goto set_ap;
case KVM_PGTABLE_PROT_PXN:
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN;
break;
case KVM_PGTABLE_PROT_UXN:
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN;
break;
default:
if (exec_prot)
return -EINVAL;
}
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, exec_type);
set_ap:
if (prot & KVM_PGTABLE_PROT_R)
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
@@ -617,8 +624,21 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
prot |= KVM_PGTABLE_PROT_R;
if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
prot |= KVM_PGTABLE_PROT_W;
if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
switch(FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
case 0:
prot |= KVM_PGTABLE_PROT_X;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN:
prot |= KVM_PGTABLE_PROT_PXN;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN:
prot |= KVM_PGTABLE_PROT_UXN;
break;
case KVM_PTE_LEAF_ATTR_HI_S2_XN_XN:
break;
default:
WARN_ON(1);
}
return prot;
}
@@ -660,7 +680,9 @@ static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
static bool stage2_pte_executable(kvm_pte_t pte)
{
return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
kvm_pte_t xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte);
return kvm_pte_valid(pte) && xn != KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
}
static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
@@ -1017,6 +1039,30 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static int stage2_reclaim_leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
stage2_coalesce_walk_table_post(addr, end, level, ptep, arg);
return 0;
}
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
struct stage2_map_data map_data = {
.phys = KVM_PHYS_INVALID,
.mmu = pgt->mmu,
.mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_reclaim_leaf_walker,
.arg = &map_data,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
};
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
struct stage2_attr_data {
kvm_pte_t attr_set;
kvm_pte_t attr_clr;
@@ -1135,7 +1181,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
u32 level;
kvm_pte_t set = 0, clr = 0;
if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
if (prot & ~KVM_PGTABLE_PROT_RWX)
return -EINVAL;
if (prot & KVM_PGTABLE_PROT_R)
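For reference, the two-bit XN field introduced above encodes four execute states (0 = executable at EL1 and EL0, PXN = 1, XN = 2, UXN = 3) and is manipulated with the FIELD_* helpers. A minimal sketch:

/* Sketch: encoding and decoding the stage-2 XN field (bits 54:53). */
static void xn_encoding_demo(void)
{
        kvm_pte_t pte = FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN,
                                   KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN);

        /* The PTE is now executable at EL0 only. */
        WARN_ON(FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte) !=
                KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN);
}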


@@ -585,26 +585,14 @@ int pkvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
#ifdef CONFIG_MODULES
static char early_pkvm_modules[COMMAND_LINE_SIZE] __initdata;
static int __init pkvm_enable_module_late_loading(void)
{
extern unsigned long kvm_nvhe_sym(pkvm_priv_hcall_limit);
WARN(1, "Loading pKVM modules with kvm-arm.protected_modules is deprecated\n"
"Use kvm-arm.protected_modules=<module1>,<module2>");
/*
* Move the limit to allow module loading HVCs. It will be moved back to
* its original position in __pkvm_close_module_registration().
*/
kvm_nvhe_sym(pkvm_priv_hcall_limit) = __KVM_HOST_SMCCC_FUNC___pkvm_alloc_module_va;
return 0;
}
static int __init early_pkvm_modules_cfg(char *arg)
{
/*
* Loading pKVM modules with kvm-arm.protected_modules is deprecated
* Use kvm-arm.protected_modules=<module1>,<module2>
*/
if (!arg)
return pkvm_enable_module_late_loading();
return -EINVAL;
strscpy(early_pkvm_modules, arg, COMMAND_LINE_SIZE);
@@ -807,7 +795,8 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
int ret, i, secs_first;
size_t offset, size;
if (!is_protected_kvm_enabled())
/* The pKVM hyp only allows loading before it is fully initialized */
if (!is_protected_kvm_enabled() || is_pkvm_initialized())
return -EOPNOTSUPP;
for (i = 0; i < ARRAY_SIZE(secs_map); i++) {


@@ -78,3 +78,23 @@ WORKAROUND_NVIDIA_CARMEL_CNP
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_REPEAT_TLBI
WORKAROUND_SPECULATIVE_AT
ANDROID_KABI_RESERVE_01
ANDROID_KABI_RESERVE_02
ANDROID_KABI_RESERVE_03
ANDROID_KABI_RESERVE_04
ANDROID_KABI_RESERVE_05
ANDROID_KABI_RESERVE_06
ANDROID_KABI_RESERVE_07
ANDROID_KABI_RESERVE_08
ANDROID_KABI_RESERVE_09
ANDROID_KABI_RESERVE_10
ANDROID_KABI_RESERVE_11
ANDROID_KABI_RESERVE_12
ANDROID_KABI_RESERVE_13
ANDROID_KABI_RESERVE_14
ANDROID_KABI_RESERVE_15
ANDROID_KABI_RESERVE_16
ANDROID_KABI_RESERVE_17
ANDROID_KABI_RESERVE_18
ANDROID_KABI_RESERVE_19
ANDROID_KABI_RESERVE_20


@@ -19,7 +19,7 @@ CONFIG_RCU_BOOST=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_IKHEADERS=m
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
@@ -517,6 +517,7 @@ CONFIG_DEBUG_KINFO=y
CONFIG_REMOTEPROC=y
CONFIG_REMOTEPROC_CDEV=y
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_PM_DEVFREQ_EVENT=y
CONFIG_IIO=y
CONFIG_IIO_BUFFER=y


@@ -1406,31 +1406,14 @@ out:
/* For non-shared tags, the RESTART check will suffice */
bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
LIST_HEAD(for_sched);
struct request *next;
if (nr_budgets)
blk_mq_release_budgets(q, list);
if (q->elevator)
list_for_each_entry_safe(rq, next, list, queuelist)
if (!blk_mq_sched_bypass_insert(rq))
list_move_tail(&rq->queuelist,
&for_sched);
spin_lock(&hctx->lock);
list_splice_tail_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
if (q->elevator && !list_empty(&for_sched)) {
if (q->elevator->type->ops.requeue_request)
list_for_each_entry(rq, &for_sched, queuelist)
q->elevator->type->ops.
requeue_request(rq);
q->elevator->type->ops.insert_requests(hctx, &for_sched,
/*at_head=*/true);
}
/*
* Order adding requests to hctx->dispatch and checking
* SCHED_RESTART flag. The pair of this smp_mb() is the one
@@ -1876,8 +1859,6 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
WARN_ON_ONCE(rq->q->elevator && blk_rq_is_seq_zoned_write(rq));
spin_lock(&hctx->lock);
if (at_head)
list_add(&rq->queuelist, &hctx->dispatch);


@@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
KMI_GENERATION=8
KMI_GENERATION=9
LLVM=1
DEPMOD=depmod


@@ -28,6 +28,7 @@
#include <trace/hooks/gic_v3.h>
#include <trace/hooks/epoch.h>
#include <trace/hooks/cpufreq.h>
#include <trace/hooks/fs.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/preemptirq.h>
#include <trace/hooks/ftrace_dump.h>
@@ -232,6 +233,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_report_bug);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_watchdog_timer_softlockup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_freeze_todo);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_freeze_todo_unfrozen);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ep_create_wakeup_source);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timerfd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);


@@ -1427,7 +1427,9 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
if (drv->remove) {
if (drv->remove_new) {
drv->remove_new(dev);
} else if (drv->remove) {
int ret = drv->remove(dev);
if (ret)


@@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;
static int sdei_hp_state;
struct sdei_event {
/* These three are protected by the sdei_list_lock */
struct list_head list;
@@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to mask CPU[%u]: %d\n",
@@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
static void _ipi_mask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_mask_local_cpu();
}
@@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
if (err && err != -EIO) {
pr_warn_once("failed to unmask CPU[%u]: %d\n",
@@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
static void _ipi_unmask_cpu(void *ignored)
{
WARN_ON_ONCE(preemptible());
sdei_unmask_local_cpu();
}
@@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
{
int err;
WARN_ON_ONCE(preemptible());
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
NULL);
if (err && err != -EIO)
@@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_enable(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
int err;
struct sdei_crosscall_args *arg = data;
WARN_ON_ONCE(preemptible());
err = sdei_api_event_unregister(arg->event->event_num);
sdei_cross_call_return(arg, err);
@@ -561,8 +559,6 @@ static void _local_event_register(void *data)
struct sdei_registered_event *reg;
struct sdei_crosscall_args *arg = data;
WARN_ON(preemptible());
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
reg, 0, 0);
@@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
{
int rv;
WARN_ON_ONCE(preemptible());
switch (action) {
case CPU_PM_ENTER:
rv = sdei_mask_local_cpu();
@@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
int err;
/* unregister private events */
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_entry_point);
err = sdei_unregister_shared();
if (err)
@@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
return err;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err)
if (err < 0) {
pr_warn("Failed to re-register CPU hotplug notifier...\n");
return err;
}
return err;
sdei_hp_state = err;
return 0;
}
static int sdei_device_restore(struct device *dev)
@@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
* We are going to reset the interface, after this there is no point
* doing work when we take CPUs offline.
*/
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
cpuhp_remove_state(sdei_hp_state);
sdei_platform_reset();
@@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
goto remove_cpupm;
}
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
&sdei_cpuhp_up, &sdei_cpuhp_down);
if (err) {
if (err < 0) {
pr_warn("Failed to register CPU hotplug notifier...\n");
goto remove_reboot;
}
sdei_hp_state = err;
return 0;
remove_reboot:
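The SDEI hunks above replace the fixed CPUHP_AP_ARM_SDEI_STARTING slot with a dynamically allocated hotplug state, which is why the cpuhp_setup_state() return value must now be kept in sdei_hp_state. A minimal sketch of that pattern (callbacks are illustrative):

/* With CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() returns the slot it
 * allocated (>= 0); save it so cpuhp_remove_state() can find it later. */
static int demo_hp_state;

static int demo_cpu_up(unsigned int cpu)   { return 0; }
static int demo_cpu_down(unsigned int cpu) { return 0; }

static int demo_register(void)
{
        int err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                    demo_cpu_up, demo_cpu_down);
        if (err < 0)
                return err;
        demo_hp_state = err;    /* remember the dynamic slot */
        return 0;
}

static void demo_unregister(void)
{
        cpuhp_remove_state(demo_hp_state);
}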


@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
@@ -38,6 +39,7 @@ struct mbox_test_device {
char *signal;
char *message;
spinlock_t lock;
struct mutex mutex;
wait_queue_head_t waitq;
struct fasync_struct *async_queue;
struct dentry *root_debugfs_dir;
@@ -95,6 +97,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
char *message;
void *data;
int ret;
@@ -110,10 +113,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
return -EINVAL;
}
tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->message)
message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!message)
return -ENOMEM;
mutex_lock(&tdev->mutex);
tdev->message = message;
ret = copy_from_user(tdev->message, userbuf, count);
if (ret) {
ret = -EFAULT;
@@ -144,6 +150,8 @@ out:
kfree(tdev->message);
tdev->signal = NULL;
mutex_unlock(&tdev->mutex);
return ret < 0 ? ret : count;
}
@@ -392,6 +400,7 @@ static int mbox_test_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
spin_lock_init(&tdev->lock);
mutex_init(&tdev->mutex);
if (tdev->rx_channel) {
tdev->rx_buffer = devm_kzalloc(&pdev->dev,


@@ -2012,6 +2012,19 @@ bool usb_device_is_owned(struct usb_device *udev)
return !!hub->ports[udev->portnum - 1]->port_owner;
}
static void update_port_device_state(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_port *port_dev;
if (udev->parent) {
hub = usb_hub_to_struct_hub(udev->parent);
port_dev = hub->ports[udev->portnum - 1];
WRITE_ONCE(port_dev->state, udev->state);
sysfs_notify_dirent(port_dev->state_kn);
}
}
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
@@ -2024,6 +2037,7 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
udev->state = USB_STATE_NOTATTACHED;
update_port_device_state(udev);
}
/**
@@ -2080,6 +2094,7 @@ void usb_set_device_state(struct usb_device *udev,
udev->state != USB_STATE_SUSPENDED)
udev->active_duration += jiffies;
udev->state = new_state;
update_port_device_state(udev);
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);


@@ -83,6 +83,8 @@ struct usb_hub {
* @peer: related usb2 and usb3 ports (share the same connector)
* @req: default pm qos request for hubs without port power control
* @connect_type: port's connect type
* @state: device state of the usb device attached to the port
* @state_kn: kernfs_node of the sysfs attribute that accesses @state
* @location: opaque representation of platform connector location
* @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
* @portnum: port index num based one
@@ -99,6 +101,8 @@ struct usb_port {
struct usb_port *peer;
struct dev_pm_qos_request *req;
enum usb_port_connect_type connect_type;
enum usb_device_state state;
struct kernfs_node *state_kn;
usb_port_location_t location;
struct mutex status_lock;
u32 over_current_count;


@@ -76,6 +76,16 @@ static ssize_t connect_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(connect_type);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
enum usb_device_state state = READ_ONCE(port_dev->state);
return sysfs_emit(buf, "%s\n", usb_state_string(state));
}
static DEVICE_ATTR_RO(state);
static ssize_t over_current_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -175,6 +185,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
&dev_attr_state.attr,
&dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
@@ -594,12 +605,18 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
port_dev->state_kn = sysfs_get_dirent(port_dev->dev.kobj.sd, "state");
if (!port_dev->state_kn) {
dev_err(&port_dev->dev, "failed to sysfs_get_dirent 'state'\n");
retval = -ENODEV;
goto err_unregister;
}
/* Set default policy of port-poweroff disabled. */
retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
if (retval < 0) {
device_unregister(&port_dev->dev);
return retval;
goto err_put_kn;
}
find_and_link_peer(hub, port1);
@@ -636,6 +653,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
port_dev->req = NULL;
}
return 0;
err_put_kn:
sysfs_put(port_dev->state_kn);
err_unregister:
device_unregister(&port_dev->dev);
return retval;
}
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
@@ -646,5 +670,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
peer = port_dev->peer;
if (peer)
unlink_peers(port_dev, peer);
sysfs_put(port_dev->state_kn);
device_unregister(&port_dev->dev);
}


@@ -1639,6 +1639,17 @@ static int dwc3_probe(struct platform_device *pdev)
dwc_res = *res;
dwc_res.start += DWC3_GLOBALS_REGS_START;
if (dev->of_node) {
struct device_node *parent = of_get_parent(dev->of_node);
if (of_device_is_compatible(parent, "realtek,rtd-dwc3")) {
dwc_res.start -= DWC3_GLOBALS_REGS_START;
dwc_res.start += DWC3_RTK_RTD_GLOBALS_REGS_START;
}
of_node_put(parent);
}
regs = devm_ioremap_resource(dev, &dwc_res);
if (IS_ERR(regs))
return PTR_ERR(regs);


@@ -85,6 +85,8 @@
#define DWC3_OTG_REGS_START 0xcc00
#define DWC3_OTG_REGS_END 0xccff
#define DWC3_RTK_RTD_GLOBALS_REGS_START 0x8100
/* Global Registers */
#define DWC3_GSBUSCFG0 0xc100
#define DWC3_GSBUSCFG1 0xc104


@@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work)
struct uvc_buffer *buf;
unsigned long flags;
int ret;
bool buf_int;
/* video->max_payload_size is only set when using bulk transfer */
bool is_bulk = video->max_payload_size;
while (video->ep->enabled) {
/*
@@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
if (buf == NULL) {
if (buf != NULL) {
video->encode(req, video, buf);
/* Always interrupt for the last request of a video buffer */
buf_int = buf->state == UVC_BUF_STATE_DONE;
} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
/*
* No video buffer available; the queue is still connected and
we're transferring over ISOC. Queue a 0 length request to
* prevent missed ISOC transfers.
*/
req->length = 0;
buf_int = false;
} else {
/*
* Either queue has been disconnected or no video buffer
* available to bulk transfer. Either way, stop processing
* further.
*/
spin_unlock_irqrestore(&queue->irqlock, flags);
break;
}
video->encode(req, video, buf);
/*
* With usb3 we have more requests. This will decrease the
* interrupt load to a quarter but also catches the corner
* cases, which needs to be handled.
*/
if (list_empty(&video->req_free) ||
buf->state == UVC_BUF_STATE_DONE ||
if (list_empty(&video->req_free) || buf_int ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
@@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
/* Endpoint now owns the request */
req = NULL;
if (buf->state != UVC_BUF_STATE_DONE)
video->req_int_count++;
video->req_int_count++;
}
if (!req)
@@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
return 0;
}


@@ -339,6 +339,16 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_usb3_hcd;
}
xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 1);
if (IS_ERR(xhci->shared_hcd->usb_phy)) {
xhci->shared_hcd->usb_phy = NULL;
} else {
ret = usb_phy_init(xhci->shared_hcd->usb_phy);
if (ret)
dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
__func__, ret);
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;


@@ -40,6 +40,8 @@
#include <linux/rculist.h>
#include <net/busy_poll.h>
#include <trace/hooks/fs.h>
/*
* LOCKING:
* There are three level of locking required by epoll :
@@ -1367,15 +1369,20 @@ static int ep_create_wakeup_source(struct epitem *epi)
{
struct name_snapshot n;
struct wakeup_source *ws;
char ws_name[64];
strscpy(ws_name, "eventpoll", sizeof(ws_name));
trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
if (!epi->ep->ws) {
epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
epi->ep->ws = wakeup_source_register(NULL, ws_name);
if (!epi->ep->ws)
return -ENOMEM;
}
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
ws = wakeup_source_register(NULL, n.name.name);
strscpy(ws_name, n.name.name, sizeof(ws_name));
trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
ws = wakeup_source_register(NULL, ws_name);
release_dentry_name_snapshot(&n);
if (!ws)


@@ -28,6 +28,8 @@
#include <linux/rcupdate.h>
#include <linux/time_namespace.h>
#include <trace/hooks/fs.h>
struct timerfd_ctx {
union {
struct hrtimer tmr;
@@ -407,6 +409,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
{
int ufd;
struct timerfd_ctx *ctx;
char file_name_buf[32];
/* Check the TFD_* constants for consistency. */
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
@@ -443,7 +446,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
ctx->moffs = ktime_mono_to_real(0);
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
strscpy(file_name_buf, "[timerfd]", sizeof(file_name_buf));
trace_android_vh_timerfd_create(file_name_buf, sizeof(file_name_buf));
ufd = anon_inode_getfd(file_name_buf, &timerfd_fops, ctx,
O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
if (ufd < 0)
kfree(ctx);
@@ -451,7 +456,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
return ufd;
}
static int do_timerfd_settime(int ufd, int flags,
const struct itimerspec64 *new,
struct itimerspec64 *old)
{


@@ -159,7 +159,6 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,


@@ -209,7 +209,18 @@ extern void platform_device_put(struct platform_device *pdev);
struct platform_driver {
int (*probe)(struct platform_device *);
/*
* Traditionally the remove callback returned an int which however is
* ignored by the driver core. This led to wrong expectations by driver
* authors who thought returning an error code was a valid error
* handling strategy. To convert to a callback returning void, new
drivers should implement .remove_new() until the conversion is done
* that eventually makes .remove() return void.
*/
int (*remove)(struct platform_device *);
void (*remove_new)(struct platform_device *);
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
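A driver opting in to the void-returning callback would look roughly like this (all names illustrative):

/* Sketch: since .remove_new() cannot return an error, any cleanup
 * failure must be handled (or logged) locally. */
static void demo_remove(struct platform_device *pdev)
{
        /* release resources acquired in probe */
}

static struct platform_driver demo_driver = {
        .driver         = { .name = "demo" },
        .remove_new     = demo_remove,
};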


@@ -526,7 +526,7 @@ struct sched_statistics {
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
#endif
};
} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
@@ -542,8 +542,6 @@ struct sched_entity {
u64 nr_migrations;
struct sched_statistics statistics;
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -817,6 +815,8 @@ struct task_struct {
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
struct sched_statistics stats;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;

include/trace/hooks/fs.h (new file, 23 lines added)

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fs
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_FS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_FS_H
#include <trace/hooks/vendor_hooks.h>
DECLARE_HOOK(android_vh_ep_create_wakeup_source,
TP_PROTO(char *name, int len),
TP_ARGS(name, len));
DECLARE_HOOK(android_vh_timerfd_create,
TP_PROTO(char *name, int len),
TP_ARGS(name, len));
#endif /* _TRACE_HOOK_FS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
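DECLARE_HOOK() generates a matching register_trace_android_vh_<name>() for each hook. A hedged sketch of how a vendor module might attach to one (the handler and tag are illustrative):

/* Sketch: rename timerfd anon inodes so fds are attributable in /proc.
 * The probe's first argument is the tracepoint's private data pointer. */
static void demo_timerfd_name(void *unused, char *name, int len)
{
        strscpy(name, "[timerfd:demo]", len);
}

static int __init demo_hook_init(void)
{
        return register_trace_android_vh_timerfd_create(demo_timerfd_name,
                                                        NULL);
}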


@@ -28,7 +28,7 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
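The change matters because this uapi header is consumed by user space in strict ISO modes where the plain typeof keyword is unavailable; the macro's behaviour is unchanged. For example:

/* __ALIGN_KERNEL rounds x up to the next multiple of a (a power of
 * two), computing the mask in x's own type via the __typeof__ cast. */
static_assert(__ALIGN_KERNEL(100, 64) == 128);
static_assert(__ALIGN_KERNEL(128, 64) == 128);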


@@ -8,6 +8,7 @@ CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
$(call cc-option,-mno-outline-atomics) \
-fno-stack-protector -DDISABLE_BRANCH_PROFILING
obj-y := core.o debugfs.o report.o


@@ -3614,11 +3614,11 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
#ifdef CONFIG_SMP
if (cpu == rq->cpu) {
__schedstat_inc(rq->ttwu_local);
__schedstat_inc(p->se.statistics.nr_wakeups_local);
__schedstat_inc(p->stats.nr_wakeups_local);
} else {
struct sched_domain *sd;
__schedstat_inc(p->se.statistics.nr_wakeups_remote);
__schedstat_inc(p->stats.nr_wakeups_remote);
rcu_read_lock();
for_each_domain(rq->cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
@@ -3630,14 +3630,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
}
if (wake_flags & WF_MIGRATED)
__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
__schedstat_inc(p->stats.nr_wakeups_migrate);
#endif /* CONFIG_SMP */
__schedstat_inc(rq->ttwu_count);
__schedstat_inc(p->se.statistics.nr_wakeups);
__schedstat_inc(p->stats.nr_wakeups);
if (wake_flags & WF_SYNC)
__schedstat_inc(p->se.statistics.nr_wakeups_sync);
__schedstat_inc(p->stats.nr_wakeups_sync);
}
/*
@@ -4363,7 +4363,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
#ifdef CONFIG_SCHEDSTATS
/* Even if schedstat is disabled, there should not be garbage */
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
memset(&p->stats, 0, sizeof(p->stats));
#endif
RB_CLEAR_NODE(&p->dl.rb_node);
@@ -9890,9 +9890,9 @@ void normalize_rt_tasks(void)
continue;
p->se.exec_start = 0;
schedstat_set(p->se.statistics.wait_start, 0);
schedstat_set(p->se.statistics.sleep_start, 0);
schedstat_set(p->se.statistics.block_start, 0);
schedstat_set(p->stats.wait_start, 0);
schedstat_set(p->stats.sleep_start, 0);
schedstat_set(p->stats.block_start, 0);
if (!dl_task(p) && !rt_task(p)) {
/*
@@ -10787,11 +10787,14 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
if (schedstat_enabled() && tg != &root_task_group) {
struct sched_statistics *stats;
u64 ws = 0;
int i;
for_each_possible_cpu(i)
ws += schedstat_val(tg->se[i]->statistics.wait_sum);
for_each_possible_cpu(i) {
stats = __schedstats_from_se(tg->se[i]);
ws += schedstat_val(stats->wait_sum);
}
seq_printf(sf, "wait_sum %llu\n", ws);
}


@@ -1266,8 +1266,8 @@ static void update_curr_dl(struct rq *rq)
return;
}
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);


@@ -450,9 +450,11 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
struct sched_entity *se = tg->se[cpu];
#define P(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", \
#F, (long long)schedstat_val(stats->F))
#define PN(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", \
#F, SPLIT_NS((long long)schedstat_val(stats->F)))
if (!se)
return;
@@ -462,16 +464,19 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
PN(se->sum_exec_runtime);
if (schedstat_enabled()) {
PN_SCHEDSTAT(se->statistics.wait_start);
PN_SCHEDSTAT(se->statistics.sleep_start);
PN_SCHEDSTAT(se->statistics.block_start);
PN_SCHEDSTAT(se->statistics.sleep_max);
PN_SCHEDSTAT(se->statistics.block_max);
PN_SCHEDSTAT(se->statistics.exec_max);
PN_SCHEDSTAT(se->statistics.slice_max);
PN_SCHEDSTAT(se->statistics.wait_max);
PN_SCHEDSTAT(se->statistics.wait_sum);
P_SCHEDSTAT(se->statistics.wait_count);
struct sched_statistics *stats;
stats = __schedstats_from_se(se);
PN_SCHEDSTAT(wait_start);
PN_SCHEDSTAT(sleep_start);
PN_SCHEDSTAT(block_start);
PN_SCHEDSTAT(sleep_max);
PN_SCHEDSTAT(block_max);
PN_SCHEDSTAT(exec_max);
PN_SCHEDSTAT(slice_max);
PN_SCHEDSTAT(wait_max);
PN_SCHEDSTAT(wait_sum);
P_SCHEDSTAT(wait_count);
}
P(se->load.weight);
@@ -538,9 +543,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
p->prio);
SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
SPLIT_NS(p->se.sum_exec_runtime),
SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)));
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
@@ -946,8 +951,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
"---------------------------------------------------------"
"----------\n");
#define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
#define P_SCHEDSTAT(F) __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))
PN(se.exec_start);
PN(se.vruntime);
@@ -960,33 +965,33 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
if (schedstat_enabled()) {
u64 avg_atom, avg_per_cpu;
PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
PN_SCHEDSTAT(se.statistics.wait_start);
PN_SCHEDSTAT(se.statistics.sleep_start);
PN_SCHEDSTAT(se.statistics.block_start);
PN_SCHEDSTAT(se.statistics.sleep_max);
PN_SCHEDSTAT(se.statistics.block_max);
PN_SCHEDSTAT(se.statistics.exec_max);
PN_SCHEDSTAT(se.statistics.slice_max);
PN_SCHEDSTAT(se.statistics.wait_max);
PN_SCHEDSTAT(se.statistics.wait_sum);
P_SCHEDSTAT(se.statistics.wait_count);
PN_SCHEDSTAT(se.statistics.iowait_sum);
P_SCHEDSTAT(se.statistics.iowait_count);
P_SCHEDSTAT(se.statistics.nr_migrations_cold);
P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
P_SCHEDSTAT(se.statistics.nr_forced_migrations);
P_SCHEDSTAT(se.statistics.nr_wakeups);
P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
P_SCHEDSTAT(se.statistics.nr_wakeups_local);
P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
PN_SCHEDSTAT(sum_sleep_runtime);
PN_SCHEDSTAT(wait_start);
PN_SCHEDSTAT(sleep_start);
PN_SCHEDSTAT(block_start);
PN_SCHEDSTAT(sleep_max);
PN_SCHEDSTAT(block_max);
PN_SCHEDSTAT(exec_max);
PN_SCHEDSTAT(slice_max);
PN_SCHEDSTAT(wait_max);
PN_SCHEDSTAT(wait_sum);
P_SCHEDSTAT(wait_count);
PN_SCHEDSTAT(iowait_sum);
P_SCHEDSTAT(iowait_count);
P_SCHEDSTAT(nr_migrations_cold);
P_SCHEDSTAT(nr_failed_migrations_affine);
P_SCHEDSTAT(nr_failed_migrations_running);
P_SCHEDSTAT(nr_failed_migrations_hot);
P_SCHEDSTAT(nr_forced_migrations);
P_SCHEDSTAT(nr_wakeups);
P_SCHEDSTAT(nr_wakeups_sync);
P_SCHEDSTAT(nr_wakeups_migrate);
P_SCHEDSTAT(nr_wakeups_local);
P_SCHEDSTAT(nr_wakeups_remote);
P_SCHEDSTAT(nr_wakeups_affine);
P_SCHEDSTAT(nr_wakeups_affine_attempts);
P_SCHEDSTAT(nr_wakeups_passive);
P_SCHEDSTAT(nr_wakeups_idle);
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
@@ -1052,7 +1057,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
memset(&p->stats, 0, sizeof(p->stats));
#endif
}
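Throughout debug.c the P_SCHEDSTAT()/PN_SCHEDSTAT() macros still print the stringified field name via #F, but the value expression now reaches the field through a local stats pointer (or p->stats.F), so call sites pass bare field names such as wait_sum. A small userspace illustration of the stringize-plus-indirection trick:

#include <stdio.h>
#include <stdint.h>

struct sched_statistics { uint64_t wait_sum, wait_count; };

/* Mirrors the reworked macro shape: #F yields the field name string,
 * while the value is read through a 'stats' pointer in scope. */
#define P_SCHEDSTAT(F) \
	printf("  .%-30s: %lld\n", #F, (long long)(stats->F))

int main(void)
{
	struct sched_statistics s = { .wait_sum = 1234, .wait_count = 7 };
	struct sched_statistics *stats = &s;

	P_SCHEDSTAT(wait_sum);
	P_SCHEDSTAT(wait_count);
	return 0;
}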

kernel/sched/fair.c

@@ -846,8 +846,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->exec_start = now;
schedstat_set(curr->statistics.exec_max,
max(delta_exec, curr->statistics.exec_max));
if (schedstat_enabled()) {
struct sched_statistics *stats;
stats = __schedstats_from_se(curr);
__schedstat_set(stats->exec_max,
max(delta_exec, stats->exec_max));
}
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
@@ -875,39 +880,45 @@ static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
u64 wait_start, prev_wait_start;
struct sched_statistics *stats;
if (!schedstat_enabled())
return;
stats = __schedstats_from_se(se);
wait_start = rq_clock(rq_of(cfs_rq));
prev_wait_start = schedstat_val(se->statistics.wait_start);
prev_wait_start = schedstat_val(stats->wait_start);
if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
likely(wait_start > prev_wait_start))
wait_start -= prev_wait_start;
__schedstat_set(se->statistics.wait_start, wait_start);
__schedstat_set(stats->wait_start, wait_start);
}
static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct task_struct *p;
struct sched_statistics *stats;
struct task_struct *p = NULL;
u64 delta;
if (!schedstat_enabled())
return;
stats = __schedstats_from_se(se);
/*
* When sched_schedstat changes from 0 to 1, some sched entities
* may already be on the runqueue; their wait_start will still be
* 0, which would make the computed delta wrong. We need to avoid
* this scenario.
*/
if (unlikely(!schedstat_val(se->statistics.wait_start)))
if (unlikely(!schedstat_val(stats->wait_start)))
return;
delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(stats->wait_start);
if (entity_is_task(se)) {
p = task_of(se);
@@ -917,30 +928,33 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
* time stamp can be adjusted to accumulate wait time
* prior to migration.
*/
__schedstat_set(se->statistics.wait_start, delta);
__schedstat_set(stats->wait_start, delta);
return;
}
trace_sched_stat_wait(p, delta);
}
__schedstat_set(se->statistics.wait_max,
max(schedstat_val(se->statistics.wait_max), delta));
__schedstat_inc(se->statistics.wait_count);
__schedstat_add(se->statistics.wait_sum, delta);
__schedstat_set(se->statistics.wait_start, 0);
__schedstat_set(stats->wait_max,
max(schedstat_val(stats->wait_max), delta));
__schedstat_inc(stats->wait_count);
__schedstat_add(stats->wait_sum, delta);
__schedstat_set(stats->wait_start, 0);
}
static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_statistics *stats;
struct task_struct *tsk = NULL;
u64 sleep_start, block_start;
if (!schedstat_enabled())
return;
sleep_start = schedstat_val(se->statistics.sleep_start);
block_start = schedstat_val(se->statistics.block_start);
stats = __schedstats_from_se(se);
sleep_start = schedstat_val(stats->sleep_start);
block_start = schedstat_val(stats->block_start);
if (entity_is_task(se))
tsk = task_of(se);
@@ -951,11 +965,11 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
__schedstat_set(se->statistics.sleep_max, delta);
if (unlikely(delta > schedstat_val(stats->sleep_max)))
__schedstat_set(stats->sleep_max, delta);
__schedstat_set(se->statistics.sleep_start, 0);
__schedstat_add(se->statistics.sum_sleep_runtime, delta);
__schedstat_set(stats->sleep_start, 0);
__schedstat_add(stats->sum_sleep_runtime, delta);
if (tsk) {
account_scheduler_latency(tsk, delta >> 10, 1);
@@ -968,16 +982,16 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
if ((s64)delta < 0)
delta = 0;
if (unlikely(delta > schedstat_val(se->statistics.block_max)))
__schedstat_set(se->statistics.block_max, delta);
if (unlikely(delta > schedstat_val(stats->block_max)))
__schedstat_set(stats->block_max, delta);
__schedstat_set(se->statistics.block_start, 0);
__schedstat_add(se->statistics.sum_sleep_runtime, delta);
__schedstat_set(stats->block_start, 0);
__schedstat_add(stats->sum_sleep_runtime, delta);
if (tsk) {
if (tsk->in_iowait) {
__schedstat_add(se->statistics.iowait_sum, delta);
__schedstat_inc(se->statistics.iowait_count);
__schedstat_add(stats->iowait_sum, delta);
__schedstat_inc(stats->iowait_count);
trace_sched_stat_iowait(tsk, delta);
}
@@ -1039,10 +1053,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
/* XXX racy against TTWU */
state = READ_ONCE(tsk->__state);
if (state & TASK_INTERRUPTIBLE)
__schedstat_set(se->statistics.sleep_start,
__schedstat_set(tsk->stats.sleep_start,
rq_clock(rq_of(cfs_rq)));
if (state & TASK_UNINTERRUPTIBLE)
__schedstat_set(se->statistics.block_start,
__schedstat_set(tsk->stats.block_start,
rq_clock(rq_of(cfs_rq)));
}
}
@@ -4722,8 +4736,11 @@ void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
if (schedstat_enabled() &&
rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
__schedstat_set(se->statistics.slice_max,
max((u64)se->statistics.slice_max,
struct sched_statistics *stats;
stats = __schedstats_from_se(se);
__schedstat_set(stats->slice_max,
max((u64)stats->slice_max,
se->sum_exec_runtime - se->prev_sum_exec_runtime));
}
@@ -6237,12 +6254,12 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
schedstat_inc(p->stats.nr_wakeups_affine_attempts);
if (target != this_cpu)
return prev_cpu;
schedstat_inc(sd->ttwu_move_affine);
schedstat_inc(p->se.statistics.nr_wakeups_affine);
schedstat_inc(p->stats.nr_wakeups_affine);
return target;
}
@@ -8164,7 +8181,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
schedstat_inc(p->stats.nr_failed_migrations_affine);
env->flags |= LBF_SOME_PINNED;
@@ -8198,7 +8215,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
env->flags &= ~LBF_ALL_PINNED;
if (task_running(env->src_rq, p)) {
schedstat_inc(p->se.statistics.nr_failed_migrations_running);
schedstat_inc(p->stats.nr_failed_migrations_running);
return 0;
}
@@ -8220,12 +8237,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
if (tsk_cache_hot == 1) {
schedstat_inc(env->sd->lb_hot_gained[env->idle]);
schedstat_inc(p->se.statistics.nr_forced_migrations);
schedstat_inc(p->stats.nr_forced_migrations);
}
return 1;
}
schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
schedstat_inc(p->stats.nr_failed_migrations_hot);
return 0;
}
@@ -11953,7 +11970,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
if (!cfs_rq)
goto err;
se = kzalloc_node(sizeof(struct sched_entity),
se = kzalloc_node(sizeof(struct sched_entity_stats),
GFP_KERNEL, cpu_to_node(i));
if (!se)
goto err_free_rq;
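The allocation change at the end of this hunk is the subtle part of the fair.c rework: a group scheduling entity must be allocated as struct sched_entity_stats so the statistics trail the embedded sched_entity in the same allocation, even though every caller keeps using a plain struct sched_entity pointer. A compilable sketch of that over-allocation pattern:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct sched_entity { uint64_t sum_exec_runtime; };
struct sched_statistics { uint64_t wait_sum; };

/* Mirrors the layout introduced in kernel/sched/stats.h. */
struct sched_entity_stats {
	struct sched_entity	se;
	struct sched_statistics	stats;
};

int main(void)
{
	/* As in the reworked alloc_fair_sched_group(): allocate the
	 * larger footprint, hand out the embedded entity pointer. */
	struct sched_entity *se = calloc(1, sizeof(struct sched_entity_stats));

	if (!se)
		return 1;

	printf("allocating %zu bytes instead of %zu\n",
	       sizeof(struct sched_entity_stats),
	       sizeof(struct sched_entity));
	free(se);
	return 0;
}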

kernel/sched/rt.c

@@ -1030,8 +1030,8 @@ static void update_curr_rt(struct rq *rq)
if (unlikely((s64)delta_exec <= 0))
return;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);

kernel/sched/stats.h

@@ -41,6 +41,7 @@ rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
@@ -53,8 +54,26 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
# define schedstat_set(var, val) do { } while (0)
# define schedstat_val(var) 0
# define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
struct sched_entity se;
struct sched_statistics stats;
} __no_randomize_layout;
#endif
static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
if (!entity_is_task(se))
return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
return &task_of(se)->stats;
}
#ifdef CONFIG_PSI
/*
* PSI tracks state that persists across sleeps, such as iowaits and
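__schedstats_from_se() is the pivot of the whole series: a task's statistics live in task_struct, while a group entity's are recovered from the enclosing struct sched_entity_stats via container_of(). A self-contained userspace model of the group-entity branch (the task_of() branch is omitted, and container_of is re-derived from offsetof):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity { uint64_t vruntime; };
struct sched_statistics { uint64_t wait_sum; };

struct sched_entity_stats {
	struct sched_entity	se;
	struct sched_statistics	stats;
};

/* Group-entity half of __schedstats_from_se(): the stats sit right
 * behind the embedded se, so container_of() recovers them. */
static struct sched_statistics *stats_from_se(struct sched_entity *se)
{
	return &container_of(se, struct sched_entity_stats, se)->stats;
}

int main(void)
{
	struct sched_entity_stats ses = { .stats.wait_sum = 42 };

	printf("wait_sum = %llu\n",
	       (unsigned long long)stats_from_se(&ses.se)->wait_sum);
	return 0;
}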

kernel/sched/stop_task.c

@@ -78,8 +78,8 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
if (unlikely((s64)delta_exec < 0))
delta_exec = 0;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
schedstat_set(curr->stats.exec_max,
max(curr->stats.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);

modules.bzl

@@ -37,6 +37,7 @@ COMMON_GKI_MODULES_LIST = [
"drivers/usb/class/cdc-acm.ko",
"drivers/usb/serial/ftdi_sio.ko",
"drivers/usb/serial/usbserial.ko",
"kernel/kheaders.ko",
"lib/crypto/libarc4.ko",
"mm/zsmalloc.ko",
"net/6lowpan/6lowpan.ko",

net/tipc/bearer.c

@@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
return mtu;
}
int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
{
int mtu = TIPC_MIN_BEARER_MTU;
struct tipc_bearer *b;
rcu_read_lock();
b = bearer_get(net, bearer_id);
if (b)
mtu += b->encap_hlen;
rcu_read_unlock();
return mtu;
}
/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
*/
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
@@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (tipc_udp_mtu_bad(nla_get_u32
(props[TIPC_NLA_PROP_MTU]))) {
if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
b->encap_hlen + TIPC_MIN_BEARER_MTU) {
NL_SET_ERR_MSG(info->extack,
"MTU value is out-of-range");
return -EINVAL;
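tipc_bearer_min_mtu() folds the per-bearer encapsulation overhead into the floor that the netlink setter now enforces, replacing the UDP-only tipc_udp_mtu_bad() check. A userspace sketch of the validation; TIPC_MIN_BEARER_MTU is given an assumed illustrative value here rather than the kernel's actual definition:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TIPC_MIN_BEARER_MTU 576		/* assumed, for illustration only */

struct tipc_bearer { uint16_t encap_hlen; };

/* Mirrors tipc_bearer_min_mtu(): protocol floor plus encap overhead. */
static uint32_t bearer_min_mtu(const struct tipc_bearer *b)
{
	return TIPC_MIN_BEARER_MTU + (b ? b->encap_hlen : 0);
}

/* Mirrors the reworked __tipc_nl_bearer_set() range check. */
static bool mtu_in_range(const struct tipc_bearer *b, uint32_t mtu)
{
	return mtu >= bearer_min_mtu(b);
}

int main(void)
{
	struct tipc_bearer udp4 = { .encap_hlen = 20 + 8 };	/* iphdr + udphdr */
	struct tipc_bearer udp6 = { .encap_hlen = 40 + 8 };	/* ipv6hdr + udphdr */

	printf("udp4: min %u, 600 ok? %d\n", bearer_min_mtu(&udp4),
	       mtu_in_range(&udp4, 600));
	printf("udp6: min %u, 600 ok? %d\n", bearer_min_mtu(&udp6),
	       mtu_in_range(&udp6, 600));
	return 0;
}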

net/tipc/bearer.h

@@ -146,6 +146,7 @@ struct tipc_media {
* @identity: array index of this bearer within TIPC bearer array
* @disc: ptr to link setup request
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @encap_hlen: encap headers length
* @up: bearer up flag (bit 0)
* @refcnt: tipc_bearer reference counter
*
@@ -170,6 +171,7 @@ struct tipc_bearer {
u32 identity;
struct tipc_discoverer *disc;
char net_plane;
u16 encap_hlen;
unsigned long up;
refcount_t refcnt;
};
@@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
int tipc_bearer_mtu(struct net *net, u32 bearer_id);
int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,

net/tipc/link.c

@@ -2199,7 +2199,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
u32 dlen = msg_data_sz(hdr), glen = 0;
u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
@@ -2238,6 +2238,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
switch (mtyp) {
case RESET_MSG:
case ACTIVATE_MSG:
msg_max = msg_max_pkt(hdr);
if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
break;
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -2282,8 +2285,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->peer_session = msg_session(hdr);
l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max_pkt(hdr))
l->mtu = msg_max_pkt(hdr);
if (l->mtu > msg_max)
l->mtu = msg_max;
break;
case STATE_MSG:
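On RESET/ACTIVATE the receive path now distrusts a peer-advertised msg_max smaller than what the local bearer can carry, bailing out before the link MTU is negotiated down below the bearer minimum. A simplified sketch of the resulting clamp (the real code breaks out of the whole switch arm rather than calling a helper, and the example values are invented):

#include <stdio.h>
#include <stdint.h>

static uint32_t negotiate_link_mtu(uint32_t cur_mtu, uint32_t msg_max,
				   uint32_t bearer_min)
{
	if (msg_max < bearer_min)	/* bogus advertisement: ignore it */
		return cur_mtu;
	return cur_mtu > msg_max ? msg_max : cur_mtu;
}

int main(void)
{
	printf("%u\n", negotiate_link_mtu(1500, 100, 148));	/* kept at 1500 */
	printf("%u\n", negotiate_link_mtu(1500, 1400, 148));	/* shrunk to 1400 */
	return 0;
}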

net/tipc/udp_media.c

@@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
sizeof(struct udphdr))) {
b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
if (tipc_mtu_bad(dev, b->encap_hlen)) {
err = -EINVAL;
goto err;
}
@@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
else
udp_conf.local_ip6 = local.ipv6;
ub->ifindex = dev->ifindex;
b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
b->mtu = 1280;
#endif
} else {
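Where the IPv4 path used to hard-code the header sizes into the tipc_mtu_bad() call, both address families now record the overhead in b->encap_hlen, which is exactly what tipc_bearer_min_mtu() adds back on top of the floor. A tiny Linux userspace check of those header sizes (assumes the glibc netinet headers):

#include <stdio.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>

int main(void)
{
	unsigned int v4 = sizeof(struct iphdr) + sizeof(struct udphdr);
	unsigned int v6 = sizeof(struct ip6_hdr) + sizeof(struct udphdr);

	printf("IPv4/UDP encap_hlen: %u\n", v4);	/* 20 + 8 = 28 */
	printf("IPv6/UDP encap_hlen: %u\n", v6);	/* 40 + 8 = 48 */
	return 0;
}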

sound/soc/fsl/fsl_micfil.c

@@ -763,7 +763,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to pcm register\n");
return ret;
goto err_pm_disable;
}
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
@@ -771,9 +771,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
goto err_pm_disable;
}
return ret;
err_pm_disable:
pm_runtime_disable(&pdev->dev);
return ret;
}
static void fsl_micfil_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
@@ -834,6 +845,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {
static struct platform_driver fsl_micfil_driver = {
.probe = fsl_micfil_probe,
.remove_new = fsl_micfil_remove,
.driver = {
.name = "fsl-micfil-dai",
.pm = &fsl_micfil_pm_ops,