Merge branch 'android13-5.10' into branch 'android13-5.10-lts'

Sync up with android13-5.10 for the following commits:

f677cbf076 ANDROID: GKI: include more type definitions in vendor hooks
2b35014fea ANDROID: fuse-bpf: Introduce readdirplus test case for fuse bpf
0559f8d2fd BACKPORT: nfc: nfcmrvl: main: reorder destructive operations in nfcmrvl_nci_unregister_dev to avoid bugs
2c8c8d03c1 ANDROID: fuse-bpf: Make sure force_again flag is false by default
4ad093cae1 ANDROID: fuse-bpf: Make inodes with backing_fd reachable for regular FUSE fuse_iget
6210ced850 BACKPORT: ptrace: Check PTRACE_O_SUSPEND_SECCOMP permission on PTRACE_SEIZE
da358e264c BACKPORT: locking: Add missing __sched attributes
4e1dd5a354 BACKPORT: ALSA: pcm: Fix races among concurrent prealloc proc writes
4e5367f25d BACKPORT: ALSA: pcm: Fix races among concurrent prepare and hw_params/hw_free calls
dd17ad6e7d BACKPORT: ALSA: pcm: Fix races among concurrent read/write and buffer changes
f39647e757 ANDROID: Fix up abi issue with struct snd_pcm_runtime
d7202e9cc4 BACKPORT: ALSA: pcm: Fix races among concurrent hw_params and hw_free calls
80b6ac8cf3 Revert "ANDROID: fuse-bpf: use target instead of parent inode to execute backing revalidate"
5d95acffca ANDROID: KVM: arm64: Fix error handling in pKVM FF-A proxy
854df93a7a ANDROID: GKI: Expose device async to userspace
18be5dcef3 FROMGIT: f2fs: fix iostat related lock protection
fc02d3582a BACKPORT: FROMLIST: dma-buf: Move sysfs work out of DMA-BUF export path
fca6ddffc8 ANDROID: vendor_hooks: add vendor hoook in current_alloc_flags()
45cb58e134 ANDROID: vendor_hooks: add vendor hoook to report acr_info in cma_alloc()
45d8a7d082 ANDROID: Enable GKI Dr. No Enforcement
06e6eb707d ANDROID: KVM: arm64: Prevent kmemleak from accessing .hyp.data
2f97e58fd2 BACKPORT: exfat: improve write performance when dirsync enabled
f8ca44396e FROMLIST: scsi: ufs: Fix a race between the interrupt handler and the reset handler
3e1a3ae036 FROMLIST: scsi: ufs: Support clearing multiple commands at once
986b493c4c FROMLIST: scsi: ufs: Simplify ufshcd_clear_cmd()
fe3b7f87c4 ANDROID: Adding Image.gz and boot-gz.img
e913814ab6 ANDROID: softirq: Refine RT defer softirq
043c58ffe0 ANDROID: binder: fix race in priority restore
d45e8f3336 ANDROID: binder: switch task argument for binder_thread
d4dce34fbb ANDROID: binder: pass desired priority by reference
e4f3cf6bdb ANDROID: binder: fold common setup of node_prio
a4e61a4805 ANDROID: Update the ABI representation
8d9e58e6f3 ANDROID: Update the ABI representation
19e41a3404 ANDROID: sched: Add vendor hook for cpu distribution functions
a08f978392 ANDROID: Update the ABI representation
4b895c556f BACKPORT: io_uring: fix race between timeout flush and removal
20c6e1ba55 ANDROID: KVM: arm64: Don't update IOMMUs unnecessarily
c84bdd74f8 ANDROID: Creating boot.img for x86_64 GKI
84b11bc9d4 ANDROID: Update the ABI representation
b5a54d8de2 BACKPORT: net/sched: cls_u32: fix netns refcount changes in u32_change()
d23166278e ANDROID: Update the ABI representation
13b6bd38bb ANDROID: mm: vh for compaction begin/end
d68ba8769b ANDROID: Fix the CONFIG_ANDROID_VENDOR_OEM_DATA=n build
2a6fab1479 ANDROID: Update the ABI representation
a1037b8e1b ANDROID: init_task: Init android vendor and oem data
bb697d4a01 FROMGIT: xfrm: do not set IPv4 DF flag when encapsulating IPv6 frames <= 1280 bytes.
812805ff3b UPSTREAM: io_uring: always use original task when preparing req identity
b610eff230 ANDROID: fuse-bpf: use target instead of parent inode to execute backing revalidate
1e48e8970c FROMLIST: remoteproc: Fix dma_mem leak after rproc_shutdown
91ad5ba0aa FROMLIST: dma-mapping: Add dma_release_coherent_memory to DMA API
6eece719b7 UPSTREAM: arm64: paravirt: Use RCU read locks to guard stolen_time
0db47d8194 Revert "FROMLIST: arm64: paravirt: Use RCU read locks to guard stolen_time"
e9dd78ebe1 FROMLIST: BACKPORT: mm: fix is_pinnable_page against on cma page
c0f1d79d13 ANDROID: Update the ABI representation
3676702251 ANDROID: Update the ABI representation
0ca85e35bf ANDROID: add vendor_hook to control CMA allocation ratio
1ccbb12b74 ANDROID: Creating boot-img.tar.gz for aarch64
5d08df9399 ANDROID: Update the ABI representation
f0cf55d4bd ANDROID: Update the ABI representation
45a00576f8 UPSTREAM: usb: dwc3: gadget: Move null pinter check to proper place
83962808e2 UPSTREAM: firmware_loader: use kernel credentials when reading firmware
f6243b50bb FROMGIT: dma-buf: ensure unique directory name for dmabuf stats
53cad4677c ANDROID: Update the ABI representation
9292423a3e BACKPORT: can: ems_usb: ems_usb_start_xmit(): fix double dev_kfree_skb() in error path
0944dd4741 ANDROID: arm64: Fix MMIO guard ioremap when called before slab_is_available().
65735b81dd ANDROID: sched: Add vendor hook for update_rq_clock_pelt
4acf9710ca ANDROID: Disable CFI on trace hooks
b6193c5685 ANDROID: KVM: arm64: pkvm: Ensure that TLBs and I-cache are private to each vcpu
729adca51a ANDROID: KVM: arm64: Remove stale shadow_handle field
bb4c6c0105 BACKPORT: can: usb_8dev: usb_8dev_start_xmit(): fix double dev_kfree_skb() in error path
4f1e1edb08 Revert "ANDROID: KVM: arm64: pkvm: Ensure that TLBs and I-cache are private to each vcpu"
273ad59b01 ANDROID: Update the ABI representation
9328b6c499 ANDROID: Update the ABI symbol list
207e72ba41 BACKPORT: esp: Fix possible buffer overflow in ESP transformation
46fc349c54 ANDROID: Update the ABI representation
45361b5a0f Revert "Revert "binder: Prevent context manager from incrementing ref 0""
346e46a9a3 UPSTREAM: scsi: ufs: core: Exclude UECxx from SFR dump list
f48d444fef FROMGIT: dma-buf: call dma_buf_stats_setup after dmabuf is in valid list
611d3745f3 ANDROID: mm: keep __get_user_pages_remote behavior
9afeef924c ANDROID: Update the ABI representation
ec9b4b8fff UPSTREAM: xfrm: fix tunnel model fragmentation behavior
42596c7b41 ANDROID: fix ABI breakage caused by per_cpu_pages
2eb3710ce5 ANDROID: fix ABI breakage caused by adding union type in struct page
fc19a77b2a FROMLIST: BACKPORT: mm/page_alloc: Remotely drain per-cpu lists
b71c6184df FROMLIST: BACKPORT: mm/page_alloc: Protect PCP lists with a spinlock
c249c40b79 FROMLIST: BACKPORT: mm/page_alloc: Split out buddy removal code from rmqueue into separate helper
a248d08a94 FROMLIST: BACKPORT: mm/page_alloc: Add page->buddy_list and page->pcp_list
e70a2e110b UPSTREAM: BACKPORT: mm/page_alloc: don't pass pfn to free_unref_page_commit()
5707719280 UPSTREAM: BACKPORT: mm/page_alloc: avoid conflating IRQs disabled with zone->lock
49f6aaf99d UPSTREAM: Revert "usb: dwc3: core: Add shutdown callback for dwc3"
721fb79e0e BACKPORT: staging: ion: Prevent incorrect reference counting behavour
0f6bc2b736 FROMGIT: net: fix wrong network header length
f6f08b9b18 UPSTREAM: mm: fix unexpected zeroed page mapping with zram swap
c607c61848 ANDROID: KVM: arm64: Fix for do not allow memslot changes after first VM run under pKVM
b9b94e2aca ANDROID: KVM: arm64: pkvm: Ensure that TLBs and I-cache are private to each vcpu
392241199b ANDROID: Update the ABI representation
cebb2c99be ANDROID: Update the ABI symbol list
10b114cc3c ANDROID: KVM: arm64: Export nvhe_hyp_panic_handler
67bef07aab FROMLIST: arm64: paravirt: Use RCU read locks to guard stolen_time
4dce9d7a65 ANDROID: clang: update to 14.0.7
43e6093d9d FROMGIT: KVM: arm64: Handle host stage-2 faults from 32-bit EL0
4eb197cb06 ANDROID: fix kernelci build issue for configfs module
3ed683cb94 ANDROID: gki - set CONFIG_USB_NET_AX88179_178A=y (usb gbit ethernet dongle)
277827dd5b ANDROID: fix KCFLAGS override by __ANDROID_COMMON_KERNEL__
4053a1e898 ANDROID: Add flag to indicate compiling against ACK
e78c5b621d UPSTREAM: mm: madvise: return correct bytes advised with process_madvise
5f9fb34d8b UPSTREAM: kfence, x86: fix preemptible warning on KPTI-enabled systems
a0046956bf BACKPORT: net/packet: fix slab-out-of-bounds access in packet_recvmsg()
06bb3003c6 BACKPORT: dm: fix NULL pointer issue when free bio
98c15b2bad ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree
6450df3d7e ANDROID: arm64: Auto-enroll MMIO guard on protected vms
3e591c63b1 ANDROID: cma: allow to use CMA in swap-in path
c56ecad172 UPSTREAM: f2fs: should not truncate blocks during roll-forward recovery
a50ef731e0 BACKPORT: media: v4l2-mem2mem: Apply DST_QUEUE_OFF_BASE on MMAP buffers across ioctls
0496c13ded ANDROID: GKI: build damon reclaim
b3190b539a FROMLIST: mm/damon/reclaim: Fix the timer always stays active
ca5cc6bc4c BACKPORT: treewide: Add missing includes masked by cgroup -> bpf dependency
891f111a14 UPSTREAM: mm/damon: modify damon_rand() macro to static inline function
284927effa UPSTREAM: mm/damon: add 'age' of region tracepoint support
3d89e63310 UPSTREAM: mm/damon: hide kernel pointer from tracepoint event
1656aa6e49 UPSTREAM: mm/damon/vaddr: hide kernel pointer from damon_va_three_regions() failure log
a0220f613b UPSTREAM: mm/damon/vaddr: use pr_debug() for damon_va_three_regions() failure logging
6be0ebcb89 UPSTREAM: mm/damon/dbgfs: remove an unnecessary variable
1b9e81febe UPSTREAM: mm/damon: move the implementation of damon_insert_region to damon.h
196600574b UPSTREAM: mm/damon: add access checking for hugetlb pages
2d885a4902 UPSTREAM: mm/damon/dbgfs: support all DAMOS stats
4baaaded13 UPSTREAM: mm/damon/reclaim: provide reclamation statistics
5388d0502f UPSTREAM: mm/damon/schemes: account how many times quota limit has exceeded
caa9694119 UPSTREAM: mm/damon/schemes: account scheme actions that successfully applied
cb7e28849d UPSTREAM: mm/damon: convert macro functions to static inline functions
e7b7a5370d UPSTREAM: mm/damon: move damon_rand() definition into damon.h
5fefa05ffd UPSTREAM: mm/damon/schemes: add the validity judgment of thresholds
1a8086a78e UPSTREAM: mm/damon/vaddr: remove swap_ranges() and replace it with swap()
ed97f2620f UPSTREAM: mm/damon: remove some unneeded function definitions in damon.h
0fb0a85d42 UPSTREAM: mm/damon/core: use abs() instead of diff_of()
df930c3b5d UPSTREAM: mm/damon: unified access_check function naming rules
b1ddf425f6 UPSTREAM: mm/damon/dbgfs: fix 'struct pid' leaks in 'dbgfs_target_ids_write()'
5529c8c7eb UPSTREAM: mm/damon/dbgfs: protect targets destructions with kdamond_lock
3de975028c UPSTREAM: mm/damon/vaddr-test: remove unnecessary variables
0a44e491b5 UPSTREAM: mm/damon/vaddr-test: split a test function having >1024 bytes frame size
a5405bc0f5 UPSTREAM: mm/damon/vaddr: remove an unnecessary warning message
04b67c5cf2 UPSTREAM: mm/damon/core: remove unnecessary error messages
1b77288b69 UPSTREAM: mm/damon/dbgfs: remove an unnecessary error message
5e31976171 UPSTREAM: mm/damon/core: use better timer mechanisms selection threshold
cce6ca2fa3 UPSTREAM: mm/damon/core: fix fake load reports due to uninterruptible sleeps
856b276105 BACKPORT: timers: implement usleep_idle_range()
4c721a7b6e UPSTREAM: mm/damon/dbgfs: fix missed use of damon_dbgfs_lock
48bd7c8963 UPSTREAM: mm/damon/dbgfs: use '__GFP_NOWARN' for user-specified size buffer allocation
90ba2d5488 UPSTREAM: mm/damon: remove return value from before_terminate callback
c8aa05a582 UPSTREAM: mm/damon: fix a few spelling mistakes in comments and a pr_debug message
8d31217aa0 UPSTREAM: mm/damon: simplify stop mechanism
c8f0959228 UPSTREAM: mm/damon/dbgfs: add adaptive_targets list check before enable monitor_on
2aafd45856 UPSTREAM: mm/damon: remove unnecessary variable initialization
b3b7318332 UPSTREAM: mm/damon: introduce DAMON-based Reclamation (DAMON_RECLAIM)
656bbf4bde UPSTREAM: selftests/damon: support watermarks
545df68814 UPSTREAM: mm/damon/dbgfs: support watermarks
62bd89b42a UPSTREAM: mm/damon/schemes: activate schemes based on a watermarks mechanism
f4a02dbcef UPSTREAM: tools/selftests/damon: update for regions prioritization of schemes
98260e4ee7 UPSTREAM: mm/damon/dbgfs: support prioritization weights
2b85e83330 UPSTREAM: mm/damon/vaddr,paddr: support pageout prioritization
76fb24657c UPSTREAM: mm/damon/schemes: prioritize regions within the quotas
6a0d2afdab UPSTREAM: mm/damon/selftests: support schemes quotas
350631992f UPSTREAM: mm/damon/dbgfs: support quotas of schemes
e2eee39ec1 UPSTREAM: mm/damon/schemes: implement time quota
a7e263a0c5 UPSTREAM: mm/damon/schemes: skip already charged targets and regions
51cd480dec UPSTREAM: mm/damon/schemes: implement size quota for schemes application speed control
78572870a2 UPSTREAM: mm/damon/paddr: support the pageout scheme
8d537db0f9 UPSTREAM: mm/damon/dbgfs: remove unnecessary variables
c525089abf UPSTREAM: mm/damon/vaddr: constify static mm_walk_ops
fd1bd69a67 UPSTREAM: mm/damon/dbgfs: support physical memory monitoring
2dc9fec10e UPSTREAM: mm/damon: implement primitives for physical address space monitoring
7dc7024f32 UPSTREAM: mm/damon/vaddr: separate commonly usable functions
319f3accc7 UPSTREAM: mm/damon/dbgfs-test: add a unit test case for 'init_regions'
2c807d1f1f UPSTREAM: mm/damon/dbgfs: allow users to set initial monitoring target regions
99510047c7 UPSTREAM: selftests/damon: add 'schemes' debugfs tests
fe2da2d930 UPSTREAM: mm/damon/schemes: implement statistics feature
59e4256bb0 UPSTREAM: mm/damon/dbgfs: support DAMON-based Operation Schemes
77091caf83 UPSTREAM: mm/damon/vaddr: support DAMON-based Operation Schemes
07cefe8b8c UPSTREAM: mm/damon/core: implement DAMON-based Operation Schemes (DAMOS)
a0a2eec57c UPSTREAM: mm/damon/core: account age of target regions
e5a92ffc76 UPSTREAM: mm/damon/core: nullify pointer ctx->kdamond with a NULL
f6a7b6527d UPSTREAM: mm/damon: needn't hold kdamond_lock to print pid of kdamond
7aa826a5df UPSTREAM: mm/damon: remove unnecessary do_exit() from kdamond
bbc7383c1f UPSTREAM: mm/damon/core: print kdamond start log in debug mode only
d09e6d4366 UPSTREAM: include/linux/damon.h: fix kernel-doc comments for 'damon_callback'
0492d06b2e UPSTREAM: mm/damon: grammar s/works/work/
e24d4d7d21 UPSTREAM: mm/damon/core-test: fix wrong expectations for 'damon_split_regions_of()'
729698e1ab UPSTREAM: mm/damon: don't use strnlen() with known-bogus source length
789928c5b6 UPSTREAM: mm/damon: add kunit tests
d3cff19d31 UPSTREAM: mm/damon: add user space selftests
ac418a7965 UPSTREAM: mm/damon/dbgfs: support multiple contexts
9fda42d2d6 UPSTREAM: mm/damon/dbgfs: export kdamond pid to the user space
c8ecb4f7a1 UPSTREAM: mm/damon: implement a debugfs-based user space interface
e415cf98cb UPSTREAM: mm/damon: add a tracepoint
75f4f6ebe9 UPSTREAM: mm/damon: implement primitives for the virtual memory address spaces
ad6156f833 UPSTREAM: mm/idle_page_tracking: make PG_idle reusable
f78eee74b4 UPSTREAM: mm/damon: adaptively adjust regions
40064a1877 UPSTREAM: mm/damon/core: implement region-based sampling
d1e43a5be8 UPSTREAM: mm: introduce Data Access MONitor (DAMON)
88e4dbaf59 ANDROID: Make MGLRU aware of speculative faults
e7c680add6 ANDROID: KVM: arm64: Prevent HVC calls outside of the core kernel text
32169780e8 ANDROID: fuse-bpf: Fix misuse of args.out_args
df2083258d ANDROID: Update the ABI representation
d7b1683f78 ANDROID: add __trace_bputs() to aarch64 ABI
f6c964af25 ANDROID: Suppress build.sh deprecation warnings.
5d6831add7 ANDROID: KVM: arm64: s2mpu: Allow r/o access to control regs
d5c0f0f937 ANDROID: KVM: arm64: s2mpu: Allow reading MPTC entries
e56d9603a6 ANDROID: KVM: arm64: s2mpu: Allow L1ENTRY_* r/o access
96767ad7be ANDROID: KVM: arm64: s2mpu: Refactor DABT handler
c43dfe89fe ANDROID: KVM: arm64: s2mpu: Extract L1ENTRY_* consts
7a9a532432 BACKPORT: ext4: don't BUG if someone dirty pages without asking ext4 first
c383610d0f UPSTREAM: binder: change error code from postive to negative in binder_transaction
d4d78c7278 ANDROID: fuse-bpf: Fix non-fusebpf build
9a5023967b ANDROID: fuse-bpf: Use fuse_bpf_args in uapi
92c8c21ad0 BACKPORT: nl80211: correctly check NL80211_ATTR_REG_ALPHA2 size
65533e0212 ANDROID: Update the ABI representation
a1013fd19b FROMLIST: kasan: mark KASAN_VMALLOC flags as kasan_vmalloc_flags_t
c098614509 FROMLIST: kasan: fix hw tags enablement when KUNIT tests are disabled
f60a0b3285 UPSTREAM: usb: dwc3: leave default DMA for PCI devices
3b508e8fe4 UPSTREAM: usb: dwc3: support 64 bit DMA in platform driver
03f40d5252 ANDROID: Update the ABI representation
6db38c5bbc FROMGIT: EXP rcu: Move expedited grace period (GP) work to RT kthread_worker
68c87a277c ANDROID: Update the ABI representation
699e6e3211 UPSTREAM: block: fix async_depth sysfs interface for mq-deadline
53ff5efb2c ANDROID: PCI/PM: Use usleep_range for d3hot_delay
609fa1be7a ANDROID: mm: page_pinner: fix elapsed time
d5d9a23576 ANDROID: mm: retry GUP with orignal gup_flags on failure
6acb261444 ANDROID: GKI: 4/15/2022 KMI freeze
a034320a68 ANDROID: add vendor fields to swap_slots_cache to support multiple swap devices
1b14ae01b0 ANDROID: add vendor fields to lruvec to record refault stats
af4eb0e377 ANDROID: add vendor fields to swap_info_struct to record swap stats
fae5207ecc ANDROID: scsi: ufs: Add suspend/resume SCSI command processing support
64293a57f1 ANDROID: scsi: ufs: Pass the clock scaling timeout as an argument
69014b2b36 ANDROID: scsi: ufs: Move a clock scaling check
aca52cabdb ANDROID: scsi: ufs: Reduce the clock scaling latency
00ed95fe93 FROMGIT: scsi: ufs: core: scsi_get_lba() error fix
c0a4aeb7aa FROMGIT: scsi: ufs: Fix runtime PM messages never-ending cycle
0cd3abcaa4 FROMGIT: scsi: core: sd: Add silence_suspend flag to suppress some PM messages
e46eb26194 FROMGIT: scsi: ufs: core: Remove wlun_dev_to_hba()
85d759e39a FROMGIT: scsi: ufs: Add checking lifetime attribute for WriteBooster
44b7a4f00f FROMGIT: scsi: ufs: Use generic error code in ufshcd_set_dev_pwr_mode()
aeedc78679 FROMGIT: scsi: ufs: ufs-mediatek: Fix error checking in ufs_mtk_init_va09_pwr_ctrl()
1fc4aef3d5 FROMGIT: scsi: ufs: Modify Tactive time setting conditions
d87405c2fe FROMGIT: scsi: ufs: ufs-pci: Add support for Intel ADL
b65cfd7b92 FROMGIT: scsi: ufs: ufs-mediatek: Add put_device() after of_find_device_by_node()
4f4bf31d39 FROMGIT: scsi: ufs: ufshpb: Fix warning in ufshpb_set_hpb_read_to_upiu()
acb0ef885c ANDROID: scsi: ufs: Minimize the difference with the upstream code
321995d280 ANDROID: GKI: build multi-gen LRU
306dbfb34c FROMLIST: mm: multi-gen LRU: design doc
8b006e4d1c FROMLIST: mm: multi-gen LRU: admin guide
3cf1dfaaa5 FROMLIST: mm: multi-gen LRU: debugfs interface
96f4a592d3 FROMLIST: mm: multi-gen LRU: thrashing prevention
76fdc1010b FROMLIST: mm: multi-gen LRU: kill switch
082bc8296a FROMLIST: mm: multi-gen LRU: optimize multiple memcgs
93c4f86793 FROMLIST: mm: multi-gen LRU: support page table walks
c8356f7573 FROMLIST: mm: multi-gen LRU: exploit locality in rmap
436dff20eb FROMLIST: mm: multi-gen LRU: minimal implementation
fe302bd1f9 FROMLIST: mm: multi-gen LRU: groundwork
4c6c817249 FROMLIST: mm/vmscan.c: refactor shrink_node()
95acc9c28b FROMLIST: mm: x86: add CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
1ed19b562b FROMLIST: mm: x86, arm64: add arch_has_hw_pte_young()
b4f3b6ac71 UPSTREAM: include/linux/page-flags-layout.h: cleanups
2b286703d9 UPSTREAM: include/linux/page-flags-layout.h: correctly determine LAST_CPUPID_WIDTH
80343eeaf3 UPSTREAM: mm/swap: don't SetPageWorkingset unconditionally during swapin
0c20cff831 UPSTREAM: include/linux/mm_inline.h: fold page_lru_base_type() into its sole caller
aadc45fae6 BACKPORT: mm: VM_BUG_ON lru page flags
bcc2f50f7b BACKPORT: mm: add __clear_page_lru_flags() to replace page_off_lru()
552f416558 BACKPORT: mm/swap.c: don't pass "enum lru_list" to del_page_from_lru_list()
10899adee3 UPSTREAM: mm/swap.c: don't pass "enum lru_list" to trace_mm_lru_insertion()
c18b4f50ce BACKPORT: mm: don't pass "enum lru_list" to lru list addition functions
32ebee4382 BACKPORT: include/linux/mm_inline.h: shuffle lru list addition and deletion functions
885e11e970 BACKPORT: mm/vmscan.c: use add_page_to_lru_list()
75020bfbe2 ANDROID: Move BRANCH from build.config.common to .constants.
5ef1198a15 ANDROID: Update the ABI symbol list
0a227f89cf ANDROID: KVM: arm64: Do not allow memslot modifications once a PVM has run
8be6e93244 ANDROID: fuse-bpf: Fix read_iter
128ed57bca ANDROID: fuse-bpf: Use cache and refcount
8e24eb9a2d ANDROID: fuse-bpf: Rename iocb_fuse to iocb_orig
0f51319527 ANDROID: fuse-bpf: Fix fixattr in rename
0c37c1459a ANDROID: fuse-bpf: Fix readdir
68c9936883 ANDROID: clang: update to 14.0.4
7a197aa504 ANDROID: mm: fix build break
d9e4b67784 ANDROID: mm: freeing MIGRATE_ISOLATE page instantly
83aa7ef838 ANDROID: KVM: arm64: Fix size calculation of FFA memory range
2d2e0ad1d1 ANDROID: KVM: arm64: Pin FFA mailboxes shared by the host
b196350f2a ANDROID: fuse-bpf: Fix lseek return value for offset 0
bba21782c8 ANDROID: Update the ABI symbol list and xml
e5765b86ce ANDROID: GKI: set more vfs-only exports into their own namespace
74ff6e66d2 ANDROID: KVM: arm64: Fix ToCToU issue when refilling the hyp memcache
8fe46774c6 ANDROID: mm: page_pinner: remove dump_page_pinner
94c6c10c39 BACKPORT: mm, kasan: fix __GFP_BITS_SHIFT definition breaking LOCKDEP
7bfa608df5 UPSTREAM: kasan: test: support async (again) and asymm modes for HW_TAGS
4e56697b42 ANDROID: KVM: arm64: iommu: Optimize snapshot_host_stage2
174ac5b7c5 ANDROID: KVM: arm64: s2mpu: Initialize MPTs to PROT_RW
a946ac5ff5 ANDROID: KVM: arm64: iommu: Fix upper bound of PT walk
a63ec2bcac ANDROID: GKI: 4/6/2022 KMI update
ac3d413511 ANDROID: vendor_hooks: Reduce pointless modversions CRC churn
f33dc31c48 ANDROID: mm: gup: additional param in vendor hooks
16b4583a99 ANDROID: mm: page_pinner: fix build warning
01edbc91e2 ANDROID: mm: page_pinner: change pinner buffer size
b8a18e852e ANDROID: mm: page_pinner: remove static buffer
5c70ecb399 ANDROID: mm: page_pinner: remove longterm_pinner
e17f903a92 ANDROID: mm: page_pinner: change output format for alloc_contig_failed
a45f3891de ANDROID: mm: page_pinner refactoring
8e9a170748 FROMGIT: iommu/iova: Improve 32-bit free space estimate
34469ce1b4 ANDROID: KVM: arm64: pkvm: Track the SVE state in the shadow vcpu
7cea3ceefb ANDROID: KVM: arm64: Make the use of host or shadow vcpu less error prone
176c157bbe ANDROID: GKI: set vfs-only exports into their own namespace
21c7e202ac FROMLIST: export: fix string handling of namespace in EXPORT_SYMBOL_NS
fdf0178dba UPSTREAM: module.h: allow #define strings to work with MODULE_IMPORT_NS
7ba447d039 ANDROID: Update the ABI representation
8302ed7fba FROMGIT: arm64: head.S: Initialise MPAM EL2 registers and disable traps
5748592d14 ANDROID: arm64: Partial MPAM sysreg definition import
aadf7ad9db BACKPORT: virtio: pci: check bar values read from virtio config space
7e5df18bee UPSTREAM: Revert "virtio_pci: harden MSI-X interrupts"
c7912e3027 UPSTREAM: Revert "virtio-pci: harden INTX interrupts"
30d72758db FROMLIST: fuse: give wakeup hints to the scheduler
11e605185f ANDROID: KVM: arm64: Don't map host sections in pkvm
fce8d906cf ANDROID: KVM: arm64: Explicitely map kvm_vgic_global_state at EL2
6e12d67ad0 ANDROID: KVM: arm64: Unmap kvm_arm_hyp_percpu_base from the host
ae49ca9a17 ANDROID: KVM: arm64: pkvm: Don't access kvm_arm_hyp_percpu_base at EL1
b576a36a95 ANDROID: Update the ABI symbol list
4c3d004aa7 UPSTREAM: erofs: add sysfs interface
dc123bee94 Revert "ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree"
3f9db3f711 ANDROID: sched: Add vendor hook for rt util update
cd5c13796b ANDROID: Update the ABI symbol list
9859d2761e ANDROID: Add new pkvm_iommu_* functions to aarch64 ABI
8fd93b0ef9 ANDROID: KVM: arm64: iommu: Add pkvm_iommu_finalize
798c4ea545 ANDROID: KVM: arm64: iommu: No powered check in DABT handler
57381d548d ANDROID: KVM: arm64: s2mpu: Create SysMMU_SYNC driver
e69c61cf4e ANDROID: KVM: arm64: iommu: Create parent/child relation
be84f2c770 ANDROID: KVM: arm64: iommu: Run validate() on struct pkvm_iommu
e6574a68fa ANDROID: KVM: arm64: iommu: Create private mapping last
acb9a25416 ANDROID: KVM: arm64: iommu: Free memory on registration error
6eaed0b8b7 ANDROID: KVM: arm64: iommu: Harden __pkvm_iommu_pm_notify
a75cb9df82 ANDROID: KVM: arm64: Drop FOLL_FORCE when pinning guest memory pages
a246583fcc Revert "ANDROID: BACKPORT: KVM: arm64: Add initial support for KVM_CAP_EXIT_HYPERCALL"
bbe5c85592 Revert "ANDROID: KVM: arm64: Allow userspace to receive SHARE and UNSHARE notifications"
588affc843 BACKPORT: virtio-blk: Use blk_validate_block_size() to validate block size
fe0484006f ANDROID: Update the ABI representation
e3356ca0a6 ANDROID: sched: Add vendor hook for util-update related functions
ec7c9ea9d4 ANDROID: Update the ABI representation
982febefcd ANDROID: KVM: arm64: pkvm: Inject SIGSEGV on illegal accesses
e7b80adac2 ANDROID: KVM: arm64: Refactor enter_exception64()
91c32ff1fe ANDROID: KVM: arm64: Add is_pkvm_initialized() helper
ba73e0b827 ANDROID: KVM: arm64: Use PSCI MEM_PROTECT to zap guest pages on reset
40493bc91e ANDROID: KVM: arm64: Check pin_user_pages() return value
dbba49b6cb ANDROID: KVM: arm64: Handle all ID registers trapped for a protected VM
5c6f14f146 ANDROID: Update the ABI symbol list
373e5bd7d4 ANDROID: GKI: Enable BUILD_GKI_CERTIFICATION_TOOLS
83631772f3 ANDROID: usb: gadget: f_accessory: add compat_ioctl support
4443600ce1 UPSTREAM: mm: fix use-after-free when anon vma name is used after vma is freed
6962eb33d8 UPSTREAM: mm: prevent vm_area_struct::anon_name refcount saturation
9fbdc4b53f UPSTREAM: mm: refactor vm_area_struct::anon_vma_name usage code
6b94b8c3b7 ANDROID: KVM: arm64: Only map swap-backed pages into the guest
c8b5505153 ANDROID: Update the ABI representation
3101b49e5b ANDROID: clang: update to 14.0.3
93846ccca1 ANDROID: KVM: arm64: Invalidate TLB by VMID when tearing down the shadow VM
ed0dec098e ANDROID: gki_config: enable F2FS_UNFAIR_RWSEM
f8c415b6c2 Merge remote-tracking branch 'aosp/upstream-f2fs-stable-linux-5.10.y' into android13-5.10
cf9be86ac4 ANDROID: GKI: 3/23/2022 KMI update
e5315a20b2 ANDROID: GKI: enable macsec
7531264e68 ANDROID: Update the ABI symbol list
aea946bdf0 ANDROID: mm: gup: vendor hook in GUP friends
0375b0d297 ANDROID: selftests: incfs: Add umount helper function
5fdeabb1a2 ANDROID: selftests: incfs: skip large_file_test test is not enough free space
9a7faf669a ANDROID: Update the ABI symbol list
b6079b142e ANDROID: incremental-fs: limit mount stack depth
68980ff96c fscrypt: update documentation for direct I/O support
1654219723 f2fs: support direct I/O with fscrypt using blk-crypto
9f7cc5fda4 ext4: support direct I/O with fscrypt using blk-crypto
4b613cb0c4 iomap: support direct I/O with fscrypt using blk-crypto
b6da748bab fscrypt: add functions for direct I/O support
21557656af ANDROID: selftests: incfs: Add -fno-omit-frame-pointer
59e664cd0e f2fs: fix to do sanity check on .cp_pack_total_block_count
40185ceea1 f2fs: make gc_urgent and gc_segment_mode sysfs node readable
f1e8564c55 FROMLIST: kasan, scs: support tagged vmalloc mappings
c05355c5f8 ANDROID: kasan: sync vmalloc support with linux-next/akpm
2fce32030b f2fs: use aggressive GC policy during f2fs_disable_checkpoint()
9214367dd9 f2fs: fix compressed file start atomic write may cause data corruption
bb434bbb77 f2fs: initialize sbi->gc_mode explicitly
81a9c7d6ac f2fs: introduce gc_urgent_mid mode
13f1b8f147 f2fs: compress: fix to print raw data size in error path of lz4 decompression
bede94339a f2fs: remove redundant parameter judgment
4bab14807f f2fs: use spin_lock to avoid hang
e3d44a0028 f2fs: don't get FREEZE lock in f2fs_evict_inode in frozen fs
e182f50c16 f2fs: remove unnecessary read for F2FS_FITS_IN_INODE
e9e689047f f2fs: introduce F2FS_UNFAIR_RWSEM to support unfair rwsem
beebf94806 f2fs: avoid an infinite loop in f2fs_sync_dirty_inodes
266c3326ad f2fs: fix to do sanity check on curseg->alloc_type
7a711a5447 f2fs: fix to avoid potential deadlock
185e61d607 f2fs: quota: fix loop condition at f2fs_quota_sync()
150ce3cd60 f2fs: Restore rwsem lockdep support
c5feaf141e f2fs: fix missing free nid in f2fs_handle_failed_inode
421c7a5850 f2fs: add a way to limit roll forward recovery time
95c5bd71d4 f2fs: introduce F2FS_IPU_HONOR_OPU_WRITE ipu policy
a3bb3ae25e f2fs: adjust readahead block number during recovery
4af7ca6cc2 f2fs: fix to unlock page correctly in error path of is_alive()
7fa3e73d19 f2fs: expose discard related parameters in sysfs
7f148c6c55 f2fs: move discard parameters into discard_cmd_control
04e34c8449 f2fs: fix to enable ATGC correctly via gc_idle sysfs interface
c4454e2978 f2fs: move f2fs to use reader-unfair rwsems

And track more new symbols that were added to the 'android13-5.10' branch:

Leaf changes summary: 28 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 18 Added functions
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 10 Added variables

18 Added functions:

  [A] 'function int __traceiter_android_rvh_cpumask_any_and_distribute(void*, task_struct*, const cpumask*, const cpumask*, int*)'
  [A] 'function int __traceiter_android_rvh_update_rq_clock_pelt(void*, rq*, s64, bool*)'
  [A] 'function int __traceiter_android_vh_cma_alloc_adjust(void*, zone*, bool*)'
  [A] 'function int __traceiter_android_vh_dump_throttled_rt_tasks(void*, int, u64, ktime_t, u64, s64)'
  [A] 'function int __traceiter_android_vh_mm_compaction_begin(void*, compact_control*, long int*)'
  [A] 'function int __traceiter_android_vh_mm_compaction_end(void*, compact_control*, long int)'
  [A] 'function int __traceiter_android_vh_sched_setaffinity_early(void*, task_struct*, const cpumask*, int*)'
  [A] 'function int __traceiter_android_vh_show_max_freq(void*, cpufreq_policy*, unsigned int*)'
  [A] 'function int __traceiter_android_vh_ufs_update_sdev(void*, scsi_device*)'
  [A] 'function unsigned int cfg80211_vendor_cmd_get_sender(wiphy*)'
  [A] 'function int cpumask_any_and_distribute(const cpumask*, const cpumask*)'
  [A] 'function char* d_path(const path*, char*, int)'
  [A] 'function pid* find_vpid(int)'
  [A] 'function mm_struct* get_task_mm(task_struct*)'
  [A] 'function void lru_cache_disable()'
  [A] 'function void lru_cache_enable()'
  [A] 'function void mmput(mm_struct*)'
  [A] 'function int vprintk_emit(int, int, const dev_printk_info*, const char*, va_list)'

10 Added variables:

  [A] 'tracepoint __tracepoint_android_rvh_cpumask_any_and_distribute'
  [A] 'tracepoint __tracepoint_android_rvh_update_rq_clock_pelt'
  [A] 'tracepoint __tracepoint_android_vh_cma_alloc_adjust'
  [A] 'tracepoint __tracepoint_android_vh_dump_throttled_rt_tasks'
  [A] 'tracepoint __tracepoint_android_vh_mm_compaction_begin'
  [A] 'tracepoint __tracepoint_android_vh_mm_compaction_end'
  [A] 'tracepoint __tracepoint_android_vh_sched_setaffinity_early'
  [A] 'tracepoint __tracepoint_android_vh_show_max_freq'
  [A] 'tracepoint __tracepoint_android_vh_ufs_update_sdev'
  [A] 'unsigned long int freq_scale'

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie158191bf4ee4639506c2adb7d8beebdca4923c2
This commit is contained in:
Greg Kroah-Hartman
2022-07-01 12:36:22 +02:00
100 changed files with 3725 additions and 2746 deletions

14
OWNERS
View File

@@ -1,2 +1,12 @@
# include OWNERS from the authoritative android-mainline branch
include kernel/common:android-mainline:/OWNERS
set noparent
# GKI Dr. No Enforcement is active on this branch. Approval of one of the Dr.
# No reviewers is required following a regular CodeReview+2 vote of a code
# reviewer.
#
# See the GKI release documentation (go/gki-dr-no) for further details.
#
# The expanded list of reviewers can be found at:
# https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo
include kernel/common:android-mainline:/OWNERS_DrNo

File diff suppressed because it is too large Load Diff

View File

@@ -184,6 +184,7 @@
cfg80211_unlink_bss
cfg80211_unregister_wdev
cfg80211_update_owe_info_event
cfg80211_vendor_cmd_get_sender
cfg80211_vendor_cmd_reply
__cfi_slowpath
__check_object_size
@@ -294,6 +295,7 @@
cpu_latency_qos_add_request
cpu_latency_qos_remove_request
cpu_latency_qos_update_request
cpumask_any_and_distribute
cpumask_any_but
cpumask_next
cpumask_next_and
@@ -613,6 +615,7 @@
down_timeout
down_trylock
down_write
d_path
drain_workqueue
driver_create_file
driver_register
@@ -849,6 +852,7 @@
find_pid_ns
find_task_by_vpid
find_vma
find_vpid
finish_wait
firmware_request_nowarn
flush_dcache_page
@@ -870,6 +874,7 @@
freq_qos_add_request
freq_qos_remove_request
freq_qos_update_request
freq_scale
fs_bio_set
fsync_bdev
full_name_hash
@@ -926,6 +931,7 @@
get_sg_io_hdr
__get_task_comm
get_task_cred
get_task_mm
get_thermal_instance
get_unused_fd_flags
get_user_pages
@@ -1250,6 +1256,8 @@
log_threaded_irq_wakeup_reason
__log_write_mmio
loops_per_jiffy
lru_cache_disable
lru_cache_enable
lzo1x_1_compress
lzo1x_decompress_safe
lzorle1x_1_compress
@@ -1302,6 +1310,7 @@
misc_deregister
misc_register
__mmdrop
mmput
mod_delayed_work_on
mod_timer
__module_get
@@ -1617,6 +1626,7 @@
print_hex_dump
printk
printk_deferred
__printk_ratelimit
proc_create
proc_create_data
proc_create_single_data
@@ -2175,6 +2185,7 @@
__traceiter_android_rvh_cgroup_force_kthread_migration
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_cgroup_online
__traceiter_android_rvh_cpumask_any_and_distribute
__traceiter_android_rvh_cpu_overutilized
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_dequeue_task_fair
@@ -2207,13 +2218,16 @@
__traceiter_android_rvh_ufs_reprogram_all_keys
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_rq_clock_pelt
__traceiter_android_rvh_update_rt_rq_load_avg
__traceiter_android_rvh_util_est_update
__traceiter_android_vh_arch_set_freq_scale
__traceiter_android_vh_cma_alloc_adjust
__traceiter_android_vh_cma_alloc_finish
__traceiter_android_vh_cma_alloc_start
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_enable_thermal_genl_check
__traceiter_android_vh_ep_create_wakeup_source
@@ -2222,12 +2236,16 @@
__traceiter_android_vh_internal_get_user_pages_fast
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_mm_compaction_begin
__traceiter_android_vh_mm_compaction_end
__traceiter_android_vh_of_i2c_get_board_info
__traceiter_android_vh_pagecache_get_page
__traceiter_android_vh_pin_user_pages
__traceiter_android_vh_rmqueue
__traceiter_android_vh_sched_setaffinity_early
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_show_max_freq
__traceiter_android_vh_snd_compr_use_pause_in_drain
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_sysrq_crash
@@ -2246,6 +2264,7 @@
__traceiter_android_vh_ufs_send_command
__traceiter_android_vh_ufs_send_tm_command
__traceiter_android_vh_ufs_send_uic_command
__traceiter_android_vh_ufs_update_sdev
__traceiter_android_vh_ufs_update_sysfs
__traceiter_android_vh_usb_dev_resume
__traceiter_android_vh_usb_dev_suspend
@@ -2279,6 +2298,7 @@
__tracepoint_android_rvh_cgroup_force_kthread_migration
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_cgroup_online
__tracepoint_android_rvh_cpumask_any_and_distribute
__tracepoint_android_rvh_cpu_overutilized
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_dequeue_task_fair
@@ -2311,13 +2331,16 @@
__tracepoint_android_rvh_ufs_reprogram_all_keys
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_rq_clock_pelt
__tracepoint_android_rvh_update_rt_rq_load_avg
__tracepoint_android_rvh_util_est_update
__tracepoint_android_vh_arch_set_freq_scale
__tracepoint_android_vh_cma_alloc_adjust
__tracepoint_android_vh_cma_alloc_finish
__tracepoint_android_vh_cma_alloc_start
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_enable_thermal_genl_check
__tracepoint_android_vh_ep_create_wakeup_source
@@ -2326,12 +2349,16 @@
__tracepoint_android_vh_internal_get_user_pages_fast
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_mm_compaction_begin
__tracepoint_android_vh_mm_compaction_end
__tracepoint_android_vh_of_i2c_get_board_info
__tracepoint_android_vh_pagecache_get_page
__tracepoint_android_vh_pin_user_pages
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_sched_setaffinity_early
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_show_max_freq
__tracepoint_android_vh_snd_compr_use_pause_in_drain
__tracepoint_android_vh_sound_usb_support_cpu_suspend
__tracepoint_android_vh_sysrq_crash
@@ -2350,6 +2377,7 @@
__tracepoint_android_vh_ufs_send_command
__tracepoint_android_vh_ufs_send_tm_command
__tracepoint_android_vh_ufs_send_uic_command
__tracepoint_android_vh_ufs_update_sdev
__tracepoint_android_vh_ufs_update_sysfs
__tracepoint_android_vh_usb_dev_resume
__tracepoint_android_vh_usb_dev_suspend
@@ -2582,6 +2610,7 @@
vm_map_ram
vm_unmap_ram
vprintk
vprintk_emit
vring_del_virtqueue
vring_interrupt
vring_new_virtqueue

View File

@@ -68,6 +68,8 @@ CONFIG_CMDLINE_EXTEND=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_ENERGY_MODEL=y
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_GOV_MENU=y

View File

@@ -215,9 +215,6 @@ struct kvm_arch {
};
struct kvm_protected_vcpu {
/* A unique id to the shadow structs in the hyp shadow area. */
int shadow_handle;
/* A pointer to the host's vcpu. */
struct kvm_vcpu *host_vcpu;

View File

@@ -2139,11 +2139,11 @@ static int finalize_hyp_mode(void)
return 0;
/*
* Exclude HYP BSS from kmemleak so that it doesn't get peeked
* at, which would end badly once the section is inaccessible.
* None of other sections should ever be introspected.
* Exclude HYP BSS and DATA from kmemleak so that they don't get peeked
* at, which would end badly once the sections are inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
return pkvm_drop_host_privileges();
}

View File

@@ -78,7 +78,8 @@ int __pkvm_remove_ioguard_page(struct kvm_vcpu *vcpu, u64 ipa);
bool __pkvm_check_ioguard_page(struct kvm_vcpu *vcpu);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot,
bool update_iommu);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, pkvm_id owner_id);
int host_stage2_unmap_dev_locked(phys_addr_t start, u64 size);
int kvm_host_prepare_stage2(void *pgt_pool_base);

View File

@@ -324,7 +324,7 @@ static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
int ret = 0;
if (nshared != nranges) {
WARN_ON(__ffa_host_unshare_ranges(ranges, nshared));
WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
ret = FFA_RET_DENIED;
}
@@ -338,7 +338,7 @@ static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
int ret = 0;
if (nunshared != nranges) {
WARN_ON(__ffa_host_share_ranges(ranges, nunshared));
WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
ret = FFA_RET_DENIED;
}

View File

@@ -465,7 +465,8 @@ static bool range_is_memory(u64 start, u64 end)
}
static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
enum kvm_pgtable_prot prot,
bool update_iommu)
{
int ret;
@@ -474,7 +475,8 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
if (ret)
return ret;
pkvm_iommu_host_stage2_idmap(start, end, prot);
if (update_iommu)
pkvm_iommu_host_stage2_idmap(start, end, prot);
return 0;
}
@@ -536,9 +538,9 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
}
int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
enum kvm_pgtable_prot prot)
enum kvm_pgtable_prot prot, bool update_iommu)
{
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot, update_iommu);
}
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(32, 1)
@@ -612,7 +614,7 @@ static int host_stage2_idmap(u64 addr)
if (ret)
return ret;
return host_stage2_idmap_locked(range.start, range.end - range.start, prot);
return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
}
static bool is_dabt(u64 esr)
@@ -833,7 +835,7 @@ static int __host_set_page_state_range(u64 addr, u64 size,
{
enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
return host_stage2_idmap_locked(addr, size, prot);
return host_stage2_idmap_locked(addr, size, prot, true);
}
static int host_request_owned_transition(u64 *completer_addr,

View File

@@ -467,7 +467,6 @@ static int init_shadow_structs(struct kvm *kvm, struct kvm_shadow_vm *vm,
shadow_state->vm = vm;
shadow_vcpu->arch.hw_mmu = &vm->arch.mmu;
shadow_vcpu->arch.pkvm.shadow_handle = vm->shadow_handle;
shadow_vcpu->arch.pkvm.shadow_vm = vm;
shadow_vcpu->arch.power_off = true;
@@ -742,7 +741,7 @@ int __pkvm_teardown_shadow(int shadow_handle)
per_cpu_ptr(&last_loaded_vcpu, i);
struct kvm_vcpu *vcpu = *last_loaded_vcpu_ptr;
if (vcpu && vcpu->arch.pkvm.shadow_handle == shadow_handle)
if (vcpu && vcpu->arch.pkvm.shadow_vm == vm)
*last_loaded_vcpu_ptr = NULL;
}

View File

@@ -222,7 +222,7 @@ static int fix_host_ownership_walker(u64 addr, u64 end, u32 level,
return -EINVAL;
}
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot, false);
}
static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,

View File

@@ -108,14 +108,6 @@ void __init kvm_hyp_reserve(void)
hyp_mem_base);
}
/*
* Updates the state of the host's version of the vcpu state.
*/
static void update_vcpu_state(struct kvm_vcpu *vcpu, int shadow_handle)
{
vcpu->arch.pkvm.shadow_handle = shadow_handle;
}
/*
* Allocates and donates memory for EL2 shadow structs.
*
@@ -133,7 +125,7 @@ static int __create_el2_shadow(struct kvm *kvm)
void *pgd, *shadow_addr;
unsigned long idx;
int shadow_handle;
int ret, i;
int ret;
if (kvm->created_vcpus < 1)
return -EINVAL;
@@ -174,10 +166,6 @@ static int __create_el2_shadow(struct kvm *kvm)
/* Store the shadow handle given by hyp for future call reference. */
kvm->arch.pkvm.shadow_handle = shadow_handle;
/* Adjust host's vcpu state as it doesn't control it anymore. */
for (i = 0; i < kvm->created_vcpus; i++)
update_vcpu_state(kvm->vcpus[i], shadow_handle);
return 0;
free_shadow:

View File

@@ -109,7 +109,8 @@ void ioremap_phys_range_hook(phys_addr_t phys_addr, size_t size, pgprot_t prot)
* This page will be permanently accessible, similar to a
* saturated refcount.
*/
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (slab_is_available())
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (ref) {
refcount_set(&ref->count, 1);
if (xa_err(xa_store(&ioremap_guard_array, pfn, ref,

View File

@@ -60,6 +60,8 @@ CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure cgroup.memory=nok
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y

View File

@@ -5,4 +5,5 @@
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1
BUILD_GKI_ARTIFACTS=1
BUILD_GKI_BOOT_IMG_SIZE=67108864

View File

@@ -648,20 +648,21 @@ static int to_kernel_prio(int policy, int user_priority)
return MAX_USER_RT_PRIO - 1 - user_priority;
}
static void binder_do_set_priority(struct task_struct *task,
struct binder_priority desired,
static void binder_do_set_priority(struct binder_thread *thread,
const struct binder_priority *desired,
bool verify)
{
struct task_struct *task = thread->task;
int priority; /* user-space prio value */
bool has_cap_nice;
unsigned int policy = desired.sched_policy;
unsigned int policy = desired->sched_policy;
if (task->policy == policy && task->normal_prio == desired.prio)
if (task->policy == policy && task->normal_prio == desired->prio)
return;
has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
priority = to_userspace_prio(policy, desired.prio);
priority = to_userspace_prio(policy, desired->prio);
if (verify && is_rt_policy(policy) && !has_cap_nice) {
long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
@@ -686,16 +687,30 @@ static void binder_do_set_priority(struct task_struct *task,
}
}
if (policy != desired.sched_policy ||
to_kernel_prio(policy, priority) != desired.prio)
if (policy != desired->sched_policy ||
to_kernel_prio(policy, priority) != desired->prio)
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: priority %d not allowed, using %d instead\n",
task->pid, desired.prio,
task->pid, desired->prio,
to_kernel_prio(policy, priority));
trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
to_kernel_prio(policy, priority),
desired.prio);
desired->prio);
spin_lock(&thread->prio_lock);
if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
/*
* A new priority has been set by an incoming nested
* transaction. Abort this priority restore and allow
* the transaction to run at the new desired priority.
*/
spin_unlock(&thread->prio_lock);
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: %s: aborting priority restore\n",
thread->pid, __func__);
return;
}
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
@@ -709,42 +724,47 @@ static void binder_do_set_priority(struct task_struct *task,
}
if (is_fair_policy(policy))
set_user_nice(task, priority);
thread->prio_state = BINDER_PRIO_SET;
spin_unlock(&thread->prio_lock);
}
static void binder_set_priority(struct task_struct *task,
struct binder_priority desired)
static void binder_set_priority(struct binder_thread *thread,
const struct binder_priority *desired)
{
binder_do_set_priority(task, desired, /* verify = */ true);
binder_do_set_priority(thread, desired, /* verify = */ true);
}
static void binder_restore_priority(struct task_struct *task,
struct binder_priority desired)
static void binder_restore_priority(struct binder_thread *thread,
const struct binder_priority *desired)
{
binder_do_set_priority(task, desired, /* verify = */ false);
binder_do_set_priority(thread, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
static void binder_transaction_priority(struct binder_thread *thread,
struct binder_transaction *t,
struct binder_priority node_prio,
bool inherit_rt)
struct binder_node *node)
{
struct binder_priority desired_prio = t->priority;
struct task_struct *task = thread->task;
struct binder_priority desired = t->priority;
const struct binder_priority node_prio = {
.sched_policy = node->sched_policy,
.prio = node->min_priority,
};
bool skip = false;
if (t->set_priority_called)
return;
t->set_priority_called = true;
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
trace_android_vh_binder_priority_skip(task, &skip);
if (skip)
return;
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
desired.prio = NICE_TO_PRIO(0);
desired.sched_policy = SCHED_NORMAL;
}
if (node_prio.prio < t->priority.prio ||
@@ -757,10 +777,29 @@ static void binder_transaction_priority(struct task_struct *task,
* SCHED_FIFO, prefer SCHED_FIFO, since it can
* run unbounded, unlike SCHED_RR.
*/
desired_prio = node_prio;
desired = node_prio;
}
binder_set_priority(task, desired_prio);
spin_lock(&thread->prio_lock);
if (thread->prio_state == BINDER_PRIO_PENDING) {
/*
* Task is in the process of changing priorities
* saving its current values would be incorrect.
* Instead, save the pending priority and signal
* the task to abort the priority restore.
*/
t->saved_priority = thread->prio_next;
thread->prio_state = BINDER_PRIO_ABORT;
binder_debug(BINDER_DEBUG_PRIORITY_CAP,
"%d: saved pending priority %d\n",
current->pid, thread->prio_next.prio);
} else {
t->saved_priority.sched_policy = task->policy;
t->saved_priority.prio = task->normal_prio;
}
spin_unlock(&thread->prio_lock);
binder_set_priority(thread, &desired);
trace_android_vh_binder_set_priority(t, task);
}
@@ -2479,14 +2518,11 @@ static int binder_proc_transaction(struct binder_transaction *t,
struct binder_thread *thread)
{
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
BUG_ON(!node);
binder_node_lock(node);
node_prio.prio = node->min_priority;
node_prio.sched_policy = node->sched_policy;
if (oneway) {
BUG_ON(thread);
@@ -2516,8 +2552,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
thread ? thread->task : 0, node->debug_id, t->code, pending_async);
if (thread) {
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
binder_transaction_priority(thread, t, node);
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
@@ -2604,6 +2639,7 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
bool is_nested = false;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -2788,6 +2824,7 @@ static void binder_transaction(struct binder_proc *proc,
atomic_inc(&from->tmp_ref);
target_thread = from;
spin_unlock(&tmp->lock);
is_nested = true;
break;
}
spin_unlock(&tmp->lock);
@@ -2852,6 +2889,7 @@ static void binder_transaction(struct binder_proc *proc,
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->is_nested = is_nested;
if (!(t->flags & TF_ONE_WAY) &&
binder_supported_policy(current->policy)) {
/* Inherit supported policies for synchronous transactions */
@@ -3211,9 +3249,15 @@ static void binder_transaction(struct binder_proc *proc,
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
if (in_reply_to->is_nested) {
spin_lock(&thread->prio_lock);
thread->prio_state = BINDER_PRIO_PENDING;
thread->prio_next = in_reply_to->saved_priority;
spin_unlock(&thread->prio_lock);
}
wake_up_interruptible_sync(&target_thread->wait);
trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_restore_priority(thread, &in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
@@ -3327,7 +3371,7 @@ err_invalid_target_handle:
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
trace_android_vh_binder_restore_priority(in_reply_to, current);
binder_restore_priority(current, in_reply_to->saved_priority);
binder_restore_priority(thread, &in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
binder_enqueue_thread_work(thread, &thread->return_error.work);
binder_send_failed_reply(in_reply_to, return_error);
@@ -4006,7 +4050,7 @@ retry:
binder_stop_on_user_error < 2);
}
trace_android_vh_binder_restore_priority(NULL, current);
binder_restore_priority(current, proc->default_priority);
binder_restore_priority(thread, &proc->default_priority);
}
if (non_block) {
@@ -4233,14 +4277,10 @@ retry:
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
trd->target.ptr = target_node->ptr;
trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
binder_transaction_priority(thread, t, target_node);
cmd = BR_TRANSACTION;
} else {
trd->target.ptr = 0;
@@ -4471,6 +4511,8 @@ static struct binder_thread *binder_get_thread_ilocked(
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
spin_lock_init(&thread->prio_lock);
thread->prio_state = BINDER_PRIO_SET;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
}

View File

@@ -367,6 +367,12 @@ struct binder_priority {
int prio;
};
enum binder_prio_state {
BINDER_PRIO_SET, /* desired priority set */
BINDER_PRIO_PENDING, /* initiated a saved priority restore */
BINDER_PRIO_ABORT, /* abort the pending priority restore */
};
/**
* struct binder_proc - binder process bookkeeping
* @proc_node: element for binder_procs list
@@ -511,6 +517,12 @@ struct binder_proc {
* when outstanding transactions are cleaned up
* (protected by @proc->inner_lock)
* @task: struct task_struct for this thread
* @prio_lock: protects thread priority fields
* @prio_next: saved priority to be restored next
* (protected by @prio_lock)
* @prio_state: state of the priority restore process as
* defined by enum binder_prio_state
* (protected by @prio_lock)
*
* Bookkeeping structure for binder threads.
*/
@@ -531,6 +543,9 @@ struct binder_thread {
atomic_t tmp_ref;
bool is_dead;
struct task_struct *task;
spinlock_t prio_lock;
struct binder_priority prio_next;
enum binder_prio_state prio_state;
};
/**
@@ -567,6 +582,7 @@ struct binder_transaction {
struct binder_priority priority;
struct binder_priority saved_priority;
bool set_priority_called;
bool is_nested;
kuid_t sender_euid;
struct list_head fd_fixups;
binder_uintptr_t security_ctx;

View File

@@ -214,6 +214,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pick_next_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_busy_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_calc_alloc_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pagecache_get_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_fault_get_page);
@@ -277,6 +281,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exclude_reserved_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_include_reserved_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mem);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_print_slabinfo_header);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_shrink_slab);
@@ -397,3 +402,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rt_rq_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pci_d3_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rq_clock_pelt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpumask_any_and_distribute);

View File

@@ -11,6 +11,7 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "dma-buf-sysfs-stats.h"
@@ -135,10 +136,51 @@ void dma_buf_uninit_sysfs_statistics(void)
kset_unregister(dma_buf_stats_kset);
}
static void sysfs_add_workfn(struct work_struct *work)
{
/* The ABI would have to change for this to be false, but let's be paranoid. */
_Static_assert(sizeof(struct kobject) >= sizeof(struct work_struct),
"kobject is smaller than work_struct");
struct dma_buf_sysfs_entry *sysfs_entry =
container_of((struct kobject *)work, struct dma_buf_sysfs_entry, kobj);
struct dma_buf *dmabuf = sysfs_entry->dmabuf;
/*
* A dmabuf is ref-counted via its file member. If this handler holds the only
* reference to the dmabuf, there is no need for sysfs kobject creation. This is an
* optimization and a race; when the reference count drops to 1 immediately after
* this check it is not harmful as the sysfs entry will still get cleaned up in
* dma_buf_stats_teardown, which won't get called until the final dmabuf reference
* is released, and that can't happen until the end of this function.
*/
if (file_count(dmabuf->file) > 1) {
/*
* kobject_init_and_add expects kobject to be zero-filled, but we have populated it
* to trigger this work function.
*/
memset(&dmabuf->sysfs_entry->kobj, 0, sizeof(dmabuf->sysfs_entry->kobj));
dmabuf->sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
if (kobject_init_and_add(&dmabuf->sysfs_entry->kobj, &dma_buf_ktype, NULL,
"%lu", file_inode(dmabuf->file)->i_ino)) {
kobject_put(&dmabuf->sysfs_entry->kobj);
dmabuf->sysfs_entry = NULL;
}
} else {
/*
* Free the sysfs_entry and reset the pointer so dma_buf_stats_teardown doesn't
* attempt to operate on it.
*/
kfree(dmabuf->sysfs_entry);
dmabuf->sysfs_entry = NULL;
}
dma_buf_put(dmabuf);
}
int dma_buf_stats_setup(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
struct work_struct *work;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
@@ -148,25 +190,21 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
return -EINVAL;
}
sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
sysfs_entry = kmalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
if (!sysfs_entry)
return -ENOMEM;
sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
sysfs_entry->dmabuf = dmabuf;
dmabuf->sysfs_entry = sysfs_entry;
/* create the directory for buffer stats */
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
"%lu", file_inode(dmabuf->file)->i_ino);
if (ret)
goto err_sysfs_dmabuf;
/*
* The use of kobj as a work_struct is an ugly hack
* to avoid an ABI break in this frozen kernel.
*/
work = (struct work_struct *)&dmabuf->sysfs_entry->kobj;
INIT_WORK(work, sysfs_add_workfn);
get_dma_buf(dmabuf); /* This reference will be dropped in sysfs_add_workfn. */
schedule_work(work);
return 0;
err_sysfs_dmabuf:
kobject_put(&sysfs_entry->kobj);
dmabuf->sysfs_entry = NULL;
return ret;
}

View File

@@ -471,6 +471,7 @@ EXPORT_SYMBOL_GPL(is_dma_buf_file);
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
@@ -480,6 +481,13 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
/*
* The ->i_ino acquired from get_next_ino() is not unique thus
* not suitable for using it as dentry name by dmabuf stats.
* Override ->i_ino with the unique and dmabuffs specific
* value.
*/
inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))

View File

@@ -460,6 +460,7 @@ static void rproc_rvdev_release(struct device *dev)
struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
of_reserved_mem_device_release(dev);
dma_release_coherent_memory(dev);
kfree(rvdev);
}

View File

@@ -752,17 +752,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
* ufshcd_utrl_clear - Clear a bit in UTRLCLR register
* ufshcd_utrl_clear() - Clear requests from the controller request list.
* @hba: per adapter instance
* @pos: position of the bit to be cleared
* @mask: mask with one bit set for each request to be cleared
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
else
ufshcd_writel(hba, ~(1 << pos),
REG_UTP_TRANSFER_REQ_LIST_CLEAR);
mask = ~mask;
/*
* From the UFSHCI specification: "UTP Transfer Request List CLear
* Register (UTRLCLR): This field is bit significant. Each bit
* corresponds to a slot in the UTP Transfer Request List, where bit 0
* corresponds to request slot 0. A bit in this field is set to 0
* by host software to indicate to the host controller that a transfer
* request slot is cleared. The host controller
* shall free up any resources associated to the request slot
* immediately, and shall set the associated bit in UTRLDBR to 0. The
* host software indicates no change to request slots by setting the
* associated bits in this field to 1. Bits in this field shall only
* be set 1 or 0 by host software when UTRLRSR is set to 1."
*/
ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
@@ -2926,27 +2937,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
return ufshcd_compose_devman_upiu(hba, lrbp);
}
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
/*
* Clear all the requests from the controller for which a bit has been set in
* @mask and wait until the controller confirms that these requests have been
* cleared.
*/
static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
{
int err = 0;
unsigned long flags;
u32 mask = 1 << tag;
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, tag);
ufshcd_utrl_clear(hba, mask);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
mask, ~mask, 1000, 1000);
return err;
return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
mask, ~mask, 1000, 1000);
}
static int
@@ -3026,7 +3036,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
@@ -7047,14 +7057,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
* ufshcd_eh_device_reset_handler - device reset handler registered to
* scsi layer.
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
* Returns SUCCESS/FAILED
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
u32 pos;
@@ -7073,14 +7083,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
}
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
__ufshcd_transfer_req_compl(hba, 1U << pos);
}
spin_lock_irqsave(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
if (hba->lrb[pos].lun == lun)
__set_bit(pos, &pending_reqs);
hba->outstanding_reqs &= ~pending_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
spin_lock_irqsave(&hba->outstanding_lock, flags);
not_cleared = pending_reqs &
ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
hba->outstanding_reqs |= not_cleared;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
__func__, not_cleared);
}
__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);
out:
hba->req_abort_count = 0;
@@ -7177,7 +7197,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
goto out;
}
err = ufshcd_clear_cmd(hba, tag);
err = ufshcd_clear_cmds(hba, 1U << tag);
if (err)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);

View File

@@ -3315,14 +3315,14 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
if (!dep->endpoint.desc)
return no_started_trb;
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
goto out;
if (!dep->endpoint.desc)
return no_started_trb;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list) &&
(list_empty(&dep->pending_list) || status == -EXDEV))

View File

@@ -141,7 +141,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
kfree(sbi->vol_amap);
}
int exfat_set_bitmap(struct inode *inode, unsigned int clu)
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)
{
int i, b;
unsigned int ent_idx;
@@ -154,7 +154,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx);
set_bit_le(b, sbi->vol_amap[i]->b_data);
exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode));
exfat_update_bh(sbi->vol_amap[i], sync);
return 0;
}

View File

@@ -317,7 +317,7 @@ int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
exfat_chain_set(clu, EXFAT_EOF_CLUSTER, 0, ALLOC_NO_FAT_CHAIN);
ret = exfat_alloc_cluster(inode, 1, clu);
ret = exfat_alloc_cluster(inode, 1, clu, IS_DIRSYNC(inode));
if (ret)
return ret;

View File

@@ -388,7 +388,7 @@ int exfat_clear_volume_dirty(struct super_block *sb);
#define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu)
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
struct exfat_chain *p_chain);
struct exfat_chain *p_chain, bool sync_bmap);
int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain);
int exfat_ent_get(struct super_block *sb, unsigned int loc,
unsigned int *content);
@@ -407,7 +407,7 @@ int exfat_count_num_clusters(struct super_block *sb,
/* balloc.c */
int exfat_load_bitmap(struct super_block *sb);
void exfat_free_bitmap(struct exfat_sb_info *sbi);
int exfat_set_bitmap(struct inode *inode, unsigned int clu);
int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync);
void exfat_clear_bitmap(struct inode *inode, unsigned int clu);
unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu);
int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count);

View File

@@ -277,7 +277,7 @@ release_bhs:
}
int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
struct exfat_chain *p_chain)
struct exfat_chain *p_chain, bool sync_bmap)
{
int ret = -ENOSPC;
unsigned int num_clusters = 0, total_cnt;
@@ -339,7 +339,7 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* update allocation bitmap */
if (exfat_set_bitmap(inode, new_clu)) {
if (exfat_set_bitmap(inode, new_clu, sync_bmap)) {
ret = -EIO;
goto free_cluster;
}

View File

@@ -178,7 +178,8 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
return -EIO;
}
ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu);
ret = exfat_alloc_cluster(inode, num_to_be_allocated, &new_clu,
inode_needs_sync(inode));
if (ret)
return ret;

View File

@@ -340,7 +340,7 @@ static int exfat_find_empty_entry(struct inode *inode,
exfat_chain_set(&clu, last_clu + 1, 0, p_dir->flags);
/* allocate a cluster */
ret = exfat_alloc_cluster(inode, 1, &clu);
ret = exfat_alloc_cluster(inode, 1, &clu, IS_DIRSYNC(inode));
if (ret)
return ret;

View File

@@ -2045,15 +2045,6 @@ static int ext4_writepage(struct page *page,
return 0;
}
/* Should never happen but for bugs in other kernel subsystems */
if (!page_has_buffers(page)) {
ext4_warning_inode(inode,
"page %lu does not have buffers attached", page->index);
ClearPageDirty(page);
unlock_page(page);
return 0;
}
page_bufs = page_buffers(page);
/*
* We cannot do block allocation or other extent handling in this
@@ -2673,22 +2664,6 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
continue;
}
/*
* Should never happen but for buggy code in
* other subsystems that call
* set_page_dirty() without properly warning
* the file system first. See [1] for more
* information.
*
* [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
*/
if (!page_has_buffers(page)) {
ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
ClearPageDirty(page);
unlock_page(page);
continue;
}
if (mpd->map.m_len == 0)
mpd->first_page = page->index;
mpd->next_page = page->index + 1;

View File

@@ -3407,9 +3407,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
*fsdata = NULL;
if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
goto repeat;
if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
goto repeat;

View File

@@ -91,8 +91,9 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
unsigned int cnt;
struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
unsigned long flags;
spin_lock_bh(&sbi->iostat_lat_lock);
spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
for (idx = 0; idx < MAX_IO_TYPE; idx++) {
for (io = 0; io < NR_PAGE_TYPE; io++) {
cnt = io_lat->bio_cnt[idx][io];
@@ -106,7 +107,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
io_lat->bio_cnt[idx][io] = 0;
}
}
spin_unlock_bh(&sbi->iostat_lat_lock);
spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
trace_f2fs_iostat_latency(sbi, iostat_lat);
}
@@ -115,14 +116,15 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
unsigned long long iostat_diff[NR_IO_TYPE];
int i;
unsigned long flags;
if (time_is_after_jiffies(sbi->iostat_next_period))
return;
/* Need double check under the lock */
spin_lock_bh(&sbi->iostat_lock);
spin_lock_irqsave(&sbi->iostat_lock, flags);
if (time_is_after_jiffies(sbi->iostat_next_period)) {
spin_unlock_bh(&sbi->iostat_lock);
spin_unlock_irqrestore(&sbi->iostat_lock, flags);
return;
}
sbi->iostat_next_period = jiffies +
@@ -133,7 +135,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
sbi->prev_rw_iostat[i];
sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
}
spin_unlock_bh(&sbi->iostat_lock);
spin_unlock_irqrestore(&sbi->iostat_lock, flags);
trace_f2fs_iostat(sbi, iostat_diff);
@@ -145,25 +147,27 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
int i;
spin_lock_bh(&sbi->iostat_lock);
spin_lock_irq(&sbi->iostat_lock);
for (i = 0; i < NR_IO_TYPE; i++) {
sbi->rw_iostat[i] = 0;
sbi->prev_rw_iostat[i] = 0;
}
spin_unlock_bh(&sbi->iostat_lock);
spin_unlock_irq(&sbi->iostat_lock);
spin_lock_bh(&sbi->iostat_lat_lock);
spin_lock_irq(&sbi->iostat_lat_lock);
memset(io_lat, 0, sizeof(struct iostat_lat_info));
spin_unlock_bh(&sbi->iostat_lat_lock);
spin_unlock_irq(&sbi->iostat_lat_lock);
}
void f2fs_update_iostat(struct f2fs_sb_info *sbi,
enum iostat_type type, unsigned long long io_bytes)
{
unsigned long flags;
if (!sbi->iostat_enable)
return;
spin_lock_bh(&sbi->iostat_lock);
spin_lock_irqsave(&sbi->iostat_lock, flags);
sbi->rw_iostat[type] += io_bytes;
if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
@@ -172,7 +176,7 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi,
if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
sbi->rw_iostat[APP_READ_IO] += io_bytes;
spin_unlock_bh(&sbi->iostat_lock);
spin_unlock_irqrestore(&sbi->iostat_lock, flags);
f2fs_record_iostat(sbi);
}
@@ -185,6 +189,7 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
struct f2fs_sb_info *sbi = iostat_ctx->sbi;
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
int idx;
unsigned long flags;
if (!sbi->iostat_enable)
return;
@@ -202,12 +207,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
idx = WRITE_ASYNC_IO;
}
spin_lock_bh(&sbi->iostat_lat_lock);
spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
io_lat->sum_lat[idx][iotype] += ts_diff;
io_lat->bio_cnt[idx][iotype]++;
if (ts_diff > io_lat->peak_lat[idx][iotype])
io_lat->peak_lat[idx][iotype] = ts_diff;
spin_unlock_bh(&sbi->iostat_lat_lock);
spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
}
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)

View File

@@ -4803,13 +4803,6 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
return -EFSCORRUPTED;
}
if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) {
f2fs_err(sbi,
"Current segment has invalid alloc_type:%d",
curseg->alloc_type);
return -EFSCORRUPTED;
}
if (f2fs_test_bit(blkofs, se->cur_valid_map))
goto out;

View File

@@ -262,6 +262,8 @@ int fuse_create_open_backing(
struct dentry *newent;
int err = 0;
const struct fuse_create_in *fci = fa->in_args[0].value;
struct fuse_inode *fuse_inode = get_fuse_inode(entry->d_inode);
u64 target_nodeid = 0;
if (!dir_fuse_inode || !dir_fuse_dentry)
return -EIO;
@@ -293,7 +295,10 @@ int fuse_create_open_backing(
};
path_get(&get_fuse_dentry(entry)->backing_path);
inode = fuse_iget_backing(dir->i_sb,
if (fuse_inode)
target_nodeid = fuse_inode->nodeid;
inode = fuse_iget_backing(dir->i_sb, target_nodeid,
get_fuse_dentry(entry)->backing_path.dentry->d_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
@@ -1173,9 +1178,11 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir,
struct fuse_dentry *fd;
struct dentry *bd;
struct inode *inode, *backing_inode;
struct fuse_inode *fuse_inode = get_fuse_inode(entry->d_inode);
struct fuse_entry_out *feo = fa->out_args[0].value;
struct fuse_entry_bpf_out *febo = fa->out_args[1].value;
struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out);
u64 target_nodeid = 0;
fd = get_fuse_dentry(entry);
if (!fd)
@@ -1187,7 +1194,10 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir,
if (!backing_inode)
return 0;
inode = fuse_iget_backing(dir->i_sb, backing_inode);
if (fuse_inode)
target_nodeid = fuse_inode->nodeid;
inode = fuse_iget_backing(dir->i_sb, target_nodeid, backing_inode);
if (IS_ERR(inode))
return ERR_PTR(PTR_ERR(inode));
@@ -1349,7 +1359,8 @@ int fuse_mknod_backing(
{
int err = 0;
const struct fuse_mknod_in *fmi = fa->in_args[0].value;
struct inode *backing_inode = get_fuse_inode(dir)->backing_inode;
struct fuse_inode *fuse_inode = get_fuse_inode(dir);
struct inode *backing_inode = fuse_inode->backing_inode;
struct path backing_path = {};
struct inode *inode = NULL;
@@ -1376,7 +1387,7 @@ int fuse_mknod_backing(
*/
goto out;
}
inode = fuse_iget_backing(dir->i_sb, backing_inode);
inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
@@ -1425,7 +1436,8 @@ int fuse_mkdir_backing(
{
int err = 0;
const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
struct inode *backing_inode = get_fuse_inode(dir)->backing_inode;
struct fuse_inode *fuse_inode = get_fuse_inode(dir);
struct inode *backing_inode = fuse_inode->backing_inode;
struct path backing_path = {};
struct inode *inode = NULL;
struct dentry *d;
@@ -1453,7 +1465,7 @@ int fuse_mkdir_backing(
dput(backing_path.dentry);
backing_path.dentry = d;
}
inode = fuse_iget_backing(dir->i_sb, backing_inode);
inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
@@ -1768,7 +1780,8 @@ int fuse_link_backing(struct fuse_bpf_args *fa, struct dentry *entry,
struct path backing_new_path = {};
struct dentry *backing_dir_dentry;
struct inode *fuse_new_inode = NULL;
struct inode *backing_dir_inode = get_fuse_inode(dir)->backing_inode;
struct fuse_inode *fuse_dir_inode = get_fuse_inode(dir);
struct inode *backing_dir_inode = fuse_dir_inode->backing_inode;
get_fuse_backing_path(entry, &backing_old_path);
if (!backing_old_path.dentry)
@@ -1799,7 +1812,7 @@ int fuse_link_backing(struct fuse_bpf_args *fa, struct dentry *entry,
goto out;
}
fuse_new_inode = fuse_iget_backing(dir->i_sb, backing_dir_inode);
fuse_new_inode = fuse_iget_backing(dir->i_sb, fuse_dir_inode->nodeid, backing_dir_inode);
if (IS_ERR(fuse_new_inode)) {
err = PTR_ERR(fuse_new_inode);
goto out;
@@ -2163,7 +2176,8 @@ int fuse_symlink_backing(
struct inode *dir, struct dentry *entry, const char *link, int len)
{
int err = 0;
struct inode *backing_inode = get_fuse_inode(dir)->backing_inode;
struct fuse_inode *fuse_inode = get_fuse_inode(dir);
struct inode *backing_inode = fuse_inode->backing_inode;
struct path backing_path = {};
struct inode *inode = NULL;
@@ -2186,7 +2200,7 @@ int fuse_symlink_backing(
*/
goto out;
}
inode = fuse_iget_backing(dir->i_sb, backing_inode);
inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;

View File

@@ -545,7 +545,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
goto out_queue_forget;
backing_inode = backing_file->f_inode;
*inode = fuse_iget_backing(sb, backing_inode);
*inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode);
if (!*inode)
goto bpf_arg_out;

View File

@@ -976,9 +976,10 @@ extern const struct dentry_operations fuse_dentry_operations;
extern const struct dentry_operations fuse_root_dentry_operations;
/**
* Get a filled in inode
* Get a filled-in inode
*/
struct inode *fuse_iget_backing(struct super_block *sb,
u64 nodeid,
struct inode *backing_inode);
struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int generation, struct fuse_attr *attr,

View File

@@ -330,6 +330,15 @@ static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
(struct fuse_inode_identifier *) _nodeidp;
struct fuse_inode *fi = get_fuse_inode(inode);
return fii->nodeid == fi->nodeid;
}
static int fuse_inode_backing_eq(struct inode *inode, void *_nodeidp)
{
struct fuse_inode_identifier *fii =
(struct fuse_inode_identifier *) _nodeidp;
struct fuse_inode *fi = get_fuse_inode(inode);
return fii->nodeid == fi->nodeid
#ifdef CONFIG_FUSE_BPF
&& fii->backing_inode == fi->backing_inode
@@ -344,6 +353,17 @@ static int fuse_inode_set(struct inode *inode, void *_nodeidp)
struct fuse_inode *fi = get_fuse_inode(inode);
fi->nodeid = fii->nodeid;
return 0;
}
static int fuse_inode_backing_set(struct inode *inode, void *_nodeidp)
{
struct fuse_inode_identifier *fii =
(struct fuse_inode_identifier *) _nodeidp;
struct fuse_inode *fi = get_fuse_inode(inode);
fi->nodeid = fii->nodeid;
#ifdef CONFIG_FUSE_BPF
fi->backing_inode = fii->backing_inode;
if (fi->backing_inode)
@@ -353,20 +373,25 @@ static int fuse_inode_set(struct inode *inode, void *_nodeidp)
return 0;
}
struct inode *fuse_iget_backing(struct super_block *sb,
struct inode *fuse_iget_backing(struct super_block *sb, u64 nodeid,
struct inode *backing_inode)
{
struct inode *inode;
struct fuse_inode *fi;
struct fuse_conn *fc = get_fuse_conn_super(sb);
struct fuse_inode_identifier fii = {
.nodeid = nodeid,
.backing_inode = backing_inode,
};
struct fuse_attr attr;
unsigned long hash = (unsigned long) backing_inode;
if (nodeid)
hash = nodeid;
fuse_fill_attr_from_inode(&attr, backing_inode);
inode = iget5_locked(sb, (unsigned long) backing_inode, fuse_inode_eq,
fuse_inode_set, &fii);
inode = iget5_locked(sb, hash, fuse_inode_backing_eq,
fuse_inode_backing_set, &fii);
if (!inode)
return NULL;

View File

@@ -573,7 +573,8 @@ int fuse_readdir(struct file *file, struct dir_context *ctx)
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
bool force_again, allow_force;
bool allow_force;
bool force_again = false;
bool is_continued = false;
again:

View File

@@ -171,6 +171,7 @@ static inline void dma_pernuma_cma_reserve(void) { }
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
@@ -189,6 +190,8 @@ static inline int dma_declare_coherent_memory(struct device *dev,
{
return -ENOSYS;
}
#define dma_release_coherent_memory(dev) (0)
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

View File

@@ -74,7 +74,11 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) ({ \
int mt = get_pageblock_migratetype(_page); \
bool ret = (mt == MIGRATE_ISOLATE || mt == MIGRATE_CMA) ? true : false; \
ret; \
})
# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_migrate_cma(migratetype) false

View File

@@ -245,7 +245,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
extern int __traceiter_##name(data_proto); \
DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
static inline void trace_##name(proto) \
static inline void __nocfi trace_##name(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(name, \
@@ -310,7 +310,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
.unregfunc = _unreg, \
.funcs = NULL }; \
__TRACEPOINT_ENTRY(_name); \
int __traceiter_##_name(void *__data, proto) \
int __nocfi __traceiter_##_name(void *__data, proto) \
{ \
struct tracepoint_func *it_func_ptr; \
void *it_func; \

View File

@@ -4,6 +4,8 @@
#include <linux/skbuff.h>
#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
struct ip_esp_hdr;
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)

View File

@@ -10,13 +10,26 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct binder_transaction;
struct task_struct;
#ifdef __GENKSYMS__
struct binder_alloc;
struct binder_proc;
struct binder_thread;
struct binder_transaction_data;
struct binder_transaction;
struct task_struct;
struct seq_file;
struct binder_transaction_data;
#else
/* struct binder_alloc */
#include <../drivers/android/binder_alloc.h>
/* struct binder_proc, struct binder_thread, struct binder_transaction */
#include <../drivers/android/binder_internal.h>
/* struct task_struct */
#include <linux/sched.h>
/* struct seq_file */
#include <linux/seq_file.h>
/* struct binder_transaction_data */
#include <uapi/linux/android/binder.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_binder_transaction_init,
TP_PROTO(struct binder_transaction *t),
TP_ARGS(t));
@@ -29,8 +42,6 @@ DECLARE_HOOK(android_vh_binder_set_priority,
DECLARE_HOOK(android_vh_binder_restore_priority,
TP_PROTO(struct binder_transaction *t, struct task_struct *task),
TP_ARGS(t, task));
struct binder_proc;
struct binder_thread;
DECLARE_HOOK(android_vh_binder_wakeup_ilocked,
TP_PROTO(struct task_struct *task, bool sync, struct binder_proc *proc),
TP_ARGS(task, sync, proc));

View File

@@ -9,9 +9,18 @@
#include <trace/hooks/vendor_hooks.h>
struct blk_mq_tag_set;
#ifdef __GENKSYMS__
struct blk_mq_tags;
struct blk_mq_alloc_data;
struct blk_mq_tag_set;
#else
/* struct blk_mq_tags */
#include <../block/blk-mq-tag.h>
/* struct blk_mq_alloc_data */
#include <../block/blk-mq.h>
/* struct blk_mq_tag_set */
#include <linux/blk-mq.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_blk_alloc_rqs,
TP_PROTO(size_t *rq_size, struct blk_mq_tag_set *set,

View File

@@ -7,7 +7,18 @@
#define _TRACE_HOOK_CGROUP_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct cgroup_taskset;
struct cgroup_subsys;
struct task_struct;
#else
/* Including ../kernel/cgroup/cgroup-internal.h breaks builds. */
struct cgroup_taskset;
/* struct cgroup_subsys */
#include <linux/cgroup-defs.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cgroup_set_task,
TP_PROTO(int ret, struct task_struct *task),
TP_ARGS(ret, task));
@@ -21,8 +32,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator,
TP_PROTO(bool f),
TP_ARGS(f), 1);
struct cgroup_subsys;
struct cgroup_taskset;
DECLARE_HOOK(android_vh_cgroup_attach,
TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
TP_ARGS(ss, tset))

View File

@@ -9,7 +9,12 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct cpuidle_device;
#else
/* struct cpuidle_device */
#include <linux/cpuidle.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cpu_idle_enter,
TP_PROTO(int *state, struct cpuidle_device *dev),

View File

@@ -10,7 +10,12 @@
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct cpuidle_device;
#else
/* struct cpuidle_device */
#include <linux/cpuidle.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_cpuidle_psci_enter,
TP_PROTO(struct cpuidle_device *dev, bool s2idle),
TP_ARGS(dev, s2idle));

View File

@@ -10,8 +10,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct cred;
struct task_struct;
#else
/* struct cred */
#include <linux/cred.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_commit_creds,
TP_PROTO(const struct task_struct *task, const struct cred *new),
TP_ARGS(task, new));

View File

@@ -10,7 +10,12 @@
#include <trace/hooks/vendor_hooks.h>
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ipi_stop,
TP_PROTO(struct pt_regs *regs),

View File

@@ -10,7 +10,21 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct mutex;
struct rt_mutex;
struct rw_semaphore;
struct task_struct;
#else
/* struct mutex */
#include <linux/mutex.h>
/* struct rt_mutex */
#include <linux/rtmutex.h>
/* struct rw_semaphore */
#include <linux/rwsem.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_mutex_wait_start,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
@@ -18,7 +32,6 @@ DECLARE_HOOK(android_vh_mutex_wait_finish,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
struct rt_mutex;
DECLARE_HOOK(android_vh_rtmutex_wait_start,
TP_PROTO(struct rt_mutex *lock),
TP_ARGS(lock));
@@ -26,7 +39,6 @@ DECLARE_HOOK(android_vh_rtmutex_wait_finish,
TP_PROTO(struct rt_mutex *lock),
TP_ARGS(lock));
struct rw_semaphore;
DECLARE_HOOK(android_vh_rwsem_read_wait_start,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
@@ -40,7 +52,6 @@ DECLARE_HOOK(android_vh_rwsem_write_wait_finish,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));
struct task_struct;
DECLARE_HOOK(android_vh_sched_show_task,
TP_PROTO(struct task_struct *task),
TP_ARGS(task));

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_die_kernel_fault,
TP_PROTO(struct pt_regs *regs, unsigned int esr, unsigned long addr, const char *msg),
TP_ARGS(regs, esr, addr, msg), 1);

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_FIPS140_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct crypto_aes_ctx;
#else
/* struct crypto_aes_ctx */
#include <crypto/aes.h>
#endif /* __GENKSYMS__ */
/*
* These hooks exist only for the benefit of the FIPS140 crypto module, which

View File

@@ -9,7 +9,12 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_is_fpsimd_save,
TP_PROTO(struct task_struct *prev, struct task_struct *next),

View File

@@ -9,8 +9,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct irq_data;
#ifdef __GENKSYMS__
struct cpumask;
struct irq_data;
#else
/* struct cpumask */
#include <linux/cpumask.h>
/* struct irq_data */
#include <linux/irq.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_gic_v3_affinity_init,
TP_PROTO(int irq, u32 offset, u64 *affinity),
TP_ARGS(irq, offset, affinity));

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_GUP_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct page;
#else
/* struct page */
#include <linux/mm_types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_try_grab_compound_head,
TP_PROTO(struct page *page, int refs, unsigned int flags, bool *ret),

View File

@@ -9,8 +9,13 @@
#include <trace/hooks/vendor_hooks.h>
struct printk_ringbuffer;
#ifdef __GENKSYMS__
struct printk_record;
struct printk_ringbuffer;
#else
/* struct printk_record, struct printk_ringbuffer */
#include <../kernel/printk/printk_ringbuffer.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_logbuf,
TP_PROTO(struct printk_ringbuffer *rb, struct printk_record *r),

View File

@@ -13,6 +13,25 @@
#include <linux/oom.h>
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct cma;
struct acr_info;
struct compact_control;
struct slabinfo;
struct cgroup_subsys_state;
struct mem_cgroup;
#else
/* struct compact_control */
#include <../mm/internal.h>
/* struct slabinfo */
#include <../mm/slab.h>
/* struct cgroup_subsys_state */
#include <linux/cgroup-defs.h>
/* struct acr_info */
#include <linux/gfp.h>
/* struct mem_cgroup */
#include <linux/memcontrol.h>
#endif /* __GENKSYMS__ */
struct cma;
DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
@@ -31,6 +50,19 @@ DECLARE_HOOK(android_vh_cma_alloc_finish,
TP_PROTO(struct cma *cma, struct page *page, unsigned long count,
unsigned int align, gfp_t gfp_mask, s64 ts),
TP_ARGS(cma, page, count, align, gfp_mask, ts));
DECLARE_HOOK(android_vh_cma_alloc_busy_info,
TP_PROTO(struct acr_info *info),
TP_ARGS(info));
DECLARE_HOOK(android_vh_calc_alloc_flags,
TP_PROTO(unsigned int pflags, gfp_t gfp_mask, unsigned int *alloc_flags,
bool *bypass),
TP_ARGS(pflags, gfp_mask, alloc_flags, bypass));
DECLARE_HOOK(android_vh_mm_compaction_begin,
TP_PROTO(struct compact_control *cc, long *vendor_ret),
TP_ARGS(cc, vendor_ret));
DECLARE_HOOK(android_vh_mm_compaction_end,
TP_PROTO(struct compact_control *cc, long vendor_ret),
TP_ARGS(cc, vendor_ret));
DECLARE_HOOK(android_vh_rmqueue,
TP_PROTO(struct zone *preferred_zone, struct zone *zone,
unsigned int order, gfp_t gfp_flags,
@@ -70,10 +102,12 @@ DECLARE_HOOK(android_vh_show_mem,
DECLARE_HOOK(android_vh_alloc_pages_slowpath,
TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long delta),
TP_ARGS(gfp_mask, order, delta));
DECLARE_HOOK(android_vh_cma_alloc_adjust,
TP_PROTO(struct zone *zone, bool *is_cma_alloc),
TP_ARGS(zone, is_cma_alloc));
DECLARE_HOOK(android_vh_print_slabinfo_header,
TP_PROTO(struct seq_file *m),
TP_ARGS(m));
struct slabinfo;
DECLARE_HOOK(android_vh_cache_show,
TP_PROTO(struct seq_file *m, struct slabinfo *sinfo, struct kmem_cache *s),
TP_ARGS(m, sinfo, s));
@@ -96,7 +130,6 @@ DECLARE_HOOK(android_vh_show_stack_hash,
DECLARE_HOOK(android_vh_save_track_hash,
TP_PROTO(bool alloc, unsigned long p),
TP_ARGS(alloc, p));
struct mem_cgroup;
DECLARE_HOOK(android_vh_vmpressure,
TP_PROTO(struct mem_cgroup *memcg, bool *bypass),
TP_ARGS(memcg, bypass));
@@ -109,7 +142,6 @@ DECLARE_HOOK(android_vh_mem_cgroup_free,
DECLARE_HOOK(android_vh_mem_cgroup_id_remove,
TP_PROTO(struct mem_cgroup *memcg),
TP_ARGS(memcg));
struct cgroup_subsys_state;
DECLARE_HOOK(android_vh_mem_cgroup_css_online,
TP_PROTO(struct cgroup_subsys_state *css, struct mem_cgroup *memcg),
TP_ARGS(css, memcg));

View File

@@ -9,9 +9,18 @@
#include <trace/hooks/vendor_hooks.h>
struct mmc_host;
struct mmc_card;
#ifdef __GENKSYMS__
struct sdhci_host;
struct mmc_card;
struct mmc_host;
#else
/* struct sdhci_host */
#include <../drivers/mmc/host/sdhci.h>
/* struct mmc_card */
#include <linux/mmc/card.h>
/* struct mmc_host */
#include <linux/mmc/host.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_mmc_blk_reset,
TP_PROTO(struct mmc_host *host, int err, bool *allow),

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct module;
#else
/* struct module */
#include <linux/module.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_set_module_permit_before_init,
TP_PROTO(const struct module *mod),
TP_ARGS(mod));

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_mpam_set,
TP_PROTO(struct task_struct *prev, struct task_struct *next),
TP_ARGS(prev, next));

View File

@@ -8,17 +8,30 @@
#define _TRACE_HOOK_NET_VH_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct packet_type;
struct list_head;
struct sk_buff;
struct list_head;
struct nf_conn;
struct sock;
#else
/* struct packet_type */
#include <linux/netdevice.h>
/* struct sk_buff */
#include <linux/skbuff.h>
/* struct list_head */
#include <linux/types.h>
/* struct nf_conn */
#include <net/netfilter/nf_conntrack.h>
/* struct sock */
#include <net/sock.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ptype_head,
TP_PROTO(const struct packet_type *pt, struct list_head *vendor_pt),
TP_ARGS(pt, vendor_pt));
DECLARE_HOOK(android_vh_kfree_skb,
TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
struct nf_conn;
struct sock;
DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_alloc,
TP_PROTO(struct nf_conn *nf_conn), TP_ARGS(nf_conn), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_free,

View File

@@ -10,7 +10,12 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct generic_pm_domain;
#else
/* struct generic_pm_domain */
#include <linux/pm_domain.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_allow_domain_state,
TP_PROTO(struct generic_pm_domain *genpd, uint32_t idx, bool *allow),
TP_ARGS(genpd, idx, allow))

View File

@@ -10,7 +10,17 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
enum freq_qos_req_type;
struct freq_constraints;
struct freq_qos_request;
struct task_struct;
#else
/* enum freq_qos_req_type, struct freq_constraints, struct freq_qos_request */
#include <linux/pm_qos.h>
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_try_to_freeze_todo,
TP_PROTO(unsigned int todo, unsigned int elapsed_msecs, bool wq_busy),
TP_ARGS(todo, elapsed_msecs, wq_busy));
@@ -19,9 +29,6 @@ DECLARE_HOOK(android_vh_try_to_freeze_todo_unfrozen,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
enum freq_qos_req_type;
struct freq_qos_request;
struct freq_constraints;
DECLARE_HOOK(android_vh_freq_qos_add_request,
TP_PROTO(struct freq_constraints *qos, struct freq_qos_request *req,

View File

@@ -11,8 +11,13 @@
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
struct psi_trigger;
#ifdef __GENKSYMS__
struct psi_group;
struct psi_trigger;
#else
/* struct psi_group, struct psi_trigger */
#include <linux/psi_types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_psi_event,
TP_PROTO(struct psi_trigger *t),
TP_ARGS(t));

View File

@@ -9,7 +9,12 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct rproc;
#else
/* struct rproc */
#include <linux/remoteproc.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_rproc_recovery,
TP_PROTO(struct rproc *rproc),

View File

@@ -9,8 +9,13 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct rw_semaphore;
struct rwsem_waiter;
#else
/* struct rw_semaphore, struct rwsem_waiter */
#include <linux/rwsem.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_rwsem_init,
TP_PROTO(struct rw_semaphore *sem),
TP_ARGS(sem));

View File

@@ -9,7 +9,29 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct cgroup_taskset;
struct cgroup_subsys_state;
struct cpufreq_policy;
struct em_perf_domain;
enum uclamp_id;
struct sched_entity;
struct task_struct;
struct uclamp_se;
#else
/* Including ../kernel/cgroup/cgroup-internal.h breaks builds. */
struct cgroup_taskset;
/* struct cgroup_subsys_state */
#include <linux/cgroup-defs.h>
/* struct cpufreq_policy */
#include <linux/cpufreq.h>
/* struct em_perf_domain */
#include <linux/energy_model.h>
/* enum uclamp_id, struct sched_entity, struct task_struct, struct uclamp_se */
#include <linux/sched.h>
/* Only defined with CONFIG_UCLAMP_TASK, so declare unconditionally. */
struct uclamp_se;
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
@@ -181,7 +203,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
TP_PROTO(struct task_struct *curr, int cpu, s64 delta),
TP_ARGS(curr, cpu, delta), 1);
struct sched_entity;
DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 vruntime),
TP_ARGS(cfs_rq, se, initial, vruntime), 1);
@@ -198,7 +219,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
TP_ARGS(p, rq, need_update), 1);
struct cgroup_taskset;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
TP_PROTO(struct cgroup_taskset *tset),
TP_ARGS(tset), 1);
@@ -207,7 +227,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_can_attach,
TP_PROTO(struct cgroup_taskset *tset, int *retval),
TP_ARGS(tset, retval), 1);
struct cgroup_subsys_state;
DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_online,
TP_PROTO(struct cgroup_subsys_state *css),
TP_ARGS(css), 1);
@@ -228,7 +247,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_sched_exec,
TP_PROTO(bool *cond),
TP_ARGS(cond), 1);
struct cpufreq_policy;
DECLARE_HOOK(android_vh_map_util_freq,
TP_PROTO(unsigned long util, unsigned long freq,
unsigned long cap, unsigned long *next_freq, struct cpufreq_policy *policy,
@@ -244,7 +262,6 @@ DECLARE_HOOK(android_vh_sugov_get_util,
TP_PROTO(unsigned int cpu, unsigned long *ret),
TP_ARGS(cpu, ret));
struct em_perf_domain;
DECLARE_HOOK(android_vh_em_cpu_energy,
TP_PROTO(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
@@ -280,8 +297,6 @@ DECLARE_HOOK(android_vh_set_wake_flags,
TP_PROTO(int *wake_flags, unsigned int *mode),
TP_ARGS(wake_flags, mode));
enum uclamp_id;
struct uclamp_se;
DECLARE_RESTRICTED_HOOK(android_rvh_uclamp_eff_get,
TP_PROTO(struct task_struct *p, enum uclamp_id clamp_id,
struct uclamp_se *uclamp_max, struct uclamp_se *uclamp_eff, int *ret),
@@ -337,6 +352,11 @@ DECLARE_HOOK(android_vh_sched_setaffinity_early,
TP_PROTO(struct task_struct *p, const struct cpumask *new_mask, int *retval),
TP_ARGS(p, new_mask, retval));
DECLARE_RESTRICTED_HOOK(android_rvh_cpumask_any_and_distribute,
TP_PROTO(struct task_struct *p, const struct cpumask *cpu_valid_mask ,
const struct cpumask *new_mask, int *dest_cpu),
TP_ARGS(p, cpu_valid_mask, new_mask, dest_cpu), 1);
DECLARE_HOOK(android_vh_free_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
@@ -350,7 +370,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_after_dequeue_task,
TP_ARGS(rq, p), 1);
struct cfs_rq;
struct sched_entity;
struct rq_flags;
DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_entity,
TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
@@ -428,6 +447,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_rt_rq_load_avg,
TP_PROTO(u64 now, struct rq *rq, struct task_struct *tsk, int running),
TP_ARGS(now, rq, tsk, running), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_rq_clock_pelt,
TP_PROTO(struct rq *rq, s64 delta, bool *ret),
TP_ARGS(rq, delta, ret), 1);
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SCHED_H */

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_SHMEM_FS_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct page;
#else
/* struct page */
#include <linux/mm_types.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_shmem_alloc_page,
TP_PROTO(struct page **page),
TP_ARGS(page));

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_SIGNAL_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_do_send_sig_info,
TP_PROTO(int sig, struct task_struct *killer, struct task_struct *dst),
TP_ARGS(sig, killer, dst));

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_watchdog_timer_softlockup,
TP_PROTO(int duration, struct pt_regs *regs, bool is_panic),
TP_ARGS(duration, regs, is_panic));

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_SYS_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct task_struct;
#else
/* struct task_struct */
#include <linux/sched.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_syscall_prctl_finished,
TP_PROTO(int option, struct task_struct *task),
TP_ARGS(option, task));

View File

@@ -10,8 +10,15 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct file;
union bpf_attr;
#else
/* struct file */
#include <linux/fs.h>
/* union bpf_attr */
#include <uapi/linux/bpf.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_check_mmap_file,
TP_PROTO(const struct file *file, unsigned long prot,
unsigned long flag, unsigned long ret),

View File

@@ -10,11 +10,16 @@
#include <trace/hooks/vendor_hooks.h>
#include <linux/cpufreq.h>
#ifdef __GENKSYMS__
struct thermal_zone_device;
#else
/* struct thermal_zone_device */
#include <linux/thermal.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_enable_thermal_genl_check,
TP_PROTO(int event, int tz_id, int *enable_thermal_genl),
TP_ARGS(event, tz_id, enable_thermal_genl));
struct thermal_zone_device;
DECLARE_HOOK(android_vh_thermal_pm_notify_suspend,
TP_PROTO(struct thermal_zone_device *tz, int *irq_wakeable),
TP_ARGS(tz, irq_wakeable));

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct pt_regs;
#else
/* struct pt_regs */
#include <asm/ptrace.h>
#endif /* __GENKSYMS__ */
DECLARE_RESTRICTED_HOOK(android_rvh_do_undefinstr,
TP_PROTO(struct pt_regs *regs, bool user),
TP_ARGS(regs, user),

View File

@@ -10,8 +10,13 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct tcpci;
#ifdef __GENKSYMS__
struct tcpci_data;
#else
/* struct tcpci_data */
#include <../drivers/usb/typec/tcpm/tcpci.h>
#endif /* __GENKSYMS__ */
struct tcpci;
struct tcpm_port;
#ifndef TYPEC_TIMER

View File

@@ -9,9 +9,20 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct ufs_hba;
struct request;
struct ufshcd_lrb;
struct uic_command;
struct request;
struct scsi_device;
#else
/* struct ufs_hba, struct ufshcd_lrb, struct uic_command */
#include <../drivers/scsi/ufs/ufshcd.h>
/* struct request */
#include <linux/blkdev.h>
/* struct scsi_device */
#include <scsi/scsi_device.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_ufs_fill_prdt,
TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
@@ -43,7 +54,6 @@ DECLARE_HOOK(android_vh_ufs_compl_command,
TP_PROTO(struct ufs_hba *hba, struct ufshcd_lrb *lrbp),
TP_ARGS(hba, lrbp));
struct uic_command;
DECLARE_HOOK(android_vh_ufs_send_uic_command,
TP_PROTO(struct ufs_hba *hba, struct uic_command *ucmd,
const char *str),
@@ -57,7 +67,6 @@ DECLARE_HOOK(android_vh_ufs_check_int_errors,
TP_PROTO(struct ufs_hba *hba, bool queue_eh_work),
TP_ARGS(hba, queue_eh_work));
struct scsi_device;
DECLARE_HOOK(android_vh_ufs_update_sdev,
TP_PROTO(struct scsi_device *sdev),
TP_ARGS(sdev));

View File

@@ -7,7 +7,12 @@
#define _TRACE_HOOK_USER_H
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct user_struct;
#else
/* struct user_struct */
#include <linux/sched/user.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_alloc_uid,
TP_PROTO(struct user_struct *user),
TP_ARGS(user));

View File

@@ -9,12 +9,26 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct v4l2_subdev;
struct v4l2_subdev_pad_config;
struct v4l2_subdev_format;
struct v4l2_subdev_frame_interval;
struct v4l2_subdev_selection;
struct v4l2_fmtdesc;
struct v4l2_format;
#else
/* struct v4l2_subdev, struct v4l2_subdev_pad_config */
#include <media/v4l2-subdev.h>
/* struct v4l2_subdev_format, struct v4l2_subdev_frame_interval, struct v4l2_subdev_selection */
#include <uapi/linux/v4l2-subdev.h>
/* struct v4l2_fmtdesc, struct v4l2_format */
#include <uapi/linux/videodev2.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_clear_reserved_fmt_fields,
TP_PROTO(struct v4l2_format *fmt, int *ret),
TP_ARGS(fmt, ret));
struct v4l2_fmtdesc;
DECLARE_HOOK(android_vh_fill_ext_fmtdesc,
TP_PROTO(struct v4l2_fmtdesc *fmtd, const char **descr),
TP_ARGS(fmtd, descr));
@@ -23,21 +37,16 @@ DECLARE_HOOK(android_vh_clear_mask_adjust,
TP_PROTO(unsigned int ctrl, int *n),
TP_ARGS(ctrl, n));
struct v4l2_subdev;
struct v4l2_subdev_pad_config;
struct v4l2_subdev_selection;
DECLARE_HOOK(android_vh_v4l2subdev_set_selection,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *pad,
struct v4l2_subdev_selection *sel, int *ret),
TP_ARGS(sd, pad, sel, ret));
struct v4l2_subdev_format;
DECLARE_HOOK(android_vh_v4l2subdev_set_fmt,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *pad,
struct v4l2_subdev_format *format, int *ret),
TP_ARGS(sd, pad, format, ret));
struct v4l2_subdev_frame_interval;
DECLARE_HOOK(android_vh_v4l2subdev_set_frame_interval,
TP_PROTO(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *fi,
int *ret),

View File

@@ -9,8 +9,15 @@
#include <trace/hooks/vendor_hooks.h>
#ifdef __GENKSYMS__
struct media_link;
struct media_link_desc;
#else
/* struct media_link */
#include <media/media-entity.h>
/* struct media_link_desc */
#include <uapi/linux/media.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_media_device_setup_link,
TP_PROTO(struct media_link *link, struct media_link_desc *linkd, int *ret),
TP_ARGS(link, linkd, ret));

View File

@@ -10,7 +10,12 @@
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
#ifdef __GENKSYMS__
struct worker;
#else
/* struct worker */
#include <../kernel/workqueue_internal.h>
#endif /* __GENKSYMS__ */
DECLARE_HOOK(android_vh_create_worker,
TP_PROTO(struct worker *worker, struct workqueue_attrs *attrs),
TP_ARGS(worker, attrs));

View File

@@ -213,6 +213,10 @@ struct task_struct init_task
#ifdef CONFIG_SECCOMP_FILTER
.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
.android_vendor_data1 = {0, },
.android_oem_data1 = {0, },
#endif
};
EXPORT_SYMBOL(init_task);

View File

@@ -84,7 +84,7 @@ out:
return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
if (!mem)
return;
@@ -136,10 +136,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
ret = dma_assign_coherent_memory(dev, mem);
if (ret)
dma_release_coherent_memory(mem);
_dma_release_coherent_memory(mem);
return ret;
}
/*
 * Public entry point for tearing down a device's coherent memory area.
 * Safe to call with a NULL device; delegates the actual release to the
 * file-local _dma_release_coherent_memory() helper.
 */
void dma_release_coherent_memory(struct device *dev)
{
	if (!dev)
		return;

	_dma_release_coherent_memory(dev->dma_mem);
}
static void *__dma_alloc_from_coherent(struct device *dev,
struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)

View File

@@ -984,6 +984,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_MEMCG
tsk->active_memcg = NULL;
#endif
#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
memset(&tsk->android_vendor_data1, 0, sizeof(tsk->android_vendor_data1));
memset(&tsk->android_oem_data1, 0, sizeof(tsk->android_oem_data1));
#endif
trace_android_vh_dup_task_struct(tsk, orig);
return tsk;

View File

@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
@@ -163,7 +164,7 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
__set_current_state(TASK_RUNNING);
}
bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
bool __sched __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
if (__percpu_down_read_trylock(sem))
return true;
@@ -212,7 +213,7 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem)
return true;
}
void percpu_down_write(struct percpu_rw_semaphore *sem)
void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

View File

@@ -1132,7 +1132,7 @@ static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
/*
* Wait until we successfully acquire the write lock
*/
static struct rw_semaphore *
static struct rw_semaphore __sched *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
long count;

View File

@@ -267,6 +267,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
* to sched_rt_avg_update. But I don't trust it...
*/
s64 __maybe_unused steal = 0, irq_delta = 0;
bool ret = false;
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
@@ -311,7 +312,9 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
update_irq_load_avg(rq, irq_delta + steal);
#endif
update_rq_clock_pelt(rq, delta);
trace_android_rvh_update_rq_clock_pelt(rq, delta, &ret);
if (!ret)
update_rq_clock_pelt(rq, delta);
}
void update_rq_clock(struct rq *rq)
@@ -1982,7 +1985,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
{
const struct cpumask *cpu_valid_mask = cpu_active_mask;
const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
unsigned int dest_cpu;
unsigned int dest_cpu = nr_cpu_ids;
int ret = 0;
update_rq_clock(rq);
@@ -2014,10 +2017,14 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
* for groups of tasks (ie. cpuset), so that load balancing is not
* immediately required to distribute the tasks within their new mask.
*/
dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
trace_android_rvh_cpumask_any_and_distribute(p, cpu_valid_mask, new_mask, &dest_cpu);
if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
}
}
do_set_cpus_allowed(p, new_mask);

View File

@@ -343,6 +343,7 @@ bool cpupri_check_rt(void)
{
int cpu = raw_smp_processor_id();
return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
return (cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL) &&
(cpu_rq(cpu)->rt.rt_throttled == 0);
}
#endif

View File

@@ -3765,6 +3765,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;

View File

@@ -532,6 +532,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
trace_android_vh_cma_alloc_busy_info(&info);
trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
count, align);

View File

@@ -44,6 +44,8 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
@@ -2228,6 +2230,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
unsigned long last_migrated_pfn;
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
long vendor_ret;
/*
* These counters track activities during zone compaction. Initialize
@@ -2299,6 +2302,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync);
trace_android_vh_mm_compaction_begin(cc, &vendor_ret);
/* lru_add_drain_all could be expensive with involving other CPUs */
lru_add_drain();
@@ -2426,6 +2430,7 @@ out:
count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
trace_android_vh_mm_compaction_end(cc, vendor_ret);
trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
cc->free_pfn, end_pfn, sync, ret);

View File

@@ -513,8 +513,12 @@ unsigned long __get_pfnblock_flags_mask(struct page *page,
bitidx = pfn_to_bitidx(page, pfn);
word_bitidx = bitidx / BITS_PER_LONG;
bitidx &= (BITS_PER_LONG-1);
word = bitmap[word_bitidx];
/*
* This races, without locks, with set_pfnblock_flags_mask(). Ensure
* a consistent read of the memory array, so that results, even though
* racy, are not corrupted.
*/
word = READ_ONCE(bitmap[word_bitidx]);
return (word >> bitidx) & mask;
}
@@ -3057,12 +3061,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page;
struct page *page = NULL;
if (is_migrate_cma(migratetype))
page = __rmqueue_cma(zone, order, migratetype,
if (is_migrate_cma(migratetype)) {
bool is_cma_alloc = true;
trace_android_vh_cma_alloc_adjust(zone, &is_cma_alloc);
if (is_cma_alloc)
page = __rmqueue_cma(zone, order, migratetype,
alloc_flags);
else
} else
page = __rmqueue(zone, order, migratetype, alloc_flags);
if (unlikely(page == NULL))
@@ -4104,6 +4112,11 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
{
#ifdef CONFIG_CMA
unsigned int pflags = current->flags;
bool bypass = false;
trace_android_vh_calc_alloc_flags(pflags, gfp_mask, &alloc_flags, &bypass);
if (bypass)
return alloc_flags;
if (!(pflags & PF_MEMALLOC_NOCMA) &&
gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE &&

View File

@@ -448,6 +448,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
struct page *page;
struct sk_buff *trailer;
int tailen = esp->tailen;
unsigned int allocsz;
/* this is non-NULL only with TCP/UDP Encapsulation */
if (x->encap) {

View File

@@ -483,6 +483,11 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
struct page *page;
struct sk_buff *trailer;
int tailen = esp->tailen;
unsigned int allocsz;
allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
if (allocsz > ESP_SKB_FRAG_MAXSIZE)
goto cow;
if (x->encap) {
int err = esp6_output_encap(x, skb, esp);

View File

@@ -3470,10 +3470,6 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
copy_len = max_len;
msg->msg_namelen = copy_len;
}
if (WARN_ON_ONCE(copy_len > max_len)) {
copy_len = max_len;
msg->msg_namelen = copy_len;
}
memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
}

View File

@@ -196,6 +196,7 @@ static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
*/
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
bool small_ipv6 = (skb->protocol == htons(ETH_P_IPV6)) && (skb->len <= IPV6_MIN_MTU);
struct dst_entry *dst = skb_dst(skb);
struct iphdr *top_iph;
int flags;
@@ -226,7 +227,7 @@ static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
if (flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) || small_ipv6 ?
0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

View File

@@ -1871,9 +1871,11 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
if (avail >= runtime->twake)
break;
snd_pcm_stream_unlock_irq(substream);
mutex_unlock(&runtime->buffer_mutex);
tout = schedule_timeout(wait_time);
mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
set_current_state(TASK_INTERRUPTIBLE);
switch (runtime->status->state) {
@@ -2167,6 +2169,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
nonblock = !!(substream->f_flags & O_NONBLOCK);
mutex_lock(&runtime->buffer_mutex);
snd_pcm_stream_lock_irq(substream);
err = pcm_accessible_state(runtime);
if (err < 0)
@@ -2259,6 +2262,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (xfer > 0 && err >= 0)
snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
mutex_unlock(&runtime->buffer_mutex);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
EXPORT_SYMBOL(__snd_pcm_lib_xfer);

View File

@@ -695,7 +695,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime;
int err, usecs;
int err = 0, usecs;
unsigned int bits;
snd_pcm_uframes_t frames;

View File

@@ -394,6 +394,35 @@ int s_rename(struct s oldpathname, struct s newpathname)
return res;
}
/*
 * Fill *fuse_attr_out from stat(2) of `pathname` so a test FUSE daemon
 * can reply to lookup/getattr with attributes matching the backing file.
 *
 * Returns TEST_SUCCESS on success, TEST_FAILURE otherwise.
 * NOTE(review): TESTSYSCALL presumably jumps to the `out` label on
 * failure, leaving result == TEST_FAILURE -- confirm against the macro.
 */
int s_fuse_attr(struct s pathname, struct fuse_attr *fuse_attr_out)
{
struct stat st;
int result = TEST_FAILURE;
TESTSYSCALL(s_stat(pathname, &st));
/* Straight 1:1 copy of the stat fields FUSE reports. */
fuse_attr_out->ino = st.st_ino;
fuse_attr_out->mode = st.st_mode;
fuse_attr_out->nlink = st.st_nlink;
fuse_attr_out->uid = st.st_uid;
fuse_attr_out->gid = st.st_gid;
fuse_attr_out->rdev = st.st_rdev;
fuse_attr_out->size = st.st_size;
fuse_attr_out->blksize = st.st_blksize;
fuse_attr_out->blocks = st.st_blocks;
/* st_atime/st_mtime/st_ctime carry whole seconds only. */
fuse_attr_out->atime = st.st_atime;
fuse_attr_out->mtime = st.st_mtime;
fuse_attr_out->ctime = st.st_ctime;
/*
 * Nanosecond fields are saturated (UINT32_MAX) rather than copied,
 * mirroring the UINT32_MAX *_nsec validity values used in the FUSE
 * entry replies elsewhere in this test suite.
 */
fuse_attr_out->atimensec = UINT32_MAX;
fuse_attr_out->mtimensec = UINT32_MAX;
fuse_attr_out->ctimensec = UINT32_MAX;
result = TEST_SUCCESS;
out:
return result;
}
struct s tracing_folder(void)
{
struct s trace = {0};

View File

@@ -1491,6 +1491,192 @@ out:
return result;
}
/*
* State:
* Original: dst/folder1/content.txt
* ^
* |
* |
* Backing: src/folder1/content.txt
*
* Step 1: open(folder1) - set backing to src/folder1
* Check 1: cat(content.txt) - check not receiving call on the fuse daemon
* and content is the same
* Step 2: readdirplus(dst)
* Check 2: cat(content.txt) - check not receiving call on the fuse daemon
* and content is the same
*/
/*
 * Regression test: a READDIRPLUS reply from the daemon must not replace
 * the backing inode that was established by an earlier backed lookup of
 * folder1 (see the state diagram in the comment above this function).
 *
 * NOTE(review): FUSE_ACTION / FUSE_DAEMON / FUSE_DONE presumably fork
 * the test into a client half and a daemon half (the otherwise-unused
 * `pid`/`status` locals look like they are consumed by those macros) --
 * confirm against their definitions in the test harness header.
 */
static int bpf_test_readdirplus_not_overriding_backing(const char *mount_dir)
{
const char *folder1 = "folder1";
const char *content_file = "content.txt";
const char *content = "hello world";
int result = TEST_FAILURE;
int fuse_dev = -1;
int src_fd = -1;
int content_fd = -1;
int pid = -1;
int status;
/* Fixture: src/folder1/content.txt with known contents. */
TESTSYSCALL(s_mkdir(s_path(s(ft_src), s(folder1)), 0777));
TEST(content_fd = s_creat(s_pathn(3, s(ft_src), s(folder1), s(content_file)), 0777),
content_fd != -1);
TESTEQUAL(write(content_fd, content, strlen(content)), strlen(content));
/* No implicit FUSE_INIT: the daemon half negotiates flags itself. */
TESTEQUAL(mount_fuse_no_init(mount_dir, -1, -1, &fuse_dev), 0);
FUSE_ACTION
/* ---- client half ---- */
DIR *open_mount_dir = NULL;
struct dirent *mount_dirent;
int dst_folder1_fd = -1;
int dst_content_fd = -1;
int dst_content_read_size = -1;
char content_buffer[12];
// Step 1: Lookup folder1
TESTERR(dst_folder1_fd = s_open(s_path(s(mount_dir), s(folder1)),
O_RDONLY | O_CLOEXEC), dst_folder1_fd != -1);
// Check 1: Read content file (backed)
TESTERR(dst_content_fd =
s_open(s_pathn(3, s(mount_dir), s(folder1), s(content_file)),
O_RDONLY | O_CLOEXEC), dst_content_fd != -1);
TEST(dst_content_read_size =
read(dst_content_fd, content_buffer, strlen(content)),
dst_content_read_size == strlen(content) &&
strcmp(content, content_buffer) == 0);
TESTSYSCALL(close(dst_content_fd));
dst_content_fd = -1;
TESTSYSCALL(close(dst_folder1_fd));
dst_folder1_fd = -1;
/* Clear the buffer so Check 2 cannot pass on stale data. */
memset(content_buffer, 0, strlen(content));
// Step 2: readdir folder 1
TEST(open_mount_dir = s_opendir(s(mount_dir)),
open_mount_dir != NULL);
TEST(mount_dirent = readdir(open_mount_dir), mount_dirent != NULL);
TESTSYSCALL(closedir(open_mount_dir));
open_mount_dir = NULL;
// Check 2: Read content file again (must be backed)
TESTERR(dst_content_fd =
s_open(s_pathn(3, s(mount_dir), s(folder1), s(content_file)),
O_RDONLY | O_CLOEXEC), dst_content_fd != -1);
TEST(dst_content_read_size =
read(dst_content_fd, content_buffer, strlen(content)),
dst_content_read_size == strlen(content) &&
strcmp(content, content_buffer) == 0);
TESTSYSCALL(close(dst_content_fd));
dst_content_fd = -1;
FUSE_DAEMON
/* ---- daemon half: scripted replies, in request order ---- */
size_t read_size = 0;
struct fuse_in_header *in_header = (struct fuse_in_header *)bytes_in;
struct fuse_read_out *read_out = NULL;
struct fuse_attr attr = {};
int backing_fd = -1;
DECL_FUSE_IN(open);
DECL_FUSE_IN(getattr);
TESTFUSEINITFLAGS(FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO);
// Step 1: Lookup folder 1 with backing
TESTFUSELOOKUP(folder1, 0);
TESTSYSCALL(s_fuse_attr(s_path(s(ft_src), s(folder1)), &attr));
TEST(backing_fd = s_open(s_path(s(ft_src), s(folder1)),
O_DIRECTORY | O_RDONLY | O_CLOEXEC),
backing_fd != -1);
/* Entry reply + bpf-out: FUSE_ACTION_REPLACE installs the backing fd. */
TESTFUSEOUT2(fuse_entry_out, ((struct fuse_entry_out) {
.nodeid = attr.ino,
.generation = 0,
.entry_valid = UINT64_MAX,
.attr_valid = UINT64_MAX,
.entry_valid_nsec = UINT32_MAX,
.attr_valid_nsec = UINT32_MAX,
.attr = attr,
}), fuse_entry_bpf_out, ((struct fuse_entry_bpf_out) {
.backing_action = FUSE_ACTION_REPLACE,
.backing_fd = backing_fd,
}));
TESTSYSCALL(close(backing_fd));
// Step 2: Open root dir
TESTFUSEIN(FUSE_OPENDIR, open_in);
TESTFUSEOUT1(fuse_open_out, ((struct fuse_open_out) {
.fh = 100,
.open_flags = open_in->flags
}));
// Step 2: Handle getattr
TESTFUSEIN(FUSE_GETATTR, getattr_in);
TESTSYSCALL(s_fuse_attr(s(ft_src), &attr));
TESTFUSEOUT1(fuse_attr_out, ((struct fuse_attr_out) {
.attr_valid = UINT64_MAX,
.attr_valid_nsec = UINT32_MAX,
.attr = attr
}));
// Step 2: Handle readdirplus
read_size = read(fuse_dev, bytes_in, sizeof(bytes_in));
TESTEQUAL(in_header->opcode, FUSE_READDIRPLUS);
/* Build the direntplus record past the raw request bytes in bytes_in. */
struct fuse_direntplus *dirent_plus =
(struct fuse_direntplus *) (bytes_in + read_size);
struct fuse_dirent dirent;
struct fuse_entry_out entry_out;
read_out = (struct fuse_read_out *) (bytes_in +
sizeof(*in_header) +
sizeof(struct fuse_read_in));
TESTSYSCALL(s_fuse_attr(s_path(s(ft_src), s(folder1)), &attr));
dirent = (struct fuse_dirent) {
.ino = attr.ino,
.off = 1,
.namelen = strlen(folder1),
/* NOTE(review): folder1 is a directory (s_mkdir above) yet is
 * advertised as DT_REG -- looks like it should be DT_DIR; confirm
 * whether the test deliberately mismatches the type. */
.type = DT_REG
};
entry_out = (struct fuse_entry_out) {
.nodeid = attr.ino,
.generation = 0,
.entry_valid = UINT64_MAX,
.attr_valid = UINT64_MAX,
.entry_valid_nsec = UINT32_MAX,
.attr_valid_nsec = UINT32_MAX,
.attr = attr
};
*dirent_plus = (struct fuse_direntplus) {
.dirent = dirent,
.entry_out = entry_out
};
strcpy((char *)(bytes_in + read_size + sizeof(*dirent_plus)), folder1);
read_size += FUSE_DIRENT_ALIGN(sizeof(*dirent_plus) + strlen(folder1) +
1);
/* Send only the payload: strip the request header/read_in/read_out. */
TESTFUSEDIROUTREAD(read_out,
bytes_in +
sizeof(struct fuse_in_header) +
sizeof(struct fuse_read_in) +
sizeof(struct fuse_read_out),
read_size - sizeof(struct fuse_in_header) -
sizeof(struct fuse_read_in) -
sizeof(struct fuse_read_out));
FUSE_DONE
result = TEST_SUCCESS;
out:
/* Best-effort cleanup; fds may be -1 and umount may already be done. */
close(fuse_dev);
close(content_fd);
close(src_fd);
umount(mount_dir);
return result;
}
static int parse_options(int argc, char *const *argv)
{
@@ -1596,6 +1782,7 @@ int main(int argc, char *argv[])
MAKE_TEST(inotify_test),
MAKE_TEST(bpf_test_statfs),
MAKE_TEST(bpf_test_lseek),
MAKE_TEST(bpf_test_readdirplus_not_overriding_backing)
};
#undef MAKE_TEST

View File

@@ -15,6 +15,8 @@
#include <sys/statfs.h>
#include <sys/types.h>
#include <include/uapi/linux/fuse.h>
#define PAGE_SIZE 4096
#define FUSE_POSTFILTER 0x20000
@@ -52,6 +54,7 @@ int s_creat(struct s pathname, mode_t mode);
int s_mkfifo(struct s pathname, mode_t mode);
int s_stat(struct s pathname, struct stat *st);
int s_statfs(struct s pathname, struct statfs *st);
int s_fuse_attr(struct s pathname, struct fuse_attr *fuse_attr_out);
DIR *s_opendir(struct s pathname);
int s_getxattr(struct s pathname, const char name[], void *value, size_t size,
ssize_t *ret_size);
@@ -261,7 +264,7 @@ int delete_dir_tree(const char *dir_path, bool remove_root);
((struct fuse_out_header *)bytes_out)->len); \
} while (false)
#define TESTFUSEINIT() \
#define TESTFUSEINITFLAGS(fuse_connection_flags) \
do { \
DECL_FUSE_IN(init); \
\
@@ -272,7 +275,7 @@ int delete_dir_tree(const char *dir_path, bool remove_root);
.major = FUSE_KERNEL_VERSION, \
.minor = FUSE_KERNEL_MINOR_VERSION, \
.max_readahead = 4096, \
.flags = 0, \
.flags = fuse_connection_flags, \
.max_background = 0, \
.congestion_threshold = 0, \
.max_write = 4096, \
@@ -282,6 +285,9 @@ int delete_dir_tree(const char *dir_path, bool remove_root);
})); \
} while (false)
#define TESTFUSEINIT() \
TESTFUSEINITFLAGS(0)
#define DECL_FUSE_IN(name) \
struct fuse_##name##_in *name##_in = \
(struct fuse_##name##_in *) \