Merge branch 'android14-5.15' into branch 'android14-5.15-lts'

This syncs the android14-5.15-lts branch up with all of the recent
changes that have gone into the 'android14-5.15' branch.

Included in here are the following commits:

* edbf1821e9 ANDROID: GKI: Update MTKOTT symbol list
* 9aa89cf605 UPSTREAM: libceph: harden msgr2.1 frame segment length checks
* 25f13a246d UPSTREAM: netfilter: ipset: Add schedule point in call_ad().
* 813d5c850b UPSTREAM: net: xfrm: Fix xfrm_address_filter OOB read
* 04a0c46bb3 UPSTREAM: igb: set max size RX buffer when store bad packet is enabled
* 7d13770cb8 FROMGIT: f2fs: do not return EFSCORRUPTED, but try to run online repair
* 3525a7292b ANDROID: sched: Add vendor hook for util_fits_cpu
* 3191bf9249 ANDROID: KVM: Update nVHE stack size to 8KB
* d217ccf7c8 ANDROID: Add CONFIG_BLK_DEV_NULL_BLK=m to gki_defconfig
* 845ae208d5 ANDROID: GKI: Update symbol list for Amlogic
* 17cbc8f6c2 BACKPORT: take care to handle NULL ->proc_lseek()
* 01bff4820f ANDROID: KVM: arm64: Allow setting device attr in stage-2 PTEs
* 077735bc14 ANDROID: KVM: arm64: Fix hyp tracing build dependencies
* 673d30e3f5 ANDROID: GKI: Update symbol list for Amlogic
* ed5719778f ANDROID: vendor_hooks: add vendor hook in __alloc_pages()
* fb8d8135c3 UPSTREAM: netfilter: xt_sctp: validate the flag_info count
* 4b27296bd4 ANDROID: Add kunit targets.
* a3e337eb7d ANDROID: Enable CONFIG_KUNIT=y.
* 734865de30 FROMGIT: mm/madvise: fix madvise_pageout for private file mappings
* 4e664ccbea UPSTREAM: netfilter: xt_u32: validate user space input
* a114e5dca4 UPSTREAM: netfilter: nfnetlink_osf: avoid OOB read
* fae3eccae9 UPSTREAM: net/sched: Retire rsvp classifier
* 002ad09f12 UPSTREAM: ipv4: fix null-deref in ipv4_link_failure
* bfeb57ae95 FROMGIT: scsi: ufs: Set the CP flag for RT requests
* 67d3336282 FROMGIT: scsi: ufs: Simplify ufshcd_comp_scsi_upiu()
* 0b5cd2a4a4 ANDROID: scsi: Limit unaligned zoned write retries
* f1b91d8b41 ANDROID: block: Preserve the order of requeued zoned writes
* d7f09c11e7 ANDROID: gki_defconfig: Enable CONFIG_BLK_CGROUP_IOPRIO
* e2dadb5022 ANDROID: GKI: Update symbol list for Amlogic
* 5b8c9a002d ANDROID: vendor_hooks: Add hooks to avoid key threads stalled in memory allocations
* a1ebbe9d5e ANDROID: KVM: arm64: Add missing hyp events for forwarded SMCs
* 86fb1cdb30 ANDROID: GKI: Update symbol list for lenovo
* 2c0ad668ff ANDROID: KVM: arm64: Store hyp address in the host fp state array
* 813b98b113 ANDROID: KVM: arm64: Allocate host fp/simd state later in initialization
* f44a014f33 ANDROID: GKI: Update symbol list for Amlogic
* 0973d792be UPSTREAM: netfilter: nf_tables: disallow rule removal from chain binding
* 46a1c28f37 ANDROID: mm: Use intended order-adjusted batch size
* 6e161d9045 ANDROID: mm: cma: proper ret type for tasks interrupted by fatal signal
* cbd1dda137 ANDROID: GKI: Update RTK STB KMI symbol list
* 79ef0ab3a2 ANDROID: Update the ABI symbol list
* d15ca4faa6 ANDROID: GKI: Update RTK STB KMI symbol list
* 63fc189127 BACKPORT: usb: typec: bus: verify partner exists in typec_altmode_attention
* d61f670260 ANDROID: mm/memory_hotplug: Fix error path handling
* 3207c9ecb1 BACKPORT: mm: page_alloc: fix CMA and HIGHATOMIC landing on the wrong buddy list
* b0572dcd78 UPSTREAM: ARM: ptrace: Restore syscall skipping for tracers
* 277d398991 UPSTREAM: ARM: ptrace: Restore syscall restart tracing
* dfac06d29d FROMGIT: f2fs: preload extent_cache for POSIX_FADV_WILLNEED
* a430d09818 UPSTREAM: bpf, sockmap: fix deadlocks in the sockhash and sockmap
* eb21f15a1a ANDROID: GKI: Update symbol list for Amlogic
* 130bf74489 UPSTREAM: net: sched: sch_qfq: Fix UAF in qfq_dequeue()
* ba5f5fb147 UPSTREAM: net/sched: sch_hfsc: Ensure inner classes have fsc curve
* 360c724a76 ANDROID: GKI: Update symbol list for Amlogic
* d3c35bf422 ANDROID: vendor_hooks: add vendor hook in xhci_urb_suitable_for_idt()
* a29acad961 ANDROID: uid_sys_stat: instead update_io_stats_uid_locked to update_io_stats_uid
* a1931ea630 ANDROID: uid_sys_stat: split the global lock uid_lock to the fine-grained locks for each hlist in hash_table.
* 19b5b13d8f ANDROID: Flush deferred probe list before dropping host priv
* 93ef439161 ANDROID: KVM: arm64: Don't force pte mappings in [n]VHE guest stage-2
* f43b021e14 FROMGIT: f2fs: preload extent_cache for POSIX_FADV_WILLNEED
* a0622550a9 ANDROID: tools/resolve_btfids: Pass CFLAGS to libsubcmd build via EXTRA_CFLAGS
* 4aee33cbf4 ANDROID: libsubcmd: Hoist iterator variable declarations in parse_options_subcommand()
* cc1046e3c7 ANDROID: block: Revert "Send requeued requests to the I/O scheduler"
* c7b7058fbf ANDROID: block: Revert "Preserve the order of requeued requests"
* 1988ebab8f ANDROID: block: Restore request_queue.requeue_work
* cfe32cb3c3 BACKPORT: FROMGIT: scsi: ufs: Include the SCSI ID in UFS command tracing output
* 33d2a21b81 UPSTREAM: ARM: 9269/1: vfp: Add hwcap for FEAT_DotProd
* 49c6c1e40f UPSTREAM: ARM: 9268/1: vfp: Add hwcap FPHP and ASIMDHP for FEAT_FP16
* 52e28a12a9 UPSTREAM: ARM: 9267/1: Define Armv8 registers in AArch32 state
* a6f12f29b8 ANDROID: mm: fix freeing of MIGRATE_ISOLATE page
* 5da77083fc ANDROID: GKI: Update symbol list for Amlogic
* e5e093b964 UPSTREAM: tcpm: Avoid soft reset when partner does not support get_status
* f0fb694baa UPSTREAM: netfilter: nf_tables: prevent OOB access in nft_byteorder_eval
* e7c8c7106f UPSTREAM: tty: n_gsm: fix the UAF caused by race condition in gsm_cleanup_mux
* 19852c1305 ANDROID: GKI: Update RTK STB KMI symbol list
* 84d3e59750 UPSTREAM: af_unix: Fix null-ptr-deref in unix_stream_sendpage().
* 40d7d94451 ANDROID: Add initial symbol list for Tuxera
* 37f6973166 ANDROID: GKI: update mtktv symbol
* 42e5080692 ANDROID: GKI: Update symbol list for Amlogic
* 20fb3d0214 ANDROID: vendor_hooks: add vendor hook in cma_alloc()
* 5fae54013c BACKPORT: mm/filemap.c: fix update prev_pos after one read request done
* e9e2caeade UPSTREAM: usb: typec: tcpm: set initial svdm version based on pd revision
* b2cc1ef410 ANDROID: KVM: arm64: Don't update IOMMUs for share/unshare
* dd8ce75b2a ANDROID: Update the ABI symbol list
* 99aa573db4 ANDROID: fs/proc: Perform priority inheritance around access_remote_vm()
* 89a4bca262 ANDROID: Update the ABI symbol list
* 3374f61d4b ANDROID: sched: Add EXPORT_SYMBOL_GPL for sched_wakeup
* f627c35dd5 ANDROID: GKI: Enable CONFIG_IOMMU_IO_PGTABLE_ARMV7S
* 633bfdda75 ANDROID: fuse-bpf: Align data structs for 32-bit kernels
* 4d8e72739b ANDROID: uid_sys_stats: Use llist for deferred work
* 99deaa6c75 ANDROID: uid_sys_stats: Use a single work for deferred updates
* 1b5b705080 ANDROID: fuse-bpf: Get correct inode in mkdir
* 347a154da8 ANDROID: blk-mq: Run zoned blocking queues asynchronously
* be1b509d81 ANDROID: add initial symbol list for mtkott
* 4f597a1965 BACKPORT: net: nfc: Fix use-after-free caused by nfc_llcp_find_local
* 6a9549f082 UPSTREAM: netfilter: nf_tables: deactivate catchall elements in next generation
* ca527661da ANDROID: GKI: Update symbol list for Amlogic
* 0fead6967d ANDROID: Update the ABI symbol list
* 2296fa4c8f UPSTREAM: erofs: refine managed inode stuffs
* b014a90325 UPSTREAM: exfat: check if filename entries exceeds max filename length
* 47929f7de3 ANDROID: GKI: Update symbol list for lenovo
* 2ab9f0baf2 ANDROID: Add initial QCOM symbol list
* bcae79d322 ANDROID: GKI: Update RTK STB KMI symbol list
* 441510cfc0 ANDROID: vendor_hooks: add vendor hook in current_alloc_flags()
* ebd14a6c68 ANDROID: vendor_hooks: add vendor hook to report acr_info in cma_alloc()
* 9a7a27181e ANDROID: GKI: Update symbol list for Amlogic
* 7b00dfafc9 BACKPORT: FROMGIT: netfilter: nfnetlink_log: always add a timestamp
* 98a63aed61 UPSTREAM: cpuidle: Add cpu_idle_miss trace event
* 3abb2ec8d2 ANDROID: Use alias for old rules.
* 2c7641526f ANDROID: Add arch specific gki module list targets
* e8b59bcd78 ANDROID: fips140: fix the error injection module parameters

Change-Id: Ib56d3821f4fbce61a52bf6fcf686ef5adff6963e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2023-10-25 08:20:59 +00:00
90 changed files with 26683 additions and 278 deletions

View File

@@ -15,7 +15,7 @@ load(
"kernel_unstripped_modules_archive",
"merged_kernel_uapi_headers",
)
load(":modules.bzl", "COMMON_GKI_MODULES_LIST")
load(":modules.bzl", "get_gki_modules_list")
package(
default_visibility = [
@@ -40,10 +40,49 @@ checkpatch(
checkpatch_pl = "scripts/checkpatch.pl",
)
write_file(
# Deprecated - Use arch specific files from below.
alias(
name = "gki_system_dlkm_modules",
out = "android/gki_system_dlkm_modules",
content = COMMON_GKI_MODULES_LIST + [
actual = "gki_system_dlkm_modules_arm64",
deprecation = """
Common list for all architectures is deprecated.
Instead use the file corresponding to the architecture used:
i.e. `gki_system_dlkm_modules_{arch}`
""",
)
alias(
name = "android/gki_system_dlkm_modules",
actual = "android/gki_system_dlkm_modules_arm64",
deprecation = """
Common list for all architectures is deprecated.
Instead use the file corresponding to the architecture used:
i.e. `gki_system_dlkm_modules_{arch}`
""",
)
write_file(
name = "gki_system_dlkm_modules_arm64",
out = "android/gki_system_dlkm_modules_arm64",
content = get_gki_modules_list("arm64") + [
# Ensure new line at the end.
"",
],
)
write_file(
name = "gki_system_dlkm_modules_x86_64",
out = "android/gki_system_dlkm_modules_x86_64",
content = get_gki_modules_list("x86_64") + [
# Ensure new line at the end.
"",
],
)
write_file(
    name = "gki_system_dlkm_modules_riscv64",
out = "android/gki_system_dlkm_modules_riscv64",
content = get_gki_modules_list("riscv64") + [
# Ensure new line at the end.
"",
],
@@ -57,12 +96,16 @@ filegroup(
"android/abi_gki_aarch64_db845c",
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_fips140",
"android/abi_gki_aarch64_kunit",
"android/abi_gki_aarch64_lenovo",
"android/abi_gki_aarch64_mtkott",
"android/abi_gki_aarch64_mtktv",
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_qcom",
"android/abi_gki_aarch64_rtkstb",
"android/abi_gki_aarch64_rtktv",
"android/abi_gki_aarch64_tuxera",
"android/abi_gki_aarch64_virtual_device",
"android/abi_gki_aarch64_xiaomi",
"android/abi_gki_aarch64_zeku",
@@ -73,41 +116,41 @@ filegroup(
define_common_kernels(target_configs = {
"kernel_aarch64": {
"kmi_symbol_list_strict_mode": True,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"kmi_symbol_list": "android/abi_gki_aarch64",
"kmi_symbol_list_add_only": True,
"additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
"protected_exports_list": "android/abi_gki_protected_exports_aarch64",
"protected_modules_list": "android/gki_aarch64_protected_modules",
"module_implicit_outs": get_gki_modules_list("arm64"),
"make_goals": _GKI_AARCH64_MAKE_GOALS,
},
"kernel_aarch64_16k": {
"kmi_symbol_list_strict_mode": False,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"module_implicit_outs": get_gki_modules_list("arm64"),
"make_goals": _GKI_AARCH64_MAKE_GOALS,
},
"kernel_aarch64_debug": {
"kmi_symbol_list_strict_mode": False,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"kmi_symbol_list": "android/abi_gki_aarch64",
"kmi_symbol_list_add_only": True,
"additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
"protected_exports_list": "android/abi_gki_protected_exports_aarch64",
"protected_modules_list": "android/gki_aarch64_protected_modules",
"module_implicit_outs": get_gki_modules_list("arm64"),
"make_goals": _GKI_AARCH64_MAKE_GOALS,
},
"kernel_x86_64": {
"kmi_symbol_list_strict_mode": False,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"protected_exports_list": "android/abi_gki_protected_exports_x86_64",
"protected_modules_list": "android/gki_x86_64_protected_modules",
"module_implicit_outs": get_gki_modules_list("x86_64"),
"make_goals": _GKI_X86_64_MAKE_GOALS,
},
"kernel_x86_64_debug": {
"kmi_symbol_list_strict_mode": False,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"protected_exports_list": "android/abi_gki_protected_exports_x86_64",
"protected_modules_list": "android/gki_x86_64_protected_modules",
"module_implicit_outs": get_gki_modules_list("x86_64"),
"make_goals": _GKI_X86_64_MAKE_GOALS,
},
})
@@ -439,7 +482,7 @@ kernel_build(
"modules",
"rockchip/rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
module_outs = get_gki_modules_list("arm64") + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
visibility = ["//visibility:private"],
)
@@ -463,7 +506,7 @@ kernel_build(
"modules",
"rockchip/rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS,
module_outs = get_gki_modules_list("arm64") + _ROCKPI4_MODULE_OUTS,
visibility = ["//visibility:private"],
)
@@ -572,6 +615,70 @@ kernel_build(
visibility = ["//visibility:private"],
)
# KUnit test targets
# Modules defined by tools/testing/kunit/configs/android/kunit_defconfig
_KUNIT_COMMON_MODULES = [
# keep sorted
"drivers/rtc/lib_test.ko",
"fs/ext4/ext4-inode-test.ko",
"fs/fat/fat_test.ko",
"kernel/time/time_test.ko",
"lib/kunit/kunit-example-test.ko",
"lib/kunit/kunit-test.ko",
"mm/kfence/kfence_test.ko",
"sound/soc/soc-topology-test.ko",
]
kernel_build(
name = "kunit_aarch64",
outs = [],
arch = "arm64",
base_kernel = ":kernel_aarch64",
build_config = "build.config.kunit.aarch64",
defconfig_fragments = [
"tools/testing/kunit/configs/android/kunit_defconfig",
],
kmi_symbol_list = "android/abi_gki_aarch64_kunit",
make_goals = ["modules"],
module_outs = _KUNIT_COMMON_MODULES,
)
copy_to_dist_dir(
name = "kunit_aarch64_dist",
data = [":kunit_aarch64"],
dist_dir = "out/kunit_aarch64/dist",
flat = True,
log = "info",
)
kernel_abi(
name = "kunit_aarch64_abi",
kernel_build = ":kunit_aarch64",
kmi_symbol_list_add_only = True,
)
kernel_build(
name = "kunit_x86_64",
outs = [],
arch = "x86_64",
base_kernel = ":kernel_x86_64",
build_config = "build.config.kunit.x86_64",
defconfig_fragments = [
"tools/testing/kunit/configs/android/kunit_defconfig",
],
make_goals = ["modules"],
module_outs = _KUNIT_COMMON_MODULES,
)
copy_to_dist_dir(
name = "kunit_x86_64_dist",
data = [":kunit_x86_64"],
dist_dir = "out/kunit_x86_64/dist",
flat = True,
log = "info",
)
# DDK Headers
# All headers. These are the public targets for DDK modules to use.
alias(

File diff suppressed because it is too large Load Diff

View File

@@ -162,6 +162,8 @@
clk_register
clk_register_composite
clk_round_rate
clk_set_max_rate
clk_set_min_rate
clk_set_parent
clk_set_rate
clk_unprepare
@@ -471,6 +473,7 @@
dma_heap_get_dev
dma_heap_get_drvdata
dma_heap_get_name
dma_heap_put
d_make_root
dmam_alloc_attrs
dma_map_page_attrs
@@ -514,6 +517,7 @@
drm_atomic_helper_check
drm_atomic_helper_cleanup_planes
drm_atomic_helper_commit_cleanup_done
drm_atomic_helper_commit_duplicated_state
drm_atomic_helper_commit_tail
drm_atomic_helper_commit_tail_rpm
__drm_atomic_helper_connector_destroy_state
@@ -525,6 +529,7 @@
__drm_atomic_helper_crtc_destroy_state
__drm_atomic_helper_crtc_duplicate_state
drm_atomic_helper_disable_plane
drm_atomic_helper_duplicate_state
drm_atomic_helper_page_flip
__drm_atomic_helper_plane_destroy_state
__drm_atomic_helper_plane_duplicate_state
@@ -632,6 +637,7 @@
drm_modeset_drop_locks
drm_modeset_lock
drm_modeset_lock_all
drm_modeset_lock_all_ctx
drm_modeset_unlock
drm_modeset_unlock_all
drm_mode_vrefresh
@@ -709,6 +715,7 @@
find_get_pid
_find_last_bit
_find_next_bit
find_task_by_vpid
__find_vma
find_vm_area
find_vpid
@@ -798,6 +805,7 @@
get_cpu_iowait_time_us
get_device
get_device_system_crosststamp
get_each_dmabuf
__get_free_pages
get_kernel_pages
get_net_ns_by_fd
@@ -806,6 +814,7 @@
get_random_bytes
get_random_u32
get_random_u64
__get_task_comm
get_tree_bdev
get_unused_fd_flags
get_user_pages
@@ -1020,6 +1029,7 @@
kill_block_super
kill_fasync
kill_pid
kimage_vaddr
kimage_voffset
__kmalloc
kmalloc_caches
@@ -1079,6 +1089,7 @@
led_trigger_unregister_simple
__list_add_valid
__list_del_entry_valid
list_sort
ll_rw_block
load_nls
load_nls_default
@@ -1145,6 +1156,7 @@
mmc_detect_change
mmc_free_host
mmc_gpio_get_cd
mmc_gpiod_request_cd
mmc_of_parse
mmc_regulator_get_supply
mmc_regulator_set_ocr
@@ -1302,6 +1314,7 @@
of_prop_next_u32
of_pwm_xlate_with_flags
of_reserved_mem_device_init_by_idx
of_reserved_mem_device_init_by_name
of_reserved_mem_device_release
of_reserved_mem_lookup
of_reset_control_array_get
@@ -1328,6 +1341,7 @@
panic
panic_notifier_list
param_array_ops
param_get_bool
param_get_charp
param_get_hexint
param_get_int
@@ -1714,6 +1728,7 @@
sg_next
__sg_page_iter_next
__sg_page_iter_start
sg_pcopy_to_buffer
show_class_attr_string
show_regs
simple_attr_open
@@ -1891,6 +1906,7 @@
syscon_node_to_regmap
syscon_regmap_lookup_by_phandle
sysctl_sched_latency
sysfs_create_bin_file
sysfs_create_file_ns
sysfs_create_group
sysfs_create_link
@@ -1915,6 +1931,7 @@
__tasklet_schedule
tasklet_setup
tasklet_unlock_wait
tasklist_lock
task_may_not_preempt
thermal_cooling_device_unregister
thermal_of_cooling_device_register
@@ -1937,6 +1954,7 @@
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_do_sea
__traceiter_android_rvh_do_undefinstr
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_iommu_setup_dma_ops
@@ -1945,21 +1963,37 @@
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_tick_entry
__traceiter_android_vh_alloc_pages_entry
__traceiter_android_vh_cma_alloc_bypass
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_free_unref_page_bypass
__traceiter_android_vh_ftrace_format_check
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_kvmalloc_node_use_vmalloc
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_printk_caller
__traceiter_android_vh_printk_caller_id
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rmqueue_bulk_bypass
__traceiter_android_vh_sched_show_task
__traceiter_android_vh_set_module_permit_after_init
__traceiter_android_vh_should_alloc_pages_retry
__traceiter_android_vh_unreserve_highatomic_bypass
__traceiter_android_vh_xhci_urb_suitable_bypass
__traceiter_gpu_mem_total
__traceiter_irq_handler_entry
__traceiter_irq_handler_exit
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_rwmmio_post_read
__traceiter_rwmmio_post_write
__traceiter_rwmmio_read
__traceiter_rwmmio_write
__traceiter_sched_switch
__traceiter_xdp_exception
trace_output_call
@@ -1967,6 +2001,7 @@
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_do_sea
__tracepoint_android_rvh_do_undefinstr
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_iommu_setup_dma_ops
@@ -1975,15 +2010,27 @@
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_tick_entry
__tracepoint_android_vh_alloc_pages_entry
__tracepoint_android_vh_cma_alloc_bypass
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_free_unref_page_bypass
__tracepoint_android_vh_ftrace_format_check
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_kvmalloc_node_use_vmalloc
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_printk_caller
__tracepoint_android_vh_printk_caller_id
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rmqueue_bulk_bypass
__tracepoint_android_vh_sched_show_task
__tracepoint_android_vh_set_module_permit_after_init
__tracepoint_android_vh_should_alloc_pages_retry
__tracepoint_android_vh_unreserve_highatomic_bypass
__tracepoint_android_vh_xhci_urb_suitable_bypass
__tracepoint_gpu_mem_total
__tracepoint_irq_handler_entry
__tracepoint_irq_handler_exit
@@ -1992,6 +2039,10 @@
__tracepoint_mmap_lock_start_locking
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_rwmmio_post_read
__tracepoint_rwmmio_post_write
__tracepoint_rwmmio_read
__tracepoint_rwmmio_write
__tracepoint_sched_switch
__tracepoint_xdp_exception
trace_print_array_seq
@@ -2034,6 +2085,7 @@
unregister_blkdev
__unregister_chrdev
unregister_chrdev_region
unregister_die_notifier
unregister_filesystem
unregister_inet6addr_notifier
unregister_inetaddr_notifier
@@ -2076,6 +2128,9 @@
usb_get_from_anchor
usb_hcd_check_unlink_urb
usb_hcd_giveback_urb
usb_hcd_is_primary_hcd
usb_hcd_link_urb_to_ep
usb_hcd_map_urb_for_dma
usb_hcd_resume_root_hub
usb_ifnum_to_if
usb_interrupt_msg
@@ -2220,6 +2275,7 @@
xdp_rxq_info_unreg
xdp_rxq_info_unreg_mem_model
xdp_warn
xhci_get_ep_ctx
xp_alloc
xp_dma_map
xp_dma_sync_for_cpu_slow

View File

@@ -0,0 +1,94 @@
[abi_symbol_list]
# commonly used symbols
kfree
kmalloc_caches
kunit_binary_assert_format
kunit_do_assertion
kunit_fail_assert_format
kunit_kmalloc_array
kunit_log_append
kunit_ptr_not_err_assert_format
__kunit_test_suites_exit
__kunit_test_suites_init
kunit_try_catch_throw
kunit_unary_assert_format
memset
module_layout
_printk
__put_task_struct
_raw_spin_lock_irqsave
_raw_spin_unlock_irqrestore
scnprintf
__stack_chk_fail
strcmp
strscpy
__ubsan_handle_cfi_check_fail_abort
# required by fat_test.ko
fat_time_fat2unix
fat_time_unix2fat
# required by kfence_test.ko
for_each_kernel_tracepoint
jiffies
kasan_flag_enabled
__kfence_pool
__kmalloc
kmem_cache_alloc
kmem_cache_alloc_bulk
kmem_cache_create
kmem_cache_destroy
kmem_cache_free
kmem_cache_free_bulk
kmem_cache_shrink
krealloc
ksize
prandom_u32
rcu_barrier
__rcu_read_lock
__rcu_read_unlock
strchr
strnstr
strstr
synchronize_rcu
synchronize_srcu
tracepoint_probe_register
tracepoint_probe_unregister
tracepoint_srcu
# required by kunit-test.ko
arm64_const_caps_ready
__cfi_slowpath_diag
cpu_hwcap_keys
kmem_cache_alloc_trace
kunit_add_named_resource
kunit_add_resource
kunit_alloc_and_get_resource
kunit_binary_ptr_assert_format
kunit_binary_str_assert_format
kunit_cleanup
kunit_destroy_resource
kunit_init_test
kunit_try_catch_run
refcount_warn_saturate
# required by lib_test.ko
rtc_month_days
rtc_time64_to_tm
# required by soc-topology-test.ko
get_device
memcpy
put_device
__root_device_register
root_device_unregister
snd_soc_add_component
snd_soc_component_initialize
snd_soc_register_card
snd_soc_tplg_component_load
snd_soc_tplg_component_remove
snd_soc_unregister_card
snd_soc_unregister_component
# required by time_test.ko
time64_to_tm

View File

@@ -557,6 +557,7 @@
idr_alloc
idr_find
idr_remove
iio_get_channel_type
init_net
init_pid_ns
__init_rwsem
@@ -1338,6 +1339,7 @@
vfree
__vmalloc
vmalloc
vmalloc_array
vmalloc_user
vmap
vmf_insert_pfn_prot

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -279,6 +279,7 @@
debugfs_attr_write
debugfs_create_atomic_t
debugfs_create_bool
debugfs_create_devm_seqfile
debugfs_create_dir
debugfs_create_file
debugfs_create_file_unsafe
@@ -365,6 +366,7 @@
devm_clk_get
devm_clk_get_optional
devm_clk_put
devm_device_add_group
devm_device_add_groups
__devm_drm_dev_alloc
devm_drm_panel_bridge_add_typed
@@ -611,7 +613,9 @@
drm_bridge_chain_mode_set
drm_bridge_remove
drm_compat_ioctl
drm_connector_atomic_hdr_metadata_equal
drm_connector_attach_encoder
drm_connector_attach_hdr_output_metadata_property
drm_connector_attach_max_bpc_property
drm_connector_cleanup
drm_connector_init
@@ -678,6 +682,7 @@
drm_gem_vm_close
drm_gem_vm_open
drm_get_format_info
drm_hdmi_infoframe_set_hdr_metadata
drm_helper_mode_fill_fb_struct
drm_helper_probe_single_connector_modes
drm_ioctl
@@ -928,6 +933,7 @@
handle_simple_irq
handle_sysrq
have_governor_per_policy
hdmi_drm_infoframe_pack_only
hex2bin
hex_dump_to_buffer
hex_to_bin
@@ -2114,6 +2120,7 @@
__traceiter_android_rvh_rtmutex_prepare_setprio
__traceiter_android_rvh_sched_fork
__traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_sched_setaffinity
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_task_rq_fair
__traceiter_android_rvh_select_task_rq_rt
@@ -2152,6 +2159,8 @@
__traceiter_android_vh_mm_compaction_begin
__traceiter_android_vh_mm_compaction_end
__traceiter_android_vh_pagecache_get_page
__traceiter_android_vh_prio_inheritance
__traceiter_android_vh_prio_restore
__traceiter_android_vh_reclaim_pages_plug
__traceiter_android_vh_resume_end
__traceiter_android_vh_rmqueue
@@ -2183,6 +2192,7 @@
__traceiter_android_vh_use_amu_fie
__traceiter_clock_set_rate
__traceiter_cpu_frequency
__traceiter_cpu_idle
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_dwc3_readl
@@ -2206,6 +2216,7 @@
__traceiter_sched_switch
__traceiter_sched_util_est_cfs_tp
__traceiter_sched_util_est_se_tp
__traceiter_sched_wakeup
__traceiter_softirq_entry
__traceiter_softirq_exit
__traceiter_suspend_resume
@@ -2245,6 +2256,7 @@
__tracepoint_android_rvh_rtmutex_prepare_setprio
__tracepoint_android_rvh_sched_fork
__tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_sched_setaffinity
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_task_rq_fair
__tracepoint_android_rvh_select_task_rq_rt
@@ -2283,6 +2295,8 @@
__tracepoint_android_vh_mm_compaction_begin
__tracepoint_android_vh_mm_compaction_end
__tracepoint_android_vh_pagecache_get_page
__tracepoint_android_vh_prio_inheritance
__tracepoint_android_vh_prio_restore
__tracepoint_android_vh_reclaim_pages_plug
__tracepoint_android_vh_resume_end
__tracepoint_android_vh_rmqueue
@@ -2314,6 +2328,7 @@
__tracepoint_android_vh_use_amu_fie
__tracepoint_clock_set_rate
__tracepoint_cpu_frequency
__tracepoint_cpu_idle
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_dwc3_readl
@@ -2339,6 +2354,7 @@
__tracepoint_sched_switch
__tracepoint_sched_util_est_cfs_tp
__tracepoint_sched_util_est_se_tp
__tracepoint_sched_wakeup
__tracepoint_softirq_entry
__tracepoint_softirq_exit
__tracepoint_suspend_resume

2661
android/abi_gki_aarch64_qcom Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -464,6 +464,33 @@
dma_contiguous_default_area
sg_alloc_table_from_pages_segment
# required by cpufreq-dt.ko
cpufreq_enable_boost_support
cpufreq_freq_attr_scaling_available_freqs
cpufreq_freq_attr_scaling_boost_freqs
cpufreq_generic_frequency_table_verify
cpufreq_generic_get
cpufreq_generic_suspend
cpufreq_register_driver
cpufreq_unregister_driver
cpumask_next
__cpu_possible_mask
dev_pm_opp_free_cpufreq_table
dev_pm_opp_get_max_transition_latency
dev_pm_opp_get_opp_count
dev_pm_opp_get_sharing_cpus
dev_pm_opp_get_suspend_opp_freq
dev_pm_opp_init_cpufreq_table
dev_pm_opp_of_cpumask_add_table
dev_pm_opp_of_cpumask_remove_table
dev_pm_opp_of_get_sharing_cpus
dev_pm_opp_of_register_em
dev_pm_opp_put_regulators
dev_pm_opp_set_rate
dev_pm_opp_set_regulators
dev_pm_opp_set_sharing_cpus
policy_has_boost_freq
# required by cqhci.ko
devm_blk_crypto_profile_init
dmam_alloc_attrs
@@ -785,6 +812,16 @@
soc_device_register
soc_device_unregister
# required by rtk_cma_accelerator.ko
__traceiter_android_vh_calc_alloc_flags
__traceiter_android_vh_cma_alloc_busy_info
__traceiter_android_vh_cma_alloc_finish
__traceiter_android_vh_cma_alloc_start
__tracepoint_android_vh_calc_alloc_flags
__tracepoint_android_vh_cma_alloc_busy_info
__tracepoint_android_vh_cma_alloc_finish
__tracepoint_android_vh_cma_alloc_start
# required by rtk_cpu_volt_sel.ko
bus_register_notifier
dev_pm_opp_put_prop_name
@@ -796,6 +833,7 @@
component_bind_all
component_del
component_master_del
devm_extcon_dev_free
component_unbind_all
devm_gpio_request_one
dma_buf_begin_cpu_access
@@ -902,6 +940,8 @@
drm_property_create_range
drm_read
drm_release
drm_scdc_read
drm_scdc_write
drm_scdc_set_high_tmds_clock_ratio
drm_scdc_set_scrambling
drm_universal_plane_init
@@ -1050,6 +1090,7 @@
# required by sdhci-of-rtkstb.ko
sdhci_set_clock
sdhci_set_ios
sdhci_request
# required by sdhci-rtk.ko
sdhci_adma_write_desc

View File

@@ -0,0 +1,286 @@
[abi_symbol_list]
add_to_page_cache_locked
__alloc_pages
__arch_copy_from_user
__arch_copy_to_user
arm64_const_caps_ready
autoremove_wake_function
balance_dirty_pages_ratelimited
bcmp
bdev_read_only
__bforget
bio_add_page
bio_alloc_bioset
bio_associate_blkg
bio_put
__bitmap_weight
bit_waitqueue
blkdev_issue_discard
blkdev_issue_flush
blk_finish_plug
blk_start_plug
__blockdev_direct_IO
block_invalidatepage
block_is_partially_uptodate
__breadahead
__bread_gfp
__brelse
buffer_migrate_page
call_rcu
__cancel_dirty_page
capable
capable_wrt_inode_uidgid
__cfi_slowpath_diag
__check_object_size
clean_bdev_aliases
__cleancache_invalidate_inode
clear_inode
clear_page
clear_page_dirty_for_io
complete_and_exit
copy_page_from_iter_atomic
cpu_hwcap_keys
cpu_hwcaps
create_empty_buffers
current_umask
d_add
d_add_ci
delete_from_page_cache
d_instantiate
d_make_root
d_obtain_alias
down_read
down_write
down_write_trylock
dput
drop_nlink
d_splice_alias
dump_stack
end_buffer_read_sync
end_buffer_write_sync
end_page_writeback
errseq_set
fault_in_iov_iter_readable
fault_in_safe_writeable
fget
fiemap_fill_next_extent
fiemap_prep
file_check_and_advance_wb_err
filemap_fault
filemap_fdatawait_range
filemap_fdatawrite
filemap_fdatawrite_range
filemap_flush
__filemap_set_wb_err
filemap_write_and_wait_range
file_remove_privs
file_update_time
file_write_and_wait_range
finish_wait
flush_dcache_page
fput
freezing_slow_path
fs_bio_set
generic_error_remove_page
generic_file_direct_write
generic_file_llseek
generic_file_mmap
generic_file_open
generic_file_read_iter
generic_file_splice_read
generic_fillattr
generic_perform_write
generic_read_dir
generic_write_checks
__getblk_gfp
gic_nonsecure_priorities
grab_cache_page_write_begin
iget5_locked
igrab
ihold
ilookup5
inc_nlink
in_group_p
__init_rwsem
init_special_inode
init_wait_entry
__init_waitqueue_head
inode_dio_wait
inode_init_once
inode_init_owner
inode_newsize_ok
inode_set_flags
__insert_inode_hash
invalidate_bdev
invalidate_mapping_pages
io_schedule
iov_iter_advance
iov_iter_alignment
iov_iter_get_pages
iov_iter_single_seg_count
iput
is_bad_inode
iter_file_splice_write
iunique
jiffies
jiffies_to_msecs
kasan_flag_enabled
kfree
kill_block_super
__kmalloc
kmalloc_caches
kmem_cache_alloc
kmem_cache_alloc_trace
kmem_cache_create
kmem_cache_create_usercopy
kmem_cache_destroy
kmem_cache_free
krealloc
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get_coarse_real_ts64
kvfree
__list_add_valid
__list_del_entry_valid
ll_rw_block
load_nls
load_nls_default
__lock_buffer
__lock_page
lru_cache_add
make_bad_inode
mark_buffer_async_write
mark_buffer_dirty
mark_buffer_write_io_error
__mark_inode_dirty
mark_page_accessed
memcmp
memcpy
memmove
memset
mktime64
mnt_drop_write_file
mnt_want_write_file
mount_bdev
mpage_readahead
mpage_readpage
__msecs_to_jiffies
__mutex_init
mutex_lock
mutex_trylock
mutex_unlock
new_inode
notify_change
pagecache_get_page
page_cache_next_miss
page_cache_prev_miss
page_mapped
page_pinner_inited
__page_pinner_put_page
pagevec_lookup_range
pagevec_lookup_range_tag
__pagevec_release
page_zero_new_buffers
__percpu_down_read
preempt_schedule
preempt_schedule_notrace
prepare_to_wait
prepare_to_wait_event
_printk
__printk_ratelimit
__put_page
put_pages_list
___ratelimit
_raw_read_lock
_raw_read_lock_irqsave
_raw_read_unlock
_raw_read_unlock_irqrestore
_raw_spin_lock
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_irqrestore
_raw_write_lock
_raw_write_lock_irqsave
_raw_write_unlock
_raw_write_unlock_irqrestore
rcu_barrier
rcuwait_wake_up
readahead_gfp_mask
read_cache_page
redirty_page_for_writepage
__refrigerator
register_filesystem
__remove_inode_hash
sb_min_blocksize
sb_set_blocksize
schedule
schedule_timeout_interruptible
security_inode_init_security
seq_printf
setattr_prepare
set_freezable
set_nlink
set_page_dirty
__set_page_dirty_buffers
__set_page_dirty_nobuffers
set_user_nice
simple_strtol
simple_strtoul
simple_strtoull
snprintf
sprintf
sscanf
__stack_chk_fail
strchr
strcmp
strlen
strncasecmp
strncmp
strsep
strstr
submit_bh
submit_bio
sync_blockdev
__sync_dirty_buffer
sync_dirty_buffer
sync_filesystem
sync_inode_metadata
system_freezing_cnt
sys_tz
tag_pages_for_writeback
__test_set_page_writeback
time64_to_tm
timestamp_truncate
touch_atime
_trace_android_vh_record_pcpu_rwsem_starttime
truncate_inode_pages
truncate_inode_pages_final
truncate_pagecache
truncate_setsize
try_to_release_page
try_to_writeback_inodes_sb
__ubsan_handle_cfi_check_fail_abort
unload_nls
unlock_buffer
unlock_new_inode
unlock_page
unmap_mapping_range
unregister_filesystem
up_read
up_write
vfree
vfs_fsync_range
vmalloc
__vmalloc
vsnprintf
vzalloc
__wait_on_buffer
wait_on_page_bit
wake_bit_function
__wake_up
wake_up_process
__warn_printk
write_inode_now
write_one_page
xa_load

View File

@@ -25,6 +25,8 @@
#define CPUID_EXT_ISAR3 0x6c
#define CPUID_EXT_ISAR4 0x70
#define CPUID_EXT_ISAR5 0x74
#define CPUID_EXT_ISAR6 0x7c
#define CPUID_EXT_PFR2 0x90
#else
#define CPUID_EXT_PFR0 "c1, 0"
#define CPUID_EXT_PFR1 "c1, 1"
@@ -40,6 +42,8 @@
#define CPUID_EXT_ISAR3 "c2, 3"
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
#define CPUID_EXT_ISAR6 "c2, 7"
#define CPUID_EXT_PFR2 "c3, 4"
#endif
#define MPIDR_SMP_BITMASK (0x3 << 30)

View File

@@ -87,6 +87,12 @@
#define MVFR0_DP_BIT (8)
#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
/* MVFR1 bits */
#define MVFR1_ASIMDHP_BIT (20)
#define MVFR1_ASIMDHP_MASK (0xf << MVFR1_ASIMDHP_BIT)
#define MVFR1_FPHP_BIT (24)
#define MVFR1_FPHP_MASK (0xf << MVFR1_FPHP_BIT)
/* Bit patterns for decoding the packaged operation descriptors */
#define VFPOPDESC_LENGTH_BIT (9)
#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT)

View File

@@ -28,6 +28,9 @@
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
#define HWCAP_LPAE (1 << 20)
#define HWCAP_EVTSTRM (1 << 21)
#define HWCAP_FPHP (1 << 22)
#define HWCAP_ASIMDHP (1 << 23)
#define HWCAP_ASIMDDP (1 << 24)
/*
* HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2

View File

@@ -1248,6 +1248,9 @@ static const char *hwcap_str[] = {
"vfpd32",
"lpae",
"evtstrm",
"fphp",
"asimdhp",
"asimddp",
NULL
};

View File

@@ -774,6 +774,7 @@ static int __init vfp_init(void)
{
unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
unsigned int isar6;
/*
* Enable the access to the VFP on all online CPUs so the
@@ -831,7 +832,20 @@ static int __init vfp_init(void)
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
if (((fmrx(MVFR1) & MVFR1_ASIMDHP_MASK) >> MVFR1_ASIMDHP_BIT) == 0x2)
elf_hwcap |= HWCAP_ASIMDHP;
if (((fmrx(MVFR1) & MVFR1_FPHP_MASK) >> MVFR1_FPHP_BIT) == 0x3)
elf_hwcap |= HWCAP_FPHP;
}
/*
* Check for the presence of Advanced SIMD Dot Product
* instructions.
*/
isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
if (cpuid_feature_extract_field(isar6, 4) == 0x1)
elf_hwcap |= HWCAP_ASIMDDP;
/* Extract the architecture version on pre-cpuid scheme */
} else {
if (vfpsid & FPSID_NODOUBLE) {

View File

@@ -109,6 +109,7 @@ CONFIG_MODULE_SIG_PROTECT=y
CONFIG_MODPROBE_PATH="/system/bin/modprobe"
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y
@@ -315,6 +316,7 @@ CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
CONFIG_GNSS=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@@ -567,6 +569,7 @@ CONFIG_COMMON_CLK_SCPI=y
CONFIG_HWSPINLOCK=y
# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set
CONFIG_MAILBOX=y
CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
CONFIG_REMOTEPROC=y
CONFIG_REMOTEPROC_CDEV=y
CONFIG_RPMSG_CHAR=y
@@ -736,4 +739,5 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_HIST_TRIGGERS=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_KUNIT=y
# CONFIG_RUNTIME_TESTING_MENU is not set

View File

@@ -259,6 +259,8 @@ extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[];
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

View File

@@ -414,10 +414,4 @@ static inline size_t pkvm_host_fp_state_size(void)
return sizeof(struct user_fpsimd_state);
}
static inline unsigned long hyp_host_fp_pages(unsigned long nr_cpus)
{
return PAGE_ALIGN(size_mul(nr_cpus, pkvm_host_fp_state_size())) >>
PAGE_SHIFT;
}
#endif /* __ARM64_KVM_PKVM_H__ */

View File

@@ -113,13 +113,21 @@
#define OVERFLOW_STACK_SIZE SZ_4K
#if PAGE_SIZE == SZ_4K
#define NVHE_STACK_SHIFT (PAGE_SHIFT + 1)
#else
#define NVHE_STACK_SHIFT PAGE_SHIFT
#endif
#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT)
/*
* With the minimum frame size of [x29, x30], exactly half the combined
* sizes of the hyp and overflow stacks is the maximum size needed to
* save the unwinded stacktrace; plus an additional entry to delimit the
* end.
*/
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
/*
* Alignment of kernel segments (e.g. .text, .data).

View File

@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);

View File

@@ -126,7 +126,7 @@ KVM_NVHE_ALIAS(__hyp_data_start);
KVM_NVHE_ALIAS(__hyp_data_end);
KVM_NVHE_ALIAS(__hyp_rodata_start);
KVM_NVHE_ALIAS(__hyp_rodata_end);
#ifdef CONFIG_FTRACE
#ifdef CONFIG_TRACING
KVM_NVHE_ALIAS(__hyp_event_ids_start);
KVM_NVHE_ALIAS(__hyp_event_ids_end);
#endif

View File

@@ -50,7 +50,7 @@ static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
DECLARE_KVM_NVHE_PER_CPU(int, hyp_cpu_number);
@@ -1648,6 +1648,11 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
static inline size_t pkvm_host_fp_state_order(void)
{
return get_order(pkvm_host_fp_state_size());
}
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
@@ -2010,8 +2015,10 @@ static void teardown_hyp_mode(void)
free_hyp_pgds();
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
free_pages(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu],
pkvm_host_fp_state_order());
}
}
@@ -2097,6 +2104,48 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
return 0;
}
static int init_pkvm_host_fp_state(void)
{
int cpu;
if (!is_protected_kvm_enabled())
return 0;
/* Allocate pages for protected-mode host-fp state. */
for_each_possible_cpu(cpu) {
struct page *page;
unsigned long addr;
page = alloc_pages(GFP_KERNEL, pkvm_host_fp_state_order());
if (!page)
return -ENOMEM;
addr = (unsigned long)page_address(page);
kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] = addr;
}
/*
* Don't map the pages in hyp since these are only used in protected
* mode, which will (re)create its own mapping when initialized.
*/
return 0;
}
/*
* Finalizes the initialization of hyp mode, once everything else is initialized
* and the initialziation process cannot fail.
*/
static void finalize_init_hyp_mode(void)
{
int cpu;
for_each_possible_cpu(cpu) {
kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu] =
kern_hyp_va(kvm_nvhe_sym(kvm_arm_hyp_host_fp_state)[cpu]);
}
}
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -2124,15 +2173,15 @@ static int init_hyp_mode(void)
* Allocate stack pages for Hypervisor-mode
*/
for_each_possible_cpu(cpu) {
unsigned long stack_page;
unsigned long stack_base;
stack_page = __get_free_page(GFP_KERNEL);
if (!stack_page) {
stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
if (!stack_base) {
err = -ENOMEM;
goto out_err;
}
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
}
/*
@@ -2208,7 +2257,7 @@ static int init_hyp_mode(void)
*/
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
unsigned long hyp_addr;
/*
@@ -2216,7 +2265,7 @@ static int init_hyp_mode(void)
* and guard page. The allocation is also aligned based on
* the order of its size.
*/
err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
err = hyp_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
if (err) {
kvm_err("Cannot allocate hyp stack guard page\n");
goto out_err;
@@ -2227,12 +2276,12 @@ static int init_hyp_mode(void)
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
*/
err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
__pa(stack_page), PAGE_HYP);
err = __create_hyp_mappings(hyp_addr + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
__pa(stack_base), PAGE_HYP);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2244,9 +2293,9 @@ static int init_hyp_mode(void)
* __hyp_pa() won't do the right thing there, since the stack
* has been mapped in the flexible private VA space.
*/
params->stack_pa = __pa(stack_page);
params->stack_pa = __pa(stack_base);
params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
}
for_each_possible_cpu(cpu) {
@@ -2264,6 +2313,10 @@ static int init_hyp_mode(void)
cpu_prepare_hyp_mode(cpu);
}
err = init_pkvm_host_fp_state();
if (err)
goto out_err;
kvm_hyp_init_symbols();
/* TODO: Real .h interface */
@@ -2422,6 +2475,13 @@ int kvm_arch_init(void *opaque)
kvm_info("Hyp mode initialized successfully\n");
}
/*
* This should be called after initialization is done and failure isn't
* possible anymore.
*/
if (!in_hyp_mode)
finalize_init_hyp_mode();
return 0;
out_hyp:

View File

@@ -10,7 +10,7 @@ int main(void)
DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
DEFINE(PKVM_HYP_VM_SIZE, sizeof(struct pkvm_hyp_vm));
DEFINE(PKVM_HYP_VCPU_SIZE, sizeof(struct pkvm_hyp_vcpu));
#ifdef CONFIG_FTRACE
#ifdef CONFIG_TRACING
DEFINE(STRUCT_HYP_BUFFER_PAGE_SIZE, sizeof(struct hyp_buffer_page));
#endif
return 0;

View File

@@ -82,8 +82,6 @@ struct pkvm_hyp_vm {
struct pkvm_hyp_vcpu *vcpus[];
};
extern void *host_fp_state;
static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
@@ -107,7 +105,6 @@ extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;
void pkvm_hyp_vm_table_init(void *tbl);
void pkvm_hyp_host_fp_init(void *host_fp);
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva, unsigned long last_ran_hva);

View File

@@ -154,12 +154,12 @@ SYM_FUNC_END(__host_hvc)
/*
* Test whether the SP has overflowed, without corrupting a GPR.
* nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
* nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
* of SP should always be 1.
*/
add sp, sp, x0 // sp' = sp + x0
sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
tbz x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
tbz x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp

View File

@@ -1383,11 +1383,15 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
handled = kvm_host_ffa_handler(host_ctxt);
if (!handled && smp_load_acquire(&default_host_smc_handler))
handled = default_host_smc_handler(host_ctxt);
if (!handled)
__kvm_hyp_host_forward_smc(host_ctxt);
trace_host_smc(func_id, !handled);
if (!handled) {
trace_hyp_exit();
__kvm_hyp_host_forward_smc(host_ctxt);
trace_hyp_enter();
}
/* SMC was trapped, move ELR past the current PC. */
kvm_skip_host_instr();
}

View File

@@ -1048,9 +1048,20 @@ static int __host_check_page_state_range(u64 addr, u64 size,
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
bool update_iommu = true;
enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
return host_stage2_idmap_locked(addr, size, prot, true);
/*
* Sharing and unsharing host pages shouldn't change the IOMMU page tables,
* so avoid extra page tables walks for the IOMMU.
* HOWEVER THIS WILL NOT WORK WHEN DEVICE ASSIGNMENT IS SUPPORTED AS THE GUEST
* MIGHT HAVE ACCESS TO DMA.
* but as Android-14 doesn't support device assignment this should be fine.
*/
if ((state == PKVM_PAGE_OWNED) || (state == PKVM_PAGE_SHARED_OWNED))
update_iommu = false;
return host_stage2_idmap_locked(addr, size, prot, update_iommu);
}
static int host_request_owned_transition(u64 *completer_addr,
@@ -2038,6 +2049,7 @@ static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm
}
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
KVM_PGTABLE_PROT_DEVICE |\
KVM_PGTABLE_PROT_NC | \
KVM_PGTABLE_PROT_PXN | \
KVM_PGTABLE_PROT_UXN)

View File

@@ -41,17 +41,15 @@ static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
*
* Only valid when (fp_state == FP_STATE_GUEST_OWNED) in the hyp vCPU structure.
*/
void *host_fp_state;
unsigned long __ro_after_init kvm_arm_hyp_host_fp_state[NR_CPUS];
static void *__get_host_fpsimd_bytes(void)
{
void *state = host_fp_state +
size_mul(pkvm_host_fp_state_size(), hyp_smp_processor_id());
if (state < host_fp_state)
return NULL;
return state;
/*
* The addresses in this array have been converted to hyp addresses
* in finalize_init_hyp_mode().
*/
return (void *)kvm_arm_hyp_host_fp_state[hyp_smp_processor_id()];
}
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
@@ -295,12 +293,6 @@ void pkvm_hyp_vm_table_init(void *tbl)
vm_table = tbl;
}
void pkvm_hyp_host_fp_init(void *host_fp)
{
WARN_ON(host_fp_state);
host_fp_state = host_fp;
}
/*
* Return the hyp vm structure corresponding to the handle.
*/

View File

@@ -34,7 +34,6 @@ static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static void *hyp_host_fp_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -69,10 +68,21 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!ffa_proxy_pages)
return -ENOMEM;
nr_pages = hyp_host_fp_pages(hyp_nr_cpus);
hyp_host_fp_base = hyp_early_alloc_contig(nr_pages);
if (!hyp_host_fp_base)
return -ENOMEM;
return 0;
}
static int create_hyp_host_fp_mappings(void)
{
void *start, *end;
int ret, i;
for (i = 0; i < hyp_nr_cpus; i++) {
start = (void *)kern_hyp_va(kvm_arm_hyp_host_fp_state[i]);
end = start + PAGE_ALIGN(pkvm_host_fp_state_size());
ret = pkvm_create_mappings(start, end, PAGE_HYP);
if (ret)
return ret;
}
return 0;
}
@@ -140,7 +150,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
* and guard page. The allocation is also aligned based on
* the order of its size.
*/
ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
ret = pkvm_alloc_private_va_range(NVHE_STACK_SIZE * 2, &hyp_addr);
if (ret)
return ret;
@@ -149,21 +159,23 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
* at the higher address and leave the lower guard page
* unbacked.
*
* Any valid stack address now has the PAGE_SHIFT bit as 1
* Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
* and addresses corresponding to the guard page have the
* PAGE_SHIFT bit as 0 - this is used for overflow detection.
* NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
*/
hyp_spin_lock(&pkvm_pgd_lock);
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
PAGE_SIZE, params->stack_pa, PAGE_HYP);
ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + NVHE_STACK_SIZE,
NVHE_STACK_SIZE, params->stack_pa, PAGE_HYP);
hyp_spin_unlock(&pkvm_pgd_lock);
if (ret)
return ret;
/* Update stack_hyp_va to end of the stack's private VA range */
params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
params->stack_hyp_va = hyp_addr + (2 * NVHE_STACK_SIZE);
}
create_hyp_host_fp_mappings();
/*
* Map the host sections RO in the hypervisor, but transfer the
* ownership from the host to the hypervisor itself to make sure they
@@ -405,7 +417,6 @@ void __noreturn __pkvm_init_finalise(void)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
pkvm_hyp_host_fp_init(hyp_host_fp_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,

View File

@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
stacktrace_info->fp = fp;
stacktrace_info->pc = pc;
@@ -53,7 +53,7 @@ static bool on_hyp_stack(unsigned long sp, unsigned long size,
{
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
unsigned long high = params->stack_hyp_va;
unsigned long low = high - PAGE_SIZE;
unsigned long low = high - NVHE_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

View File

@@ -701,7 +701,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
static bool stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
return true;
return false;
}
static bool stage2_pte_is_counted(kvm_pte_t pte, u32 level)

View File

@@ -173,7 +173,6 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
hyp_mem_pages += hyp_ffa_proxy_pages();
hyp_mem_pages += hyp_host_fp_pages(num_possible_cpus());
/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once
@@ -504,10 +503,6 @@ static int __init finalize_pkvm(void)
if (pkvm_load_early_modules())
pkvm_firmware_rmem_clear();
/* If no DMA protection. */
if (!pkvm_iommu_finalized())
pkvm_firmware_rmem_clear();
/*
* Exclude HYP sections from kmemleak so that they don't get peeked
* at, which would end badly once inaccessible.
@@ -516,6 +511,12 @@ static int __init finalize_pkvm(void)
kmemleak_free_part(__hyp_data_start, __hyp_data_end - __hyp_data_start);
kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
flush_deferred_probe_now();
/* If no DMA protection. */
if (!pkvm_iommu_finalized())
pkvm_firmware_rmem_clear();
ret = pkvm_drop_host_privileges();
if (ret) {
pr_err("Failed to de-privilege the host kernel: %d\n", ret);

View File

@@ -44,7 +44,7 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
switch (type) {
case STACK_TYPE_HYP:
kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
hyp_base = (unsigned long)stacktrace_info->stack_base;
break;
case STACK_TYPE_OVERFLOW:
@@ -79,7 +79,7 @@ static bool on_hyp_stack(unsigned long sp, unsigned long size,
struct kvm_nvhe_stacktrace_info *stacktrace_info
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
unsigned long low = (unsigned long)stacktrace_info->stack_base;
unsigned long high = low + PAGE_SIZE;
unsigned long high = low + NVHE_STACK_SIZE;
return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

View File

@@ -95,6 +95,7 @@ CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_PROTECT=y
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_CGROUP_IOPRIO=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y
@@ -293,6 +294,7 @@ CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
CONFIG_GNSS=y
CONFIG_OF=y
CONFIG_BLK_DEV_NULL_BLK=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
@@ -675,3 +677,4 @@ CONFIG_SCHEDSTATS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_HIST_TRIGGERS=y
CONFIG_UNWINDER_FRAME_POINTER=y
CONFIG_KUNIT=y

View File

@@ -1787,7 +1787,7 @@ int __init blk_dev_init(void)
panic("Failed to create kblockd\n");
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
sizeof(struct internal_request_queue), 0, SLAB_PANIC, NULL);
blk_debugfs_root = debugfs_create_dir("block", NULL);

View File

@@ -412,7 +412,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
bool blk_mq_sched_bypass_insert(struct request *rq)
static bool blk_mq_sched_bypass_insert(struct request *rq)
{
/*
* dispatch flush and passthrough rq directly

View File

@@ -18,7 +18,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
bool blk_mq_sched_bypass_insert(struct request *rq);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,

View File

@@ -71,7 +71,6 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
return !list_empty_careful(&hctx->dispatch) ||
!list_empty_careful(&hctx->queue->requeue_list) ||
sbitmap_any_bit_set(&hctx->ctx_map) ||
blk_mq_sched_has_work(hctx);
}
@@ -772,36 +771,42 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
}
EXPORT_SYMBOL(blk_mq_requeue_request);
static bool blk_mq_has_sqsched(struct request_queue *q);
static void blk_mq_process_requeue_list(struct blk_mq_hw_ctx *hctx)
static void blk_mq_requeue_work(struct work_struct *work)
{
struct request_queue *q = hctx->queue;
struct request_queue *q = &container_of(work,
struct internal_request_queue, requeue_work.work)->q;
LIST_HEAD(rq_list);
struct request *rq, *next;
LIST_HEAD(at_head);
LIST_HEAD(at_tail);
if (list_empty_careful(&q->requeue_list))
return;
spin_lock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &q->requeue_list, queuelist) {
if (!blk_mq_has_sqsched(q) && rq->mq_hctx != hctx)
continue;
if (rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)) {
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_move(&rq->queuelist, &at_head);
} else {
list_move(&rq->queuelist, &at_tail);
}
}
list_splice_init(&q->requeue_list, &rq_list);
spin_unlock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &at_head, queuelist)
blk_mq_sched_insert_request(rq, /*at_head=*/true, false, false);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
continue;
list_for_each_entry_safe(rq, next, &at_tail, queuelist)
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
/*
* If RQF_DONTPREP, rq has contained some driver specific
* data, so insert it to hctx dispatch list to avoid any
* merge.
*/
if (rq->rq_flags & RQF_DONTPREP)
blk_mq_request_bypass_insert(rq, false, false);
else
blk_mq_sched_insert_request(rq, /*at_head=*/
!blk_rq_is_seq_zoned_write(rq), false, false);
}
while (!list_empty(&rq_list)) {
rq = list_entry(rq_list.next, struct request, queuelist);
list_del_init(&rq->queuelist);
blk_mq_sched_insert_request(rq, false, false, false);
}
blk_mq_run_hw_queues(q, false);
}
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
@@ -831,14 +836,19 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q)
{
blk_mq_run_hw_queues(q, /*async=*/in_atomic());
struct internal_request_queue *iq = to_internal_q(q);
kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &iq->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
unsigned long msecs)
{
blk_mq_delay_run_hw_queues(q, msecs);
struct internal_request_queue *iq = to_internal_q(q);
kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &iq->requeue_work,
msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
@@ -1484,8 +1494,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
blk_mq_process_requeue_list(hctx);
hctx_lock(hctx, &srcu_idx);
blk_mq_sched_dispatch_requests(hctx);
hctx_unlock(hctx, srcu_idx);
@@ -1561,7 +1569,10 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
if (!async && cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
if (!async &&
!(hctx->flags & BLK_MQ_F_BLOCKING &&
blk_queue_is_zoned(hctx->queue)) &&
cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
return;
}
@@ -1672,7 +1683,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
* scheduler.
*/
if (!sq_hctx || sq_hctx == hctx ||
blk_mq_hctx_has_pending(hctx))
!list_empty_careful(&hctx->dispatch))
blk_mq_run_hw_queue(hctx, async);
}
}
@@ -1700,7 +1711,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
* scheduler.
*/
if (!sq_hctx || sq_hctx == hctx ||
blk_mq_hctx_has_pending(hctx))
!list_empty_careful(&hctx->dispatch))
blk_mq_delay_run_hw_queue(hctx, msecs);
}
}
@@ -3282,6 +3293,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
set->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
INIT_DELAYED_WORK(&to_internal_q(q)->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->requeue_list);
spin_lock_init(&q->requeue_lock);
@@ -4002,6 +4014,8 @@ void blk_mq_cancel_work_sync(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
int i;
cancel_delayed_work_sync(&to_internal_q(q)->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
}

View File

@@ -17,6 +17,17 @@
extern struct dentry *blk_debugfs_root;
struct internal_request_queue {
struct request_queue q;
struct delayed_work requeue_work;
};
static inline struct internal_request_queue *
to_internal_q(struct request_queue *q)
{
return container_of(q, struct internal_request_queue, q);
}
struct blk_flush_queue {
unsigned int flush_pending_idx:1;
unsigned int flush_running_idx:1;

View File

@@ -6,6 +6,9 @@ FILES="${FILES}
arch/arm64/boot/Image.lz4
arch/arm64/boot/Image.gz
"
ADDITIONAL_KMI_SYMBOL_LISTS="
android/abi_gki_aarch64_qcom
"
BUILD_SYSTEM_DLKM=1

View File

@@ -0,0 +1,4 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
DEFCONFIG=gki_defconfig

View File

@@ -0,0 +1,4 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
DEFCONFIG=gki_defconfig

View File

@@ -17,9 +17,9 @@
* related macros to be expanded as they would be for built-in code; e.g.,
* module_init() adds the function to the .initcalls section of the binary.
*
* The .c file that contains the real module_init() for fips140.ko is then
* responsible for redefining MODULE, and the real module_init() is responsible
* for executing all the initcalls that were collected into .initcalls.
* The .c files that contain the real module_init, module license, and module
* parameters for fips140.ko are then responsible for redefining MODULE. The
* real module_init executes all initcalls that were collected into .initcalls.
*/
#undef MODULE

View File

@@ -20,6 +20,14 @@
__inline_maybe_unused notrace
#undef BUILD_FIPS140_KO
/*
* Since this .c file contains real module parameters for fips140.ko, it needs
* to be compiled normally, so undo the hacks that were done in fips140-defs.h.
*/
#define MODULE
#undef KBUILD_MODFILE
#undef __DISABLE_EXPORTS
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

View File

@@ -195,6 +195,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
@@ -312,6 +317,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_adjust);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_busy_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_calc_alloc_flags);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_compaction_end);
@@ -355,3 +362,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_xhci_urb_suitable_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_entry);

View File

@@ -731,6 +731,29 @@ void wait_for_device_probe(void)
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
/**
* flush_deferred_probe_now
*
* This function should be used sparingly. It's meant for when we need to flush
* the deferred probe list at earlier initcall levels. Really meant only for KVM
* needs. This function should never be exported because it makes no sense for
* modules to call this.
*/
void flush_deferred_probe_now(void)
{
/*
* Really shouldn't using this if deferred probe has already been
* enabled
*/
if (WARN_ON(driver_deferred_probe_enable))
return;
driver_deferred_probe_enable = true;
driver_deferred_probe_trigger();
wait_for_device_probe();
driver_deferred_probe_enable = false;
}
static int __driver_probe_device(struct device_driver *drv, struct device *dev)
{
int ret = 0;

View File

@@ -8,6 +8,7 @@
* This code is licenced under the GPL.
*/
#include "linux/percpu-defs.h"
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -291,6 +292,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* Shallower states are enabled, so update. */
dev->states_usage[entered_state].above++;
trace_cpu_idle_miss(dev->cpu, entered_state, false);
break;
}
} else if (diff > delay) {
@@ -302,8 +304,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
* Update if a deeper state would have been a
* better match for the observed idle duration.
*/
if (diff - delay >= drv->states[i].target_residency_ns)
if (diff - delay >= drv->states[i].target_residency_ns) {
dev->states_usage[entered_state].below++;
trace_cpu_idle_miss(dev->cpu, entered_state, true);
}
break;
}

View File

@@ -19,20 +19,24 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rtmutex.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/spinlock_types.h>
#define UID_HASH_BITS 10
#define UID_HASH_NUMS (1 << UID_HASH_BITS)
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
/*
* uid_lock[bkt] ensure consistency of hash_table[bkt]
*/
spinlock_t uid_lock[UID_HASH_NUMS];
static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;
@@ -77,6 +81,32 @@ struct uid_entry {
#endif
};
static inline int trylock_uid(uid_t uid)
{
return spin_trylock(
&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void lock_uid(uid_t uid)
{
spin_lock(&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void unlock_uid(uid_t uid)
{
spin_unlock(&uid_lock[hash_min(uid, HASH_BITS(hash_table))]);
}
static inline void lock_uid_by_bkt(u32 bkt)
{
spin_lock(&uid_lock[bkt]);
}
static inline void unlock_uid_by_bkt(u32 bkt)
{
spin_unlock(&uid_lock[bkt]);
}
static u64 compute_write_bytes(struct task_io_accounting *ioac)
{
if (ioac->write_bytes <= ioac->cancelled_write_bytes)
@@ -332,24 +362,29 @@ static int uid_cputime_show(struct seq_file *m, void *v)
struct user_namespace *user_ns = current_user_ns();
u64 utime;
u64 stime;
unsigned long bkt;
u32 bkt;
uid_t uid;
rt_mutex_lock(&uid_lock);
hash_for_each(hash_table, bkt, uid_entry, hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
uid_entry->active_stime = 0;
uid_entry->active_utime = 0;
}
unlock_uid_by_bkt(bkt);
}
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
rcu_read_unlock();
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
pr_err("%s: failed to find the uid_entry for uid %d\n",
__func__, uid);
return -ENOMEM;
@@ -360,19 +395,24 @@ static int uid_cputime_show(struct seq_file *m, void *v)
uid_entry->active_utime += utime;
uid_entry->active_stime += stime;
}
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
u64 total_utime = uid_entry->utime +
uid_entry->active_utime;
u64 total_stime = uid_entry->stime +
uid_entry->active_stime;
seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
ktime_to_us(total_utime), ktime_to_us(total_stime));
for (bkt = 0, uid_entry = NULL; uid_entry == NULL &&
bkt < HASH_SIZE(hash_table); bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
u64 total_utime = uid_entry->utime +
uid_entry->active_utime;
u64 total_stime = uid_entry->stime +
uid_entry->active_stime;
seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
ktime_to_us(total_utime), ktime_to_us(total_stime));
}
unlock_uid_by_bkt(bkt);
}
rt_mutex_unlock(&uid_lock);
return 0;
}
@@ -420,9 +460,8 @@ static ssize_t uid_remove_write(struct file *file,
return -EINVAL;
}
rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
lock_uid(uid_start);
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
hash, (uid_t)uid_start) {
if (uid_start == uid_entry->uid) {
@@ -431,9 +470,9 @@ static ssize_t uid_remove_write(struct file *file,
kfree(uid_entry);
}
}
unlock_uid(uid_start);
}
rt_mutex_unlock(&uid_lock);
return count;
}
@@ -471,41 +510,59 @@ static void add_uid_io_stats(struct uid_entry *uid_entry,
__add_uid_io_stats(uid_entry, &task->ioac, slot);
}
static void update_io_stats_all_locked(void)
static void update_io_stats_all(void)
{
struct uid_entry *uid_entry = NULL;
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
unsigned long bkt;
u32 bkt;
uid_t uid;
hash_for_each(hash_table, bkt, uid_entry, hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
sizeof(struct io_stats));
set_io_uid_tasks_zero(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
rcu_read_lock();
do_each_thread(temp, task) {
uid = from_kuid_munged(user_ns, task_uid(task));
lock_uid(uid);
if (!uid_entry || uid_entry->uid != uid)
uid_entry = find_or_register_uid(uid);
if (!uid_entry)
if (!uid_entry) {
unlock_uid(uid);
continue;
}
add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
unlock_uid(uid);
} while_each_thread(temp, task);
rcu_read_unlock();
hash_for_each(hash_table, bkt, uid_entry, hash) {
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
&uid_entry->io[UID_STATE_TOTAL_CURR],
&uid_entry->io[UID_STATE_TOTAL_LAST],
&uid_entry->io[UID_STATE_DEAD_TASKS]);
compute_io_uid_tasks(uid_entry);
}
unlock_uid_by_bkt(bkt);
}
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
static void update_io_stats_uid(struct uid_entry *uid_entry)
#else
static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
#endif
{
struct task_struct *task, *temp;
struct user_namespace *user_ns = current_user_ns();
@@ -533,14 +590,15 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
static int uid_io_show(struct seq_file *m, void *v)
{
struct uid_entry *uid_entry;
unsigned long bkt;
u32 bkt;
rt_mutex_lock(&uid_lock);
update_io_stats_all();
for (bkt = 0, uid_entry = NULL; uid_entry == NULL && bkt < HASH_SIZE(hash_table);
bkt++) {
update_io_stats_all_locked();
hash_for_each(hash_table, bkt, uid_entry, hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
lock_uid_by_bkt(bkt);
hlist_for_each_entry(uid_entry, &hash_table[bkt], hash) {
seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
uid_entry->uid,
uid_entry->io[UID_STATE_FOREGROUND].rchar,
uid_entry->io[UID_STATE_FOREGROUND].wchar,
@@ -553,10 +611,11 @@ static int uid_io_show(struct seq_file *m, void *v)
uid_entry->io[UID_STATE_FOREGROUND].fsync,
uid_entry->io[UID_STATE_BACKGROUND].fsync);
show_io_uid_tasks(m, uid_entry);
show_io_uid_tasks(m, uid_entry);
}
unlock_uid_by_bkt(bkt);
}
rt_mutex_unlock(&uid_lock);
return 0;
}
@@ -584,6 +643,9 @@ static ssize_t uid_procstat_write(struct file *file,
uid_t uid;
int argc, state;
char input[128];
#ifndef CONFIG_UID_SYS_STATS_DEBUG
struct uid_entry uid_entry_tmp;
#endif
if (count >= sizeof(input))
return -EINVAL;
@@ -600,24 +662,51 @@ static ssize_t uid_procstat_write(struct file *file,
if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
return -EINVAL;
rt_mutex_lock(&uid_lock);
lock_uid(uid);
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return -EINVAL;
}
if (uid_entry->state == state) {
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return count;
}
#ifndef CONFIG_UID_SYS_STATS_DEBUG
/*
* Update_io_stats_uid_locked would take a long lock-time of uid_lock
* due to call do_each_thread to compute uid_entry->io, which would
* cause to lock competition sometime.
*
* Using uid_entry_tmp to get the result of Update_io_stats_uid,
* so that we can unlock_uid during update_io_stats_uid, in order
* to avoid the unnecessary lock-time of uid_lock.
*/
uid_entry_tmp.uid = uid_entry->uid;
memcpy(uid_entry_tmp.io, uid_entry->io,
sizeof(struct io_stats) * UID_STATE_SIZE);
unlock_uid(uid);
update_io_stats_uid(&uid_entry_tmp);
lock_uid(uid);
hlist_for_each_entry(uid_entry, &hash_table[hash_min(uid, HASH_BITS(hash_table))], hash) {
if (uid_entry->uid == uid_entry_tmp.uid) {
memcpy(uid_entry->io, uid_entry_tmp.io,
sizeof(struct io_stats) * UID_STATE_SIZE);
uid_entry->state = state;
break;
}
}
unlock_uid(uid);
#else
update_io_stats_uid_locked(uid_entry);
uid_entry->state = state;
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
#endif
return count;
}
@@ -629,7 +718,6 @@ static const struct proc_ops uid_procstat_fops = {
};
struct update_stats_work {
struct work_struct work;
uid_t uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
struct task_struct *task;
@@ -637,38 +725,46 @@ struct update_stats_work {
struct task_io_accounting ioac;
u64 utime;
u64 stime;
struct llist_node node;
};
static LLIST_HEAD(work_usw);
static void update_stats_workfn(struct work_struct *work)
{
struct update_stats_work *usw =
container_of(work, struct update_stats_work, work);
struct update_stats_work *usw, *t;
struct uid_entry *uid_entry;
struct task_entry *task_entry __maybe_unused;
struct llist_node *node;
rt_mutex_lock(&uid_lock);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto exit;
node = llist_del_all(&work_usw);
llist_for_each_entry_safe(usw, t, node, node) {
lock_uid(usw->uid);
uid_entry = find_uid_entry(usw->uid);
if (!uid_entry)
goto next;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
uid_entry->utime += usw->utime;
uid_entry->stime += usw->stime;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto exit;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
task_entry = find_task_entry(uid_entry, usw->task);
if (!task_entry)
goto next;
add_uid_tasks_io_stats(task_entry, &usw->ioac,
UID_STATE_DEAD_TASKS);
#endif
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
next:
unlock_uid(usw->uid);
#ifdef CONFIG_UID_SYS_STATS_DEBUG
put_task_struct(usw->task);
put_task_struct(usw->task);
#endif
kfree(usw);
kfree(usw);
}
}
static DECLARE_WORK(update_stats_work, update_stats_workfn);
static int process_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
@@ -682,12 +778,11 @@ static int process_notifier(struct notifier_block *self,
return NOTIFY_OK;
uid = from_kuid_munged(current_user_ns(), task_uid(task));
if (!rt_mutex_trylock(&uid_lock)) {
if (!trylock_uid(uid)) {
struct update_stats_work *usw;
usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
if (usw) {
INIT_WORK(&usw->work, update_stats_workfn);
usw->uid = uid;
#ifdef CONFIG_UID_SYS_STATS_DEBUG
usw->task = get_task_struct(task);
@@ -698,7 +793,8 @@ static int process_notifier(struct notifier_block *self,
*/
usw->ioac = task->ioac;
task_cputime_adjusted(task, &usw->utime, &usw->stime);
schedule_work(&usw->work);
llist_add(&usw->node, &work_usw);
schedule_work(&update_stats_work);
}
return NOTIFY_OK;
}
@@ -716,7 +812,7 @@ static int process_notifier(struct notifier_block *self,
add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
exit:
rt_mutex_unlock(&uid_lock);
unlock_uid(uid);
return NOTIFY_OK;
}
@@ -724,9 +820,18 @@ static struct notifier_block process_notifier_block = {
.notifier_call = process_notifier,
};
static void init_hash_table_and_lock(void)
{
int i;
hash_init(hash_table);
for (i = 0; i < UID_HASH_NUMS; i++)
spin_lock_init(&uid_lock[i]);
}
static int __init proc_uid_sys_stats_init(void)
{
hash_init(hash_table);
init_hash_table_and_lock();
cpu_parent = proc_mkdir("uid_cputime", NULL);
if (!cpu_parent) {

View File

@@ -528,6 +528,7 @@ static void scsi_report_sense(struct scsi_device *sdev,
*/
enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
{
struct request *req = scsi_cmd_to_rq(scmd);
struct scsi_device *sdev = scmd->device;
struct scsi_sense_hdr sshdr;
@@ -679,8 +680,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* have completed.
*/
if (sshdr.asc == 0x21 && sshdr.ascq == 0x04 &&
blk_queue_no_zone_write_lock(scsi_cmd_to_rq(scmd)->q))
blk_queue_no_zone_write_lock(req->q) &&
blk_rq_is_seq_zoned_write(req) &&
scmd->retries < scmd->allowed) {
sdev_printk(KERN_INFO, scmd->device,
"Retrying unaligned write at LBA %#llx\n",
scsi_get_lba(scmd));
return NEEDS_DELAYED_RETRY;
}
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */

View File

@@ -464,7 +464,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
trace_ufshcd_command(cmd->device, str_t, tag,
doorbell, transfer_len, intr, lba, opcode, group_id);
}
@@ -2774,24 +2774,22 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
u8 upiu_flags;
int ret = 0;
if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
}
return ret;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
lrbp->cmd->sc_data_direction, 0);
if (ioprio_class == IOPRIO_CLASS_RT)
upiu_flags |= UPIU_CMD_FLAGS_CP;
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}
/**

View File

@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <trace/hooks/usb.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -1419,6 +1420,11 @@ static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
urb->transfer_buffer = NULL;
}
void _trace_android_vh_xhci_urb_suitable_bypass(struct urb *urb, int *ret)
{
trace_android_vh_xhci_urb_suitable_bypass(urb, ret);
}
/*
* Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
* we'll copy the actual data into the TRB address register. This is limited to

View File

@@ -2235,6 +2235,8 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
urb->stream_id);
}
void _trace_android_vh_xhci_urb_suitable_bypass(struct urb *urb, int *ret);
/*
* TODO: As per spec Isochronous IDT transmissions are supported. We bypass
* them anyways as we where unable to find a device that matches the
@@ -2242,6 +2244,12 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
*/
static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
{
int ret = 1;
_trace_android_vh_xhci_urb_suitable_bypass(urb, &ret);
if (ret <= 0)
return ret == 0;
if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&

View File

@@ -154,20 +154,25 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
*
* Notifies the partner of @adev about Attention command.
*/
int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
{
struct altmode *partner = to_altmode(adev)->partner;
struct typec_altmode *pdev;
/*
* If partner is NULL then a NULL pointer error occurs when
* dereferencing pdev and its operations. The original upstream commit
* changes the return type so the tcpm can log when this occurs, but
* due to KMI restrictions we can only silently prevent the error for
* now.
*/
if (!partner)
return -ENODEV;
return;
pdev = &partner->adev;
if (pdev->ops && pdev->ops->attention)
pdev->ops->attention(pdev, vdo);
return 0;
}
EXPORT_SYMBOL_GPL(typec_altmode_attention);

View File

@@ -1877,8 +1877,7 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
}
break;
case ADEV_ATTENTION:
if (typec_altmode_attention(adev, p[1]))
tcpm_log(port, "typec_altmode_attention no port partner altmode");
typec_altmode_attention(adev, p[1]);
break;
}
}

View File

@@ -582,6 +582,11 @@ static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
return ret;
}
/*
* It will be called only on inode eviction. In case that there are still some
* decompression requests in progress, wait with rescheduling for a bit here.
* We could introduce an extra locking instead but it seems unnecessary.
*/
static void erofs_managed_cache_invalidatepage(struct page *page,
unsigned int offset,
unsigned int length)
@@ -615,8 +620,7 @@ static int erofs_init_managed_cache(struct super_block *sb)
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &managed_cache_aops;
mapping_set_gfp_mask(inode->i_mapping,
GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
sbi->managed_cache = inode;
return 0;
}

View File

@@ -1207,10 +1207,10 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr,
struct page **pagepool,
struct address_space *mc,
gfp_t gfp)
struct address_space *mc)
{
const pgoff_t index = pcl->obj.index;
gfp_t gfp = mapping_gfp_mask(mc);
bool tocache = false;
struct address_space *mapping;
@@ -1440,8 +1440,7 @@ static void z_erofs_submit_queue(struct super_block *sb,
struct page *page;
page = pickup_page_for_submission(pcl, i++, pagepool,
MNGD_MAPPING(sbi),
GFP_NOFS);
MNGD_MAPPING(sbi));
if (!page)
continue;

View File

@@ -4768,6 +4768,9 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
filp->f_mode &= ~FMODE_RANDOM;
spin_unlock(&filp->f_lock);
return 0;
} else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
/* Load extent cache at the first readahead. */
f2fs_precache_extents(inode);
}
err = generic_fadvise(filp, offset, len, advice);

View File

@@ -2735,7 +2735,9 @@ recover_xnid:
f2fs_update_inode_page(inode);
/* 3: update and set xattr node page dirty */
memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
if (page)
memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
VALID_XATTR_BLOCK_SIZE);
set_page_dirty(xpage);
f2fs_put_page(xpage, 1);

View File

@@ -363,10 +363,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
*xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
if (!*xe) {
f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
err = -EFSCORRUPTED;
err = -ENODATA;
f2fs_handle_error(F2FS_I_SB(inode),
ERROR_CORRUPTED_XATTR);
goto out;
@@ -583,13 +583,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
error = -EFSCORRUPTED;
f2fs_handle_error(F2FS_I_SB(inode),
ERROR_CORRUPTED_XATTR);
goto cleanup;
break;
}
if (!handler || (handler->list && !handler->list(dentry)))
@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
retry:
error = read_all_xattrs(inode, ipage, &base_addr);
if (error)
return error;
@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
/* find entry with wanted name. */
here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
if (!here) {
f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
if (!F2FS_I(inode)->i_xattr_nid) {
f2fs_notice(F2FS_I_SB(inode),
"recover xattr in inode (%lu)", inode->i_ino);
f2fs_recover_xattr_data(inode, NULL);
kfree(base_addr);
goto retry;
}
f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
inode->i_ino);
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
error = -EFSCORRUPTED;

View File

@@ -1445,32 +1445,34 @@ int fuse_mkdir_initialize(
int fuse_mkdir_backing(
struct fuse_bpf_args *fa,
struct inode *dir, struct dentry *entry, umode_t mode)
struct inode *dir_inode, struct dentry *entry, umode_t mode)
{
int err = 0;
const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
struct fuse_inode *fuse_inode = get_fuse_inode(dir);
struct inode *backing_inode = fuse_inode->backing_inode;
struct fuse_inode *dir_fuse_inode = get_fuse_inode(dir_inode);
struct inode *dir_backing_inode = dir_fuse_inode->backing_inode;
struct path backing_path = {};
struct inode *inode = NULL;
struct dentry *d;
//TODO Actually deal with changing the backing entry in mkdir
get_fuse_backing_path(entry, &backing_path);
if (!backing_path.dentry)
return -EBADF;
inode_lock_nested(backing_inode, I_MUTEX_PARENT);
inode_lock_nested(dir_backing_inode, I_MUTEX_PARENT);
mode = fmi->mode;
if (!IS_POSIXACL(backing_inode))
if (!IS_POSIXACL(dir_backing_inode))
mode &= ~fmi->umask;
err = vfs_mkdir(&init_user_ns, backing_inode, backing_path.dentry, mode);
err = vfs_mkdir(&init_user_ns, dir_backing_inode, backing_path.dentry,
mode);
if (err)
goto out;
if (d_really_is_negative(backing_path.dentry) ||
unlikely(d_unhashed(backing_path.dentry))) {
d = lookup_one_len(entry->d_name.name, backing_path.dentry->d_parent,
entry->d_name.len);
struct dentry *d = lookup_one_len(entry->d_name.name,
backing_path.dentry->d_parent,
entry->d_name.len);
if (IS_ERR(d)) {
err = PTR_ERR(d);
goto out;
@@ -1478,14 +1480,19 @@ int fuse_mkdir_backing(
dput(backing_path.dentry);
backing_path.dentry = d;
}
inode = fuse_iget_backing(dir->i_sb, fuse_inode->nodeid, backing_inode);
inode = fuse_iget_backing(dir_inode->i_sb, 0,
backing_path.dentry->d_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto out;
}
d_instantiate(entry, inode);
if (get_fuse_inode(inode)->bpf)
bpf_prog_put(get_fuse_inode(inode)->bpf);
get_fuse_inode(inode)->bpf = get_fuse_dentry(entry)->bpf;
get_fuse_dentry(entry)->bpf = NULL;
out:
inode_unlock(backing_inode);
inode_unlock(dir_backing_inode);
path_put(&backing_path);
return err;
}

View File

@@ -99,6 +99,7 @@
#include <linux/cn_proc.h>
#include <linux/cpufreq_times.h>
#include <trace/events/oom.h>
#include <trace/hooks/sched.h>
#include "internal.h"
#include "fd.h"
@@ -345,13 +346,24 @@ static ssize_t get_task_cmdline(struct task_struct *tsk, char __user *buf,
size_t count, loff_t *pos)
{
struct mm_struct *mm;
bool prio_inherited = false;
int saved_prio;
ssize_t ret;
mm = get_task_mm(tsk);
if (!mm)
return 0;
/*
* access_remote_vm() holds the hot mmap_sem lock which can cause the
* task for which we read cmdline etc for by some debug deamon to slow
* down and suffer a performance hit. Especially if the reader task has
* a low nice value.
*/
trace_android_vh_prio_inheritance(tsk, &saved_prio, &prio_inherited);
ret = get_mm_cmdline(mm, buf, count, pos);
if (prio_inherited)
trace_android_vh_prio_restore(saved_prio);
mmput(mm);
return ret;
}

View File

@@ -489,6 +489,9 @@ static int proc_reg_open(struct inode *inode, struct file *file)
typeof_member(struct proc_ops, proc_release) release;
struct pde_opener *pdeo;
if (!pde->proc_ops->proc_lseek)
file->f_mode &= ~FMODE_LSEEK;
if (pde_is_permanent(pde)) {
open = pde->proc_ops->proc_open;
if (open)

View File

@@ -99,9 +99,8 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_DONTPREP | \
RQF_SPECIAL_PAYLOAD)
#define RQF_NOMERGE_FLAGS \
(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
/*
* Request state for blk-mq.

View File

@@ -133,6 +133,7 @@ extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
extern void flush_deferred_probe_now(void);
/* sysfs interface for exporting driver attributes */

View File

@@ -69,7 +69,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,

View File

@@ -40,6 +40,28 @@ DEFINE_EVENT(cpu, cpu_idle,
TP_ARGS(state, cpu_id)
);
TRACE_EVENT(cpu_idle_miss,
TP_PROTO(unsigned int cpu_id, unsigned int state, bool below),
TP_ARGS(cpu_id, state, below),
TP_STRUCT__entry(
__field(u32, cpu_id)
__field(u32, state)
__field(bool, below)
),
TP_fast_assign(
__entry->cpu_id = cpu_id;
__entry->state = state;
__entry->below = below;
),
TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id,
(unsigned long)__entry->state, (__entry->below)?"below":"above")
);
TRACE_EVENT(powernv_throttle,
TP_PROTO(int chip_id, const char *reason, int pmax),

View File

@@ -269,15 +269,15 @@ DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, int transfer_len, u32 intr,
u64 lba, u8 opcode, u8 group_id),
TP_ARGS(dev_name, str_t, tag, doorbell, transfer_len,
TP_ARGS(sdev, str_t, tag, doorbell, transfer_len,
intr, lba, opcode, group_id),
TP_STRUCT__entry(
__string(dev_name, dev_name)
__field(struct scsi_device *, sdev)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -289,7 +289,7 @@ TRACE_EVENT(ufshcd_command,
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
__entry->sdev = sdev;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -302,7 +302,8 @@ TRACE_EVENT(ufshcd_command,
TP_printk(
"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x",
show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
show_ufs_cmd_trace_str(__entry->str_t),
dev_name(&__entry->sdev->sdev_dev),
__entry->tag, __entry->doorbell, __entry->transfer_len,
__entry->intr, __entry->lba, (u32)__entry->opcode,
str_opcode(__entry->opcode), (u32)__entry->group_id

View File

@@ -12,6 +12,7 @@
struct oom_control;
struct slabinfo;
struct cma;
struct acr_info;
struct compact_control;
DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
@@ -40,6 +41,13 @@ DECLARE_HOOK(android_vh_cma_alloc_finish,
TP_PROTO(struct cma *cma, struct page *page, unsigned long count,
unsigned int align, gfp_t gfp_mask, s64 ts),
TP_ARGS(cma, page, count, align, gfp_mask, ts));
DECLARE_HOOK(android_vh_cma_alloc_busy_info,
TP_PROTO(struct acr_info *info),
TP_ARGS(info));
DECLARE_HOOK(android_vh_calc_alloc_flags,
TP_PROTO(gfp_t gfp_mask, unsigned int *alloc_flags,
bool *bypass),
TP_ARGS(gfp_mask, alloc_flags, bypass));
DECLARE_HOOK(android_vh_meminfo_proc_show,
TP_PROTO(struct seq_file *m),
TP_ARGS(m));
@@ -75,6 +83,24 @@ DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
TP_PROTO(int migratetype, bool *bypass),
TP_ARGS(migratetype, bypass));
DECLARE_HOOK(android_vh_free_unref_page_bypass,
TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
TP_ARGS(page, order, migratetype, bypass));
DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
TP_ARGS(size, kmalloc_flags, use_vmalloc));
DECLARE_HOOK(android_vh_should_alloc_pages_retry,
TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
TP_ARGS(gfp_mask, order, alloc_flags,
migratetype, preferred_zone, page, should_alloc_retry));
DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
TP_ARGS(force, zone, skip_unreserve_highatomic));
DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
struct list_head *list),
TP_ARGS(order, pcp, migratetype, list));
DECLARE_HOOK(android_vh_mmap_region,
TP_PROTO(struct vm_area_struct *vma, unsigned long addr),
TP_ARGS(vma, addr));
@@ -134,6 +160,14 @@ DECLARE_HOOK(android_vh_mem_cgroup_css_offline,
DECLARE_HOOK(android_vh_si_meminfo,
TP_PROTO(struct sysinfo *val),
TP_ARGS(val));
DECLARE_HOOK(android_vh_cma_alloc_bypass,
TP_PROTO(struct cma *cma, unsigned long count, unsigned int align,
gfp_t gfp_mask, struct page **page, bool *bypass),
TP_ARGS(cma, count, align, gfp_mask, page, bypass));
DECLARE_HOOK(android_vh_alloc_pages_entry,
TP_PROTO(gfp_t *gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask),
TP_ARGS(gfp, order, preferred_nid, nodemask));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -235,6 +235,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
TP_ARGS(p, rq, need_update), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_util_fits_cpu,
TP_PROTO(unsigned long util, unsigned long uclamp_min, unsigned long uclamp_max,
int cpu, bool *fits, bool *done),
TP_ARGS(util, uclamp_min, uclamp_max, cpu, fits, done), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
TP_PROTO(struct task_struct *p),
TP_ARGS(p), 1);
@@ -449,6 +454,14 @@ DECLARE_HOOK(android_vh_sched_setaffinity_early,
TP_PROTO(struct task_struct *p, const struct cpumask *new_mask, bool *retval),
TP_ARGS(p, new_mask, retval));
DECLARE_HOOK(android_vh_prio_inheritance,
TP_PROTO(struct task_struct *p, int *saved_prio, bool *prio_inherited),
TP_ARGS(p, saved_prio, prio_inherited));
DECLARE_HOOK(android_vh_prio_restore,
TP_PROTO(int saved_prio),
TP_ARGS(saved_prio));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_SCHED_H */

View File

@@ -11,6 +11,7 @@
* mechanism for vendor modules to hook and extend functionality
*/
struct usb_device;
struct urb;
DECLARE_HOOK(android_vh_usb_new_device_added,
TP_PROTO(struct usb_device *udev, int *err),
@@ -24,6 +25,10 @@ DECLARE_HOOK(android_vh_usb_dev_resume,
TP_PROTO(struct usb_device *udev, pm_message_t msg, int *bypass),
TP_ARGS(udev, msg, bypass));
DECLARE_HOOK(android_vh_xhci_urb_suitable_bypass,
TP_PROTO(struct urb *urb, int *ret),
TP_ARGS(urb, ret));
#endif /* _TRACE_HOOK_USB_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -56,15 +56,29 @@ struct fuse_in_postfilter_header {
/** One input argument of a request */
struct fuse_bpf_in_arg {
uint32_t size;
const void *value;
const void *end_offset;
uint32_t padding;
union {
const void *value;
uint64_t padding2;
};
union {
const void *end_offset;
uint64_t padding3;
};
};
/** One output argument of a request */
struct fuse_bpf_arg {
uint32_t size;
void *value;
void *end_offset;
uint32_t padding;
union {
void *value;
uint64_t padding2;
};
union {
void *end_offset;
uint64_t padding3;
};
};
#define FUSE_MAX_IN_ARGS 5
@@ -80,6 +94,7 @@ struct fuse_bpf_args {
uint32_t in_numargs;
uint32_t out_numargs;
uint32_t flags;
uint32_t padding;
struct fuse_bpf_in_arg in_args[FUSE_MAX_IN_ARGS];
struct fuse_bpf_arg out_args[FUSE_MAX_OUT_ARGS];
};

View File

@@ -97,9 +97,10 @@ enum {
UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
};
/* UPIU Read/Write flags */
/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. */
enum {
UPIU_CMD_FLAGS_NONE = 0x00,
UPIU_CMD_FLAGS_CP = 0x04,
UPIU_CMD_FLAGS_WRITE = 0x20,
UPIU_CMD_FLAGS_READ = 0x40,
};

View File

@@ -48,6 +48,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_waking);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_wakeup);
#ifdef CONFIG_SCHEDSTATS
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_wait);

View File

@@ -4166,7 +4166,12 @@ static inline int util_fits_cpu(unsigned long util,
{
unsigned long capacity_orig, capacity_orig_thermal;
unsigned long capacity = capacity_of(cpu);
bool fits, uclamp_max_fits;
bool fits, uclamp_max_fits, done = false;
trace_android_rvh_util_fits_cpu(util, uclamp_min, uclamp_max, cpu, &fits, &done);
if (done)
return fits;
/*
* Check if the real util fits without any uclamp boost/cap applied.

View File

@@ -62,6 +62,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_fits_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ttwu_cond);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug);
@@ -110,3 +111,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rt_rq_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_cpus_allowed_by_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_cpus_allowed_comm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_setaffinity_early);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_inheritance);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prio_restore);

View File

@@ -446,6 +446,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
int max_retries = 5;
s64 ts;
struct cma_alloc_info cma_info = {0};
bool bypass = false;
trace_android_vh_cma_alloc_bypass(cma, count, align, gfp_mask,
&page, &bypass);
if (bypass)
return page;
trace_android_vh_cma_alloc_start(&ts);
@@ -480,8 +486,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
spin_unlock_irq(&cma->lock);
if (fatal_signal_pending(current) ||
(gfp_mask & __GFP_NORETRY))
(gfp_mask & __GFP_NORETRY)) {
ret = -EINTR;
break;
}
/*
* Page may be momentarily pinned by some other
@@ -535,6 +543,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
trace_android_vh_cma_alloc_busy_info(&info);
trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
count, align);

View File

@@ -2617,6 +2617,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
int i, error = 0;
bool writably_mapped;
loff_t isize, end_offset;
loff_t last_pos = ra->prev_pos;
if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
return 0;
@@ -2665,7 +2666,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
* mark it as accessed the first time.
*/
if (iocb->ki_pos >> PAGE_SHIFT !=
ra->prev_pos >> PAGE_SHIFT)
last_pos >> PAGE_SHIFT)
mark_page_accessed(pvec.pages[0]);
for (i = 0; i < pagevec_count(&pvec); i++) {
@@ -2696,7 +2697,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
already_read += copied;
iocb->ki_pos += copied;
ra->prev_pos = iocb->ki_pos;
last_pos = iocb->ki_pos;
if (copied < bytes) {
error = -EFAULT;
@@ -2710,7 +2711,7 @@ put_pages:
} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
file_accessed(filp);
ra->prev_pos = last_pos;
return already_read ? already_read : error;
}
EXPORT_SYMBOL_GPL(filemap_read);

View File

@@ -40,6 +40,7 @@
struct madvise_walk_private {
struct mmu_gather *tlb;
bool pageout;
bool can_pageout_file;
};
/*
@@ -319,6 +320,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
struct madvise_walk_private *private = walk->private;
struct mmu_gather *tlb = private->tlb;
bool pageout = private->pageout;
bool pageout_anon_only = pageout && !private->can_pageout_file;
struct mm_struct *mm = tlb->mm;
struct vm_area_struct *vma = walk->vma;
pte_t *orig_pte, *pte, ptent;
@@ -355,6 +357,9 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
if (page_mapcount(page) != 1)
goto huge_unlock;
if (pageout_anon_only && !PageAnon(page))
goto huge_unlock;
if (next - addr != HPAGE_PMD_SIZE) {
int err;
@@ -423,6 +428,8 @@ regular_page:
if (PageTransCompound(page)) {
if (page_mapcount(page) != 1)
break;
if (pageout_anon_only && !PageAnon(page))
break;
get_page(page);
if (!trylock_page(page)) {
put_page(page);
@@ -450,6 +457,9 @@ regular_page:
if (!PageLRU(page) || page_mapcount(page) != 1)
continue;
if (pageout_anon_only && !PageAnon(page))
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);
if (pte_young(ptent)) {
@@ -527,11 +537,13 @@ static long madvise_cold(struct vm_area_struct *vma,
static void madvise_pageout_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end)
unsigned long addr, unsigned long end,
bool can_pageout_file)
{
struct madvise_walk_private walk_private = {
.pageout = true,
.tlb = tlb,
.can_pageout_file = can_pageout_file,
};
tlb_start_vma(tlb, vma);
@@ -539,10 +551,8 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
tlb_end_vma(tlb, vma);
}
static inline bool can_do_pageout(struct vm_area_struct *vma)
static inline bool can_do_file_pageout(struct vm_area_struct *vma)
{
if (vma_is_anonymous(vma))
return true;
if (!vma->vm_file)
return false;
/*
@@ -562,17 +572,23 @@ static long madvise_pageout(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb;
bool can_pageout_file;
*prev = vma;
if (!can_madv_lru_vma(vma))
return -EINVAL;
if (!can_do_pageout(vma))
return 0;
/*
* If the VMA belongs to a private file mapping, there can be private
* dirty pages which can be paged out if even this process is neither
* owner nor write capable of the file. Cache the file access check
* here and use it later during page walk.
*/
can_pageout_file = can_do_file_pageout(vma);
lru_add_drain();
tlb_gather_mmu(&tlb, mm);
madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
madvise_pageout_page_range(&tlb, vma, start_addr, end_addr, can_pageout_file);
tlb_finish_mmu(&tlb);
return 0;

View File

@@ -1516,14 +1516,22 @@ int add_memory_subsection(int nid, u64 start, u64 size)
ret = arch_add_memory(nid, start, size, &params);
if (ret) {
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n",
__func__, start, size);
goto err_add_memory;
}
mem_hotplug_done();
return ret;
err_add_memory:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
mem_hotplug_done();
release_memory_resource(res);
return ret;
}
EXPORT_SYMBOL_GPL(add_memory_subsection);

View File

@@ -1742,11 +1742,15 @@ static void __free_pages_ok(struct page *page, unsigned int order,
int migratetype;
unsigned long pfn = page_to_pfn(page);
struct zone *zone = page_zone(page);
bool skip_free_unref_page = false;
if (!free_pages_prepare(page, order, true, fpi_flags))
return;
migratetype = get_pfnblock_migratetype(page, pfn);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
spin_lock_irqsave(&zone->lock, flags);
if (unlikely(has_isolate_pageblock(zone) ||
@@ -2988,6 +2992,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
struct page *page;
int order;
bool ret;
bool skip_unreserve_highatomic = false;
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
ac->nodemask) {
@@ -2999,6 +3004,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
pageblock_nr_pages)
continue;
trace_android_vh_unreserve_highatomic_bypass(force, zone,
&skip_unreserve_highatomic);
if (skip_unreserve_highatomic)
continue;
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
struct free_area *area = &(zone->free_area[order]);
@@ -3255,6 +3265,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
int batch = READ_ONCE(pcp->batch);
int alloced;
trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
if (!list_empty(list))
return list;
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
@@ -3264,7 +3278,7 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
*/
if (batch > 1)
batch = max(batch >> order, 2);
alloced = rmqueue_bulk(zone, order, pcp->batch, list, migratetype, alloc_flags);
alloced = rmqueue_bulk(zone, order, batch, list, migratetype, alloc_flags);
pcp->count += alloced << order;
if (list_empty(list))
@@ -3569,20 +3583,26 @@ void free_unref_page(struct page *page, unsigned int order)
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
int migratetype;
int migratetype, pcpmigratetype;
bool pcp_skip_cma_pages = false;
bool skip_free_unref_page = false;
if (!free_unref_page_prepare(page, pfn, order))
return;
migratetype = get_pcppage_migratetype(page);
trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
if (skip_free_unref_page)
return;
/*
* We only track unmovable, reclaimable movable, and CMA on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
* offlined but treat HIGHATOMIC as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free
* offlined but treat HIGHATOMIC and CMA as movable pages so we can
* get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
migratetype = get_pcppage_migratetype(page);
migratetype = pcpmigratetype = get_pcppage_migratetype(page);
if (unlikely(migratetype > MIGRATE_RECLAIMABLE)) {
trace_android_vh_pcplist_add_cma_pages_bypass(migratetype,
&pcp_skip_cma_pages);
@@ -3591,16 +3611,15 @@ void free_unref_page(struct page *page, unsigned int order)
free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
return;
}
migratetype = MIGRATE_MOVABLE;
if (migratetype == MIGRATE_HIGHATOMIC)
migratetype = MIGRATE_MOVABLE;
if (pcpmigratetype == MIGRATE_HIGHATOMIC)
pcpmigratetype = MIGRATE_MOVABLE;
}
zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags);
if (pcp) {
free_unref_page_commit(zone, pcp, page, pfn, migratetype, order);
free_unref_page_commit(zone, pcp, page, pfn, pcpmigratetype, order);
pcp_spin_unlock_irqrestore(pcp, flags);
} else {
free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
@@ -4258,6 +4277,12 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
bool bypass = false;
trace_android_vh_calc_alloc_flags(gfp_mask, &alloc_flags, &bypass);
if (bypass)
return alloc_flags;
if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE && gfp_mask & __GFP_CMA)
alloc_flags |= ALLOC_CMA;
#endif
@@ -5150,6 +5175,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned int zonelist_iter_cookie;
int reserve_flags;
unsigned long alloc_start = jiffies;
bool should_alloc_retry = false;
/*
* We also sanity check to catch abuse of atomic reserves being used by
* callers that are not in atomic context.
@@ -5282,6 +5308,11 @@ retry:
if (current->flags & PF_MEMALLOC)
goto nopage;
trace_android_vh_should_alloc_pages_retry(gfp_mask, order, &alloc_flags,
ac->migratetype, ac->preferred_zoneref->zone, &page, &should_alloc_retry);
if (should_alloc_retry)
goto retry;
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
&did_some_progress);
@@ -5629,6 +5660,7 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = { };
trace_android_vh_alloc_pages_entry(&gfp, order, preferred_nid, nodemask);
/*
* There are several places where we assume that the order value is sane
* so bail out early if the request is out of bound.

View File

@@ -27,8 +27,9 @@
#include <linux/uaccess.h>
#include "internal.h"
#ifndef __GENSYMS__
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif
/**
@@ -598,6 +599,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
gfp_t kmalloc_flags = flags;
void *ret;
bool use_vmalloc = false;
/*
* vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -606,6 +608,9 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
if ((flags & GFP_KERNEL) != GFP_KERNEL)
return kmalloc_node(size, flags, node);
trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
if (use_vmalloc)
goto use_vmalloc_node;
/*
* We want to attempt a large physically contiguous block first because
* it is less likely to fragment multiple larger blocks and therefore
@@ -635,6 +640,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
return NULL;
}
use_vmalloc_node:
return __vmalloc_node(size, 1, flags, node,
__builtin_return_address(0));
}

View File

@@ -6,8 +6,9 @@ This module contains a full list of kernel modules
compiled by GKI.
"""
COMMON_GKI_MODULES_LIST = [
_COMMON_GKI_MODULES_LIST = [
# keep sorted
"drivers/block/null_blk/null_blk.ko",
"drivers/block/zram/zram.ko",
"drivers/bluetooth/btbcm.ko",
"drivers/bluetooth/btqca.ko",
@@ -67,3 +68,43 @@ COMMON_GKI_MODULES_LIST = [
"net/tipc/diag.ko",
"net/tipc/tipc.ko",
]
# Deprecated - Use `get_gki_modules_list` function instead.
COMMON_GKI_MODULES_LIST = _COMMON_GKI_MODULES_LIST
_ARM64_GKI_MODULES_LIST = [
# keep sorted
]
_RISCV64_GKI_MODULES_LIST = [
# keep sorted
]
_X86_64_GKI_MODULES_LIST = [
# keep sorted
]
# buildifier: disable=unnamed-macro
def get_gki_modules_list(arch = None):
""" Provides the list of GKI modules.
Args:
arch: One of [arm64, x86_64, riscv64].
Returns:
The list of GKI modules for the given |arch|.
"""
gki_modules_list = [] + _COMMON_GKI_MODULES_LIST
if arch == "arm64":
gki_modules_list += _ARM64_GKI_MODULES_LIST
elif arch == "x86_64":
gki_modules_list += _X86_64_GKI_MODULES_LIST
elif arch == "riscv64":
gki_modules_list += _RISCV64_GKI_MODULES_LIST
else:
fail("{}: arch {} not supported. Use one of [arm64, x86_64, riscv64]".format(
str(native.package_relative_label(":x")).removesuffix(":x"),
arch,
))
return gki_modules_list

View File

@@ -412,8 +412,9 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
{
struct sock *sk;
int err = 0;
unsigned long flags;
raw_spin_lock_bh(&stab->lock);
raw_spin_lock_irqsave(&stab->lock, flags);
sk = *psk;
if (!sk_test || sk_test == sk)
sk = xchg(psk, NULL);
@@ -423,7 +424,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
else
err = -EINVAL;
raw_spin_unlock_bh(&stab->lock);
raw_spin_unlock_irqrestore(&stab->lock, flags);
return err;
}
@@ -930,11 +931,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
struct bpf_shtab_bucket *bucket;
struct bpf_shtab_elem *elem;
int ret = -ENOENT;
unsigned long flags;
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
raw_spin_lock_bh(&bucket->lock);
raw_spin_lock_irqsave(&bucket->lock, flags);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem) {
hlist_del_rcu(&elem->node);
@@ -942,7 +944,7 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
sock_hash_free_elem(htab, elem);
ret = 0;
}
raw_spin_unlock_bh(&bucket->lock);
raw_spin_unlock_irqrestore(&bucket->lock, flags);
return ret;
}

View File

@@ -587,9 +587,9 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
}
if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
if (hooknum <= NF_INET_FORWARD) {
struct nfulnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
struct timespec64 kts = ktime_to_timespec64(skb->tstamp ?: ktime_get_real());
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

View File

@@ -44,7 +44,7 @@ $(OUTPUT) $(OUTPUT)/libbpf $(OUTPUT)/libsubcmd:
$(Q)mkdir -p $(@)
$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
$(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
$(Q)$(MAKE) -C $(SUBCMD_SRC) EXTRA_CFLAGS="$(CFLAGS)" OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ \

View File

@@ -637,10 +637,11 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
char *buf = NULL;
int i;
astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
for (int i = 0; subcommands[i]; i++) {
for (i = 0; subcommands[i]; i++) {
if (i)
astrcat(&buf, "|");
astrcat(&buf, subcommands[i]);
@@ -666,7 +667,9 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
exit(130);
case PARSE_OPT_LIST_SUBCMDS:
if (subcommands) {
for (int i = 0; subcommands[i]; i++)
int i;
for (i = 0; subcommands[i]; i++)
printf("%s ", subcommands[i]);
}
putchar('\n');

View File

@@ -0,0 +1,31 @@
# Defconfig fragment for Android Kunit targets
#
# Instead of setting CONFIG_KUNIT_ALL_TESTS=m, we enable individual tests
# because:
# - The defconfig fragment is applied after make defconfig
# - If additional tests are added to CONFIG_KUNIT_ALL_TESTS in the future,
# //common:kunit_* module_outs needs to be updated.
# CONFIG_MODULE_SIG_ALL is not set
# Corresponds to BUILD.bazel, _KUNIT_COMMON_MODULES
CONFIG_TIME_KUNIT_TEST=m
CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST=m
CONFIG_RTC_LIB_KUNIT_TEST=m
CONFIG_EXT4_KUNIT_TESTS=m
CONFIG_FAT_KUNIT_TEST=m
CONFIG_KFENCE_KUNIT_TEST=m
CONFIG_KUNIT_TEST=m
CONFIG_KUNIT_EXAMPLE_TEST=m
# CONFIG_NET_HANDSHAKE is not enabled in gki_defconfig.
# CONFIG_NET_HANDSHAKE_KUNIT_TEST=m
# TODO(b/296116800): Enable these tests
# CONFIG_DRM_KUNIT_TEST=m
# CONFIG_KASAN_KUNIT_TEST=m
# TODO(b/296116800): These are booleans, not tristates.
# CONFIG_BINFMT_ELF_KUNIT_TEST=y
# CONFIG_PM_QOS_KUNIT_TEST=y
# CONFIG_DRIVER_PE_KUNIT_TEST=y

View File

@@ -2047,6 +2047,38 @@ out:
return result;
}
static int bpf_test_mkdir_and_remove_bpf(const char *mount_dir)
{
const char *dir = "dir";
int result = TEST_FAILURE;
int src_fd = -1;
int bpf_fd = -1;
int fuse_dev = -1;
int fd = -1;
int fd2 = -1;
TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
src_fd != -1);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_mkdir_remove", &bpf_fd,
NULL, NULL), 0);
TESTEQUAL(mount_fuse_no_init(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
TEST(fd = s_mkdir(s_path(s(mount_dir), s(dir)), 0777),
fd != -1);
TEST(fd2 = s_open(s_path(s(mount_dir), s(dir)), O_RDONLY),
fd2 != -1);
result = TEST_SUCCESS;
out:
close(fd2);
close(fd);
close(fuse_dev);
close(bpf_fd);
close(src_fd);
umount(mount_dir);
return result;
}
static void parse_range(const char *ranges, bool *run_test, size_t tests)
{
size_t i;
@@ -2175,6 +2207,7 @@ int main(int argc, char *argv[])
MAKE_TEST(bpf_test_lookup_postfilter),
MAKE_TEST(flock_test),
MAKE_TEST(bpf_test_create_and_remove_bpf),
MAKE_TEST(bpf_test_mkdir_and_remove_bpf),
};
#undef MAKE_TEST

View File

@@ -530,4 +530,26 @@ int createremovebpf_test(struct fuse_bpf_args *fa)
}
}
SEC("test_mkdir_remove")
int mkdirremovebpf_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
return FUSE_BPF_BACKING | FUSE_BPF_POST_FILTER;
}
case FUSE_LOOKUP | FUSE_POSTFILTER: {
struct fuse_entry_bpf_out *febo = fa->out_args[1].value;
febo->bpf_action = FUSE_ACTION_REMOVE;
return 0;
}
case FUSE_OPENDIR | FUSE_PREFILTER: {
return -EIO;
}
default:
return FUSE_BPF_BACKING;
}
}