Merge android14-5.15 into android14-5.15-lts

This catches the android14-5.15-lts branch up with a number of bugfixes
and ABI additions that were in the android14-5.15 branch.

This consists of the following commits:

* 95e0307577 BACKPORT: blk-crypto: dynamically allocate fallback profile
* c7860b4dbd UPSTREAM: media: usb: siano: Fix warning due to null work_func_t function pointer
* cb69585f72 UPSTREAM: Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb
* 064fe2809f ANDROID: Delete build.config.gki.aarch64.16k.
* 5acbeb3895 FROMGIT: usb: typec: tcpm: Refactor the PPS APDO selection
* 90ad33eb0e ANDROID: GKI: Update symbol list for lenovo
* 844dfdef1c UPSTREAM: net: tap_open(): set sk_uid from current_fsuid()
* 7e0b682baf UPSTREAM: net: tun_chr_open(): set sk_uid from current_fsuid()
* 09eac0d3a8 UPSTREAM: usb: typec: tcpm: Fix response to vsafe0V event
* 8f1d7c6850 ANDROID: Update the ABI symbol list
* 54afed884d UPSTREAM: net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free
* d6c1899f33 UPSTREAM: net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free
* cb45423bcc UPSTREAM: net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free
* 880189fdd2 UPSTREAM: netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
* 4c73cba07b ANDROID: usb: gadget: f_accessory: Mitgate handling of non-existent USB request
* eebccae505 ANDROID: Update the ABI symbol list
* a845525d3e FROMGIT: Multi-gen LRU: skip CMA pages when they are not eligible
* 4502265e16 BACKPORT: mm: skip CMA pages when they are not available
* 53c38ebc4e ANDROID: dm-bow: Protect Ranges fetched and erased from the RB tree
* b88b3d3664 UPSTREAM: netfilter: nf_tables: skip bound chain on rule flush
* 6bcc6ff841 UPSTREAM: tty: n_gsm: fix UAF in gsm_cleanup_mux
* b4a6c6f4a1 UPSTREAM: net/sched: sch_qfq: account for stab overhead in qfq_enqueue
* 1b2e6ebdc6 UPSTREAM: net/sched: sch_qfq: refactor parsing of netlink parameters
* 4128e28878 UPSTREAM: netfilter: nft_set_pipapo: fix improper element removal
* 1122dd3711 ANDROID: Add checkpatch target.
* 056a17677d BACKPORT: FROMGIT: usb: typec: altmodes/displayport: Signal hpd when configuring pin assignment
* ee1147625d BACKPORT: USB: Gadget: core: Help prevent panic during UVC unconfigure
* 7ba2131ad4 FROMLIST: scsi: ufs: Disable zone write locking
* 4224108a41 FROMLIST: scsi: ufs: Split an if-condition
* 1c33b80bf7 FROMLIST: scsi: scsi_debug: Support injecting unaligned write errors
* 6c0276ef82 FROMLIST: scsi: scsi_debug: Support disabling zone write locking
* 3347f7daae FROMLIST: scsi: core: Retry unaligned zoned writes
* 4e1d1b839d FROMLIST: block/mq-deadline: Only use zone locking if necessary
* 5414ea3f50 BACKPORT: FROMLIST: block: Introduce the flag QUEUE_FLAG_NO_ZONE_WRITE_LOCK
* d01888a968 Revert "ANDROID: block: Introduce a request queue flag for pipelining zoned writes"
* 6a1ceeede7 Revert "ANDROID: block/mq-deadline: Only use zone locking if necessary"
* dc77af3d38 Revert "ANDROID: scsi: Retry unaligned zoned writes"
* 2e83e40f6a Revert "ANDROID: scsi: ufs: Enable zoned write pipelining"
* e08f97fe80 Revert "ANDROID: scsi: ufs: Disable zoned write pipelining"
* a0565250b3 Revert "ANDROID: scsi: core: Delay unaligned write error retries"
* 053f022ed4 Revert "ANDROID: scsi: core: Limit zoned write retries"
* 8c7e6396da ANDROID: GKI: update mtktv symbol
* 7d47dd77f1 ANDROID: GKI: Add mtktv ABI symbol list, media, trace and remaining subsys
* 655a288103 ANDROID: GKI: Add mtktv ABI symbol list, cpufreq and misc
* 663ca511e8 ANDROID: GKI: Add mtktv ABI symbol list, net, eth, regulator and pinmux
* 99c011c37d ANDROID: GKI: Add mtktv ABI symbol list, pwm, serial and snd
* b85af8a6dd ANDROID: GKI: Add mtktv ABI symbol list, usb and v4l2
* 0f986fae12 FROMGIT: BACKPORT: Multi-gen LRU: Fix can_swap in lru_gen_look_around()
* 12fb1c3ef5 FROMGIT: BACKPORT: Multi-gen LRU: Avoid race in inc_min_seq()
* bf03a7c90a FROMGIT: BACKPORT: Multi-gen LRU: Fix per-zone reclaim
* 0c5273e138 BACKPORT: FROMGIT: block: Improve performance for BLK_MQ_F_BLOCKING drivers
* d12306bb95 FROMGIT: scsi: Remove a blk_mq_run_hw_queues() call
* 8bb4682c76 BACKPORT: FROMGIT: scsi: Inline scsi_kick_queue()
* 4bbc8e45b2 ANDROID: ABI: Update STG ABI to format version 2
* a9bc04930f ANDROID: GKI: usb: phy: use ANDROID_KABI_USE for api notify_port_status
* 7aa67c1a12 FROMGIT: dt-bindings: phy: realtek: Add Realtek DHC RTD SoC USB 3.0 PHY
* d58cdad855 FROMGIT: dt-bindings: phy: realtek: Add Realtek DHC RTD SoC USB 2.0 PHY
* 6e97af079d FROMGIT: phy: realtek: usb: Add driver for the Realtek SoC USB 3.0 PHY
* 3dadac8ba6 FROMGIT: phy: realtek: usb: Add driver for the Realtek SoC USB 2.0 PHY
* a600af2c26 FROMGIT: usb: phy: add usb phy notify port status API
* 46ee6c3cdd ANDROID: GKI: Create symbol files in include/config
* 9c0a91f91a ANDROID: fuse-bpf: Use stored bpf for create_open
* 77092bb630 ANDROID: fuse-bpf: Add bpf to negative fuse_dentry
* fe475ca0b5 ANDROID: fuse-bpf: Check inode not null
* feb5ea6684 ANDROID: fuse-bpf: Fix flock test compile error
* 3c49a49167 ANDROID: fuse-bpf: Add partial flock support
* 95e1c94a22 UPSTREAM: dm init: add dm-mod.waitfor to wait for asynchronously probed block devices
* 098173a46b ANDROID: Update the ABI symbol list
* 619a5f635c ANDROID: cpuidle: teo: Export a function that allows modifying util_threshold
* d92dd7312f Revert "ANDROID: GKI: Remove temp build file abi_gki_protected_exports"
* 924b7017dd ANDROID: Update the ABI symbol list
* c82392dabd BACKPORT: blk-crypto: use dynamic lock class for blk_crypto_profile::lock
* 44cf75cc35 ANDROID: KVM: arm64: Fix memory ordering for pKVM module callbacks
* fddd85ce63 ANDROID: GKI: Update symbol list for Amlogic
* f686a35a77 UPSTREAM: net/sched: cls_fw: Fix improper refcount update leads to use-after-free
* 625e1470a9 UPSTREAM: netfilter: nf_tables: fix chain binding transaction logic
* de818a4efb UPSTREAM: fs/ntfs3: Check fields while reading
* 48fffa48b0 ANDROID: GKI: Move GKI module headers to generated includes
* c51761363c ANDROID: set kmi_symbol_list_add_only for Kleaf builds.
* 1281598c53 ANDROID: GKI: Remove temp build file abi_gki_protected_exports
* 48916f9971 ANDROID: GKI: Update symbol list for Amlogic
* c2591e463d UPSTREAM: ASoC: soc-pcm: Move debugfs removal out of spinlock
* 4dc2398a95 UPSTREAM: ASoC: soc-pcm: Fix DPCM lockdep warning due to nested stream locks
* 1dd1248e12 FROMLIST: fuse: revalidate: don't invalidate if interrupted
* ef049b5a71 ANDROID: GKI: Update symbol list for Amlogic
* 88829ece33 UPSTREAM: squashfs: always build "file direct" version of page actor
* 044746150d UPSTREAM: squashfs: fix cache race with migration
* f15bd09d1e BACKPORT: squashfs: cache partial compressed blocks
* 9d4be29185 UPSTREAM: squashfs: fix buffer release race condition in readahead code
* 813c3dec23 UPSTREAM: squashfs: fix extending readahead beyond end of file
* dec4ef3ce4 UPSTREAM: squashfs: fix read regression introduced in readahead code
* 80e656926e UPSTREAM: squashfs: don't call kmalloc in decompressors
* 401371c174 UPSTREAM: squashfs: don't use intermediate buffer if pages missing
* c7f85bb4c4 UPSTREAM: squashfs: extend "page actor" to handle missing pages
* af51b9dded UPSTREAM: squashfs: support reading fragments in readahead call
* 3ba07deef7 UPSTREAM: squashfs: implement readahead
* feb80c37c6 UPSTREAM: gfs2: Don't deref jdesc in evict
* 578ffd6434 UPSTREAM: media: dvb-core: Fix kernel WARNING for blocking operation in wait_event*()
* 9b30cd89af ANDROID: Incremental fs: Allocate data buffer based on input request size
* a6ab807946 ANDROID: GKI: Update symbol list for Amlogic
* b4a6ab6566 ANDROID: KVM: arm64: Fix MMU context save/restore over TLB invalidation
* 770ba0ef6a ANDROID: Update the ABI symbol list
* 48d77946ef UPSTREAM: kasan: suppress recursive reports for HW_TAGS
* 47c669cde1 UPSTREAM: kasan, arm64: add arch_suppress_tag_checks_start/stop
* 03471b2b03 BACKPORT: arm64: mte: rename TCO routines
* 867621cdc5 BACKPORT: kasan, arm64: rename tagging-related routines
* 4c8e131d55 UPSTREAM: kasan: drop empty tagging-related defines
* c624358178 ANDROID: GKI: Update symbol list for Amlogic
* 0c09eb760b ANDROID: Update the ABI symbol list
* ca0cd37761 UPSTREAM: usb: gadget: u_serial: Add null pointer check in gs_start_io
* b0992aa672 ANDROID: Update the ABI symbol list
* fb6593baff ANDROID: ABI: Update lenovo symbol list
* 28c0341559 ANDROID: Update the ABI symbol list
* 40a4ec538f UPSTREAM: fsverity: reject FS_IOC_ENABLE_VERITY on mode 3 fds
* 28d90f10d5 UPSTREAM: fsverity: explicitly check for buffer overflow in build_merkle_tree()
* d53de05681 ANDROID: GKI: Update RTK STB KMI symbol list
* 0765cda329 UPSTREAM: f2fs: fix deadlock in i_xattr_sem and inode page lock
* 38fff8f312 Revert "FROMLIST: f2fs: remove i_xattr_sem to avoid deadlock and fix the original issue"
* 60a2ccabe2 UPSTREAM: usb: gadget: udc: renesas_usb3: Fix use after free bug in renesas_usb3_remove due to race condition
* ebe7bbdffd UPSTREAM: media: rkvdec: fix use after free bug in rkvdec_remove
* 4d634bb7be UPSTREAM: relayfs: fix out-of-bounds access in relay_file_read
* b8cb7eb0b4 BACKPORT: revert "net: align SO_RCVMARK required privileges with SO_MARK"
* 9b46997240 UPSTREAM: wifi: cfg80211: fix link del callback to call correct handler
* dc11ed25f7 UPSTREAM: wifi: cfg80211: reject bad AP MLD address
* 2e6bf292f3 UPSTREAM: KVM: arm64: Populate fault info for watchpoint
* c8a3a08497 UPSTREAM: KVM: Fix vcpu_array[0] races
* d18fa8c525 UPSTREAM: media: pvrusb2: fix DVB_CORE dependency
* f4aace942a UPSTREAM: kasan: hw_tags: avoid invalid virt_to_page()
* 8f4b51c499 UPSTREAM: scsi: ufs: core: mcq: Fix &hwq->cq_lock deadlock issue
* 94fb13dc4f UPSTREAM: x86/mm: Avoid using set_pgd() outside of real PGD pages
* 759c5c3fc2 UPSTREAM: netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE
* be89d165e3 UPSTREAM: net/sched: flower: fix possible OOB write in fl_set_geneve_opt()
* 4ae6b40b7c UPSTREAM: PCI/PM: Extend D3hot delay for NVIDIA HDA controllers
* 738dfcc029 UPSTREAM: wifi: cfg80211: fix MLO connection ownership
* d0e0e85d34 UPSTREAM: wifi: nl80211: fix NULL-ptr deref in offchan check
* 9e7678cc60 UPSTREAM: scsi: ufs: mcq: Use active_reqs to check busy in clock scaling
* 9d0d5eacda UPSTREAM: scsi: ufs: mcq: qcom: Clean the return path of ufs_qcom_mcq_config_resource()
* fa5c4a2186 UPSTREAM: scsi: ufs: mcq: qcom: Fix passing zero to PTR_ERR
* 63ab8dfd17 UPSTREAM: scsi: ufs: mcq: Fix incorrectly set queue depth
* 6423bd5a46 UPSTREAM: net: use a bounce buffer for copying skb->mark
* 656563759a UPSTREAM: io_uring: hold uring mutex around poll removal
* 1f5a89e0cc ANDROID: Set arch attribute for allmodconfig builds
* ceb26af319 ANDROID: KVM: arm64: Remove 'struct kvm_vcpu' from the KMI
* aad223db39 UPSTREAM: KVM: arm64: Restore GICv2-on-GICv3 functionality
* 2c17fbc0d9 UPSTREAM: KVM: arm64: vgic: Wrap vgic_its_create() with config_lock
* ec0944c324 UPSTREAM: KVM: arm64: vgic: Fix a circular locking issue
* e4b31e748a UPSTREAM: KVM: arm64: vgic: Don't acquire its_lock before config_lock
* b7e1f97ef7 BACKPORT: KVM: arm64: Avoid lock inversion when setting the VM register width
* 0c5ec70ec3 UPSTREAM: KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON
* 60266126b3 BACKPORT: KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
* 1536afa216 UPSTREAM: KVM: arm64: Use config_lock to protect vgic state
* 1d194af64a BACKPORT: KVM: arm64: Add helper vgic_write_guest_lock()
* 54b1b225ed UPSTREAM: ipvlan:Fix out-of-bounds caused by unclear skb->cb
* b31675307e UPSTREAM: net/sched: cls_u32: Fix reference counter leak leading to overflow
* eda34db29b ANDROID: GKI: Update symbol list for Amlogic
* d8eb5e7ca9 ANDROID: db845c: Fix build when using --kgdb
* d40f3254b6 FROMLIST: kheaders: dereferences the source tree
* 2ebd113814 FROMLIST: f2fs: remove i_xattr_sem to avoid deadlock and fix the original issue
* 258f11319b ANDROID: db845c: Local define for db845c targets
* 2af5c43333 ANDROID: Update the ABI symbol list
* 5af00d8531 ANDROID: Export cpu_push_stop
* 3c328a636a ANDROID: Update the ABI symbol list
* bdd2312e95 ANDROID: rockpi4: Fix build when using --kgdb
* d1601b50e6 ANDROID: kleaf: android/gki_system_dlkm_modules is generated.
* a7068670a7 ANDROID: fuse-bpf: Move FUSE_RELEASE to correct place
* ad20125502 ANDROID: fuse-bpf: Ensure bpf field can never be nulled
* bd30e9ff41 ANDROID: Update the ABI symbol list
* 7a26ac9e26 ANDROID: Delete MODULES_LIST from build configs.
* 270b27a648 ANDROID: ABI: Update lenovo symbol list
* 0a9d005c81 UPSTREAM: memstick: r592: Fix UAF bug in r592_remove due to race condition
* d465a446d2 UPSTREAM: xfs: verify buffer contents when we skip log replay

Change-Id: I56fc52175fbae29d6dea862b66a619b249e93e7c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2023-08-24 18:34:00 +00:00
126 changed files with 14464 additions and 7984 deletions

View File

@@ -1,15 +1,19 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 The Android Open Source Project
load("@bazel_skylib//rules:write_file.bzl", "write_file")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels", "define_db845c")
load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels")
load(
"//build/kernel/kleaf:kernel.bzl",
"checkpatch",
"ddk_headers",
"kernel_abi",
"kernel_build",
"kernel_images",
"kernel_modules_install",
"kernel_unstripped_modules_archive",
"merged_kernel_uapi_headers",
)
load(":modules.bzl", "COMMON_GKI_MODULES_LIST")
@@ -31,6 +35,20 @@ _GKI_X86_64_MAKE_GOALS = [
"modules",
]
checkpatch(
name = "checkpatch",
checkpatch_pl = "scripts/checkpatch.pl",
)
write_file(
name = "gki_system_dlkm_modules",
out = "android/gki_system_dlkm_modules",
content = COMMON_GKI_MODULES_LIST + [
# Ensure new line at the end.
"",
],
)
filegroup(
name = "aarch64_additional_kmi_symbol_lists",
srcs = [
@@ -40,6 +58,7 @@ filegroup(
"android/abi_gki_aarch64_exynos",
"android/abi_gki_aarch64_fips140",
"android/abi_gki_aarch64_lenovo",
"android/abi_gki_aarch64_mtktv",
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_rtkstb",
@@ -56,6 +75,7 @@ define_common_kernels(target_configs = {
"kmi_symbol_list_strict_mode": True,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"kmi_symbol_list": "android/abi_gki_aarch64",
"kmi_symbol_list_add_only": True,
"additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
"protected_exports_list": "android/abi_gki_protected_exports_aarch64",
"protected_modules_list": "android/gki_aarch64_protected_modules",
@@ -70,6 +90,7 @@ define_common_kernels(target_configs = {
"kmi_symbol_list_strict_mode": False,
"module_implicit_outs": COMMON_GKI_MODULES_LIST,
"kmi_symbol_list": "android/abi_gki_aarch64",
"kmi_symbol_list_add_only": True,
"additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"],
"protected_exports_list": "android/abi_gki_protected_exports_aarch64",
"protected_modules_list": "android/gki_aarch64_protected_modules",
@@ -91,189 +112,340 @@ define_common_kernels(target_configs = {
},
})
define_db845c(
name = "db845c",
_DB845C_MODULE_OUTS = [
# keep sorted
"crypto/michael_mic.ko",
"drivers/base/regmap/regmap-sdw.ko",
"drivers/base/regmap/regmap-slimbus.ko",
"drivers/bus/mhi/host/mhi.ko",
"drivers/clk/qcom/clk-qcom.ko",
"drivers/clk/qcom/clk-rpmh.ko",
"drivers/clk/qcom/clk-spmi-pmic-div.ko",
"drivers/clk/qcom/dispcc-sdm845.ko",
"drivers/clk/qcom/dispcc-sm8250.ko",
"drivers/clk/qcom/gcc-sdm845.ko",
"drivers/clk/qcom/gcc-sm8250.ko",
"drivers/clk/qcom/gpucc-sdm845.ko",
"drivers/clk/qcom/gpucc-sm8250.ko",
"drivers/clk/qcom/lpass-gfm-sm8250.ko",
"drivers/clk/qcom/videocc-sdm845.ko",
"drivers/clk/qcom/videocc-sm8250.ko",
"drivers/cpufreq/qcom-cpufreq-hw.ko",
"drivers/dma-buf/heaps/system_heap.ko",
"drivers/dma/qcom/bam_dma.ko",
"drivers/extcon/extcon-usb-gpio.ko",
"drivers/firmware/qcom-scm.ko",
"drivers/gpio/gpio-wcd934x.ko",
"drivers/gpu/drm/bridge/display-connector.ko",
"drivers/gpu/drm/bridge/lontium-lt9611.ko",
"drivers/gpu/drm/bridge/lontium-lt9611uxc.ko",
"drivers/gpu/drm/msm/msm.ko",
"drivers/gpu/drm/scheduler/gpu-sched.ko",
"drivers/hwspinlock/qcom_hwspinlock.ko",
"drivers/i2c/busses/i2c-designware-core.ko",
"drivers/i2c/busses/i2c-designware-platform.ko",
"drivers/i2c/busses/i2c-qcom-geni.ko",
"drivers/i2c/busses/i2c-qup.ko",
"drivers/i2c/busses/i2c-rk3x.ko",
"drivers/i2c/i2c-dev.ko",
"drivers/i2c/i2c-mux.ko",
"drivers/i2c/muxes/i2c-mux-pca954x.ko",
"drivers/iio/adc/qcom-spmi-adc5.ko",
"drivers/iio/adc/qcom-vadc-common.ko",
"drivers/input/misc/pm8941-pwrkey.ko",
"drivers/interconnect/qcom/icc-bcm-voter.ko",
"drivers/interconnect/qcom/icc-osm-l3.ko",
"drivers/interconnect/qcom/icc-rpmh.ko",
"drivers/interconnect/qcom/qnoc-sdm845.ko",
"drivers/interconnect/qcom/qnoc-sm8250.ko",
"drivers/iommu/arm/arm-smmu/arm_smmu.ko",
"drivers/irqchip/qcom-pdc.ko",
"drivers/leds/led-class-multicolor.ko",
"drivers/mailbox/qcom-apcs-ipc-mailbox.ko",
"drivers/mailbox/qcom-ipcc.ko",
"drivers/mfd/qcom-spmi-pmic.ko",
"drivers/mfd/wcd934x.ko",
"drivers/misc/fastrpc.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/sdhci-msm.ko",
"drivers/net/can/spi/mcp251xfd/mcp251xfd.ko",
"drivers/net/wireless/ath/ath.ko",
"drivers/net/wireless/ath/ath10k/ath10k_core.ko",
"drivers/net/wireless/ath/ath10k/ath10k_pci.ko",
"drivers/net/wireless/ath/ath10k/ath10k_snoc.ko",
"drivers/net/wireless/ath/ath11k/ath11k.ko",
"drivers/net/wireless/ath/ath11k/ath11k_ahb.ko",
"drivers/net/wireless/ath/ath11k/ath11k_pci.ko",
"drivers/nvmem/nvmem_qfprom.ko",
"drivers/phy/qualcomm/phy-qcom-qmp.ko",
"drivers/phy/qualcomm/phy-qcom-qusb2.ko",
"drivers/phy/qualcomm/phy-qcom-snps-femto-v2.ko",
"drivers/phy/qualcomm/phy-qcom-usb-hs.ko",
"drivers/pinctrl/qcom/pinctrl-lpass-lpi.ko",
"drivers/pinctrl/qcom/pinctrl-msm.ko",
"drivers/pinctrl/qcom/pinctrl-sdm845.ko",
"drivers/pinctrl/qcom/pinctrl-sm8250.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko",
"drivers/power/reset/qcom-pon.ko",
"drivers/power/reset/reboot-mode.ko",
"drivers/power/reset/syscon-reboot-mode.ko",
"drivers/regulator/gpio-regulator.ko",
"drivers/regulator/qcom-rpmh-regulator.ko",
"drivers/regulator/qcom_spmi-regulator.ko",
"drivers/regulator/qcom_usb_vbus-regulator.ko",
"drivers/remoteproc/qcom_common.ko",
"drivers/remoteproc/qcom_pil_info.ko",
"drivers/remoteproc/qcom_q6v5.ko",
"drivers/remoteproc/qcom_q6v5_adsp.ko",
"drivers/remoteproc/qcom_q6v5_mss.ko",
"drivers/remoteproc/qcom_q6v5_pas.ko",
"drivers/remoteproc/qcom_q6v5_wcss.ko",
"drivers/remoteproc/qcom_sysmon.ko",
"drivers/reset/reset-qcom-aoss.ko",
"drivers/reset/reset-qcom-pdc.ko",
"drivers/rpmsg/qcom_glink.ko",
"drivers/rpmsg/qcom_glink_rpm.ko",
"drivers/rpmsg/qcom_glink_smem.ko",
"drivers/rpmsg/qcom_smd.ko",
"drivers/rpmsg/rpmsg_ns.ko",
"drivers/rtc/rtc-pm8xxx.ko",
"drivers/slimbus/slim-qcom-ngd-ctrl.ko",
"drivers/slimbus/slimbus.ko",
"drivers/soc/qcom/apr.ko",
"drivers/soc/qcom/cmd-db.ko",
"drivers/soc/qcom/llcc-qcom.ko",
"drivers/soc/qcom/mdt_loader.ko",
"drivers/soc/qcom/pdr_interface.ko",
"drivers/soc/qcom/qcom_aoss.ko",
"drivers/soc/qcom/qcom_rpmh.ko",
"drivers/soc/qcom/qmi_helpers.ko",
"drivers/soc/qcom/rmtfs_mem.ko",
"drivers/soc/qcom/rpmhpd.ko",
"drivers/soc/qcom/smem.ko",
"drivers/soc/qcom/smp2p.ko",
"drivers/soc/qcom/smsm.ko",
"drivers/soc/qcom/socinfo.ko",
"drivers/soundwire/soundwire-bus.ko",
"drivers/soundwire/soundwire-qcom.ko",
"drivers/spi/spi-geni-qcom.ko",
"drivers/spi/spi-pl022.ko",
"drivers/spi/spi-qcom-qspi.ko",
"drivers/spi/spi-qup.ko",
"drivers/spmi/spmi-pmic-arb.ko",
"drivers/thermal/qcom/lmh.ko",
"drivers/thermal/qcom/qcom-spmi-adc-tm5.ko",
"drivers/thermal/qcom/qcom-spmi-temp-alarm.ko",
"drivers/thermal/qcom/qcom_tsens.ko",
"drivers/tty/serial/msm_serial.ko",
"drivers/ufs/host/ufs_qcom.ko",
"drivers/usb/common/ulpi.ko",
"drivers/usb/host/ohci-hcd.ko",
"drivers/usb/host/ohci-pci.ko",
"drivers/usb/host/ohci-platform.ko",
"drivers/usb/typec/qcom-pmic-typec.ko",
"net/mac80211/mac80211.ko",
"net/qrtr/qrtr.ko",
"net/qrtr/qrtr-mhi.ko",
"net/qrtr/qrtr-smd.ko",
"net/qrtr/qrtr-tun.ko",
"net/wireless/cfg80211.ko",
"sound/soc/codecs/snd-soc-dmic.ko",
"sound/soc/codecs/snd-soc-hdmi-codec.ko",
"sound/soc/codecs/snd-soc-lpass-va-macro.ko",
"sound/soc/codecs/snd-soc-lpass-wsa-macro.ko",
"sound/soc/codecs/snd-soc-max98927.ko",
"sound/soc/codecs/snd-soc-rl6231.ko",
"sound/soc/codecs/snd-soc-rt5663.ko",
"sound/soc/codecs/snd-soc-wcd-mbhc.ko",
"sound/soc/codecs/snd-soc-wcd9335.ko",
"sound/soc/codecs/snd-soc-wcd934x.ko",
"sound/soc/codecs/snd-soc-wsa881x.ko",
"sound/soc/qcom/qdsp6/q6adm.ko",
"sound/soc/qcom/qdsp6/q6afe.ko",
"sound/soc/qcom/qdsp6/q6afe-clocks.ko",
"sound/soc/qcom/qdsp6/q6afe-dai.ko",
"sound/soc/qcom/qdsp6/q6asm.ko",
"sound/soc/qcom/qdsp6/q6asm-dai.ko",
"sound/soc/qcom/qdsp6/q6core.ko",
"sound/soc/qcom/qdsp6/q6dsp-common.ko",
"sound/soc/qcom/qdsp6/q6routing.ko",
"sound/soc/qcom/snd-soc-qcom-common.ko",
"sound/soc/qcom/snd-soc-sdm845.ko",
"sound/soc/qcom/snd-soc-sm8250.ko",
]
_DB845C_WATCHDOG_MODULE_OUTS = [
"drivers/watchdog/pm8916_wdt.ko",
"drivers/watchdog/qcom-wdt.ko",
]
kernel_build(
name = "db845c_no_kgdb",
outs = [
"arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
"arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
],
define_abi_targets = True,
kmi_symbol_list = "//common:android/abi_gki_aarch64_db845c",
kmi_symbol_list_add_only = True,
# Enable mixed build.
base_kernel = ":kernel_aarch64",
build_config = "build.config.db845c",
collect_unstripped_modules = True,
kmi_symbol_list = "android/abi_gki_aarch64_db845c",
make_goals = [
"modules",
"qcom/sdm845-db845c.dtb",
"qcom/qrb5165-rb5.dtb",
],
module_outs = [
# keep sorted
"crypto/michael_mic.ko",
"drivers/base/regmap/regmap-sdw.ko",
"drivers/base/regmap/regmap-slimbus.ko",
"drivers/bus/mhi/host/mhi.ko",
"drivers/clk/qcom/clk-qcom.ko",
"drivers/clk/qcom/clk-rpmh.ko",
"drivers/clk/qcom/clk-spmi-pmic-div.ko",
"drivers/clk/qcom/dispcc-sdm845.ko",
"drivers/clk/qcom/dispcc-sm8250.ko",
"drivers/clk/qcom/gcc-sdm845.ko",
"drivers/clk/qcom/gcc-sm8250.ko",
"drivers/clk/qcom/gpucc-sdm845.ko",
"drivers/clk/qcom/gpucc-sm8250.ko",
"drivers/clk/qcom/lpass-gfm-sm8250.ko",
"drivers/clk/qcom/videocc-sdm845.ko",
"drivers/clk/qcom/videocc-sm8250.ko",
"drivers/cpufreq/qcom-cpufreq-hw.ko",
"drivers/dma-buf/heaps/system_heap.ko",
"drivers/dma/qcom/bam_dma.ko",
"drivers/extcon/extcon-usb-gpio.ko",
"drivers/firmware/qcom-scm.ko",
"drivers/gpio/gpio-wcd934x.ko",
"drivers/gpu/drm/bridge/display-connector.ko",
"drivers/gpu/drm/bridge/lontium-lt9611.ko",
"drivers/gpu/drm/bridge/lontium-lt9611uxc.ko",
"drivers/gpu/drm/msm/msm.ko",
"drivers/gpu/drm/scheduler/gpu-sched.ko",
"drivers/hwspinlock/qcom_hwspinlock.ko",
"drivers/i2c/busses/i2c-designware-core.ko",
"drivers/i2c/busses/i2c-designware-platform.ko",
"drivers/i2c/busses/i2c-qcom-geni.ko",
"drivers/i2c/busses/i2c-qup.ko",
"drivers/i2c/busses/i2c-rk3x.ko",
"drivers/i2c/i2c-dev.ko",
"drivers/i2c/i2c-mux.ko",
"drivers/i2c/muxes/i2c-mux-pca954x.ko",
"drivers/iio/adc/qcom-spmi-adc5.ko",
"drivers/iio/adc/qcom-vadc-common.ko",
"drivers/input/misc/pm8941-pwrkey.ko",
"drivers/interconnect/qcom/icc-bcm-voter.ko",
"drivers/interconnect/qcom/icc-osm-l3.ko",
"drivers/interconnect/qcom/icc-rpmh.ko",
"drivers/interconnect/qcom/qnoc-sdm845.ko",
"drivers/interconnect/qcom/qnoc-sm8250.ko",
"drivers/iommu/arm/arm-smmu/arm_smmu.ko",
"drivers/irqchip/qcom-pdc.ko",
"drivers/leds/led-class-multicolor.ko",
"drivers/mailbox/qcom-apcs-ipc-mailbox.ko",
"drivers/mailbox/qcom-ipcc.ko",
"drivers/mfd/qcom-spmi-pmic.ko",
"drivers/mfd/wcd934x.ko",
"drivers/misc/fastrpc.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/sdhci-msm.ko",
"drivers/net/can/spi/mcp251xfd/mcp251xfd.ko",
"drivers/net/wireless/ath/ath.ko",
"drivers/net/wireless/ath/ath10k/ath10k_core.ko",
"drivers/net/wireless/ath/ath10k/ath10k_pci.ko",
"drivers/net/wireless/ath/ath10k/ath10k_snoc.ko",
"drivers/net/wireless/ath/ath11k/ath11k.ko",
"drivers/net/wireless/ath/ath11k/ath11k_ahb.ko",
"drivers/net/wireless/ath/ath11k/ath11k_pci.ko",
"drivers/nvmem/nvmem_qfprom.ko",
"drivers/phy/qualcomm/phy-qcom-qmp.ko",
"drivers/phy/qualcomm/phy-qcom-qusb2.ko",
"drivers/phy/qualcomm/phy-qcom-snps-femto-v2.ko",
"drivers/phy/qualcomm/phy-qcom-usb-hs.ko",
"drivers/pinctrl/qcom/pinctrl-lpass-lpi.ko",
"drivers/pinctrl/qcom/pinctrl-msm.ko",
"drivers/pinctrl/qcom/pinctrl-sdm845.ko",
"drivers/pinctrl/qcom/pinctrl-sm8250.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko",
"drivers/power/reset/qcom-pon.ko",
"drivers/power/reset/reboot-mode.ko",
"drivers/power/reset/syscon-reboot-mode.ko",
"drivers/regulator/gpio-regulator.ko",
"drivers/regulator/qcom-rpmh-regulator.ko",
"drivers/regulator/qcom_spmi-regulator.ko",
"drivers/regulator/qcom_usb_vbus-regulator.ko",
"drivers/remoteproc/qcom_common.ko",
"drivers/remoteproc/qcom_pil_info.ko",
"drivers/remoteproc/qcom_q6v5.ko",
"drivers/remoteproc/qcom_q6v5_adsp.ko",
"drivers/remoteproc/qcom_q6v5_mss.ko",
"drivers/remoteproc/qcom_q6v5_pas.ko",
"drivers/remoteproc/qcom_q6v5_wcss.ko",
"drivers/remoteproc/qcom_sysmon.ko",
"drivers/reset/reset-qcom-aoss.ko",
"drivers/reset/reset-qcom-pdc.ko",
"drivers/rpmsg/qcom_glink.ko",
"drivers/rpmsg/qcom_glink_rpm.ko",
"drivers/rpmsg/qcom_glink_smem.ko",
"drivers/rpmsg/qcom_smd.ko",
"drivers/rpmsg/rpmsg_ns.ko",
"drivers/rtc/rtc-pm8xxx.ko",
"drivers/slimbus/slim-qcom-ngd-ctrl.ko",
"drivers/slimbus/slimbus.ko",
"drivers/soc/qcom/apr.ko",
"drivers/soc/qcom/cmd-db.ko",
"drivers/soc/qcom/llcc-qcom.ko",
"drivers/soc/qcom/mdt_loader.ko",
"drivers/soc/qcom/pdr_interface.ko",
"drivers/soc/qcom/qcom_aoss.ko",
"drivers/soc/qcom/qcom_rpmh.ko",
"drivers/soc/qcom/qmi_helpers.ko",
"drivers/soc/qcom/rmtfs_mem.ko",
"drivers/soc/qcom/rpmhpd.ko",
"drivers/soc/qcom/smem.ko",
"drivers/soc/qcom/smp2p.ko",
"drivers/soc/qcom/smsm.ko",
"drivers/soc/qcom/socinfo.ko",
"drivers/soundwire/soundwire-bus.ko",
"drivers/soundwire/soundwire-qcom.ko",
"drivers/spi/spi-geni-qcom.ko",
"drivers/spi/spi-pl022.ko",
"drivers/spi/spi-qcom-qspi.ko",
"drivers/spi/spi-qup.ko",
"drivers/spmi/spmi-pmic-arb.ko",
"drivers/thermal/qcom/lmh.ko",
"drivers/thermal/qcom/qcom-spmi-adc-tm5.ko",
"drivers/thermal/qcom/qcom-spmi-temp-alarm.ko",
"drivers/thermal/qcom/qcom_tsens.ko",
"drivers/tty/serial/msm_serial.ko",
"drivers/ufs/host/ufs_qcom.ko",
"drivers/usb/common/ulpi.ko",
"drivers/usb/host/ohci-hcd.ko",
"drivers/usb/host/ohci-pci.ko",
"drivers/usb/host/ohci-platform.ko",
"drivers/usb/typec/qcom-pmic-typec.ko",
"drivers/watchdog/pm8916_wdt.ko",
"drivers/watchdog/qcom-wdt.ko",
"net/mac80211/mac80211.ko",
"net/qrtr/qrtr.ko",
"net/qrtr/qrtr-mhi.ko",
"net/qrtr/qrtr-smd.ko",
"net/qrtr/qrtr-tun.ko",
"net/wireless/cfg80211.ko",
"sound/soc/codecs/snd-soc-dmic.ko",
"sound/soc/codecs/snd-soc-hdmi-codec.ko",
"sound/soc/codecs/snd-soc-lpass-va-macro.ko",
"sound/soc/codecs/snd-soc-lpass-wsa-macro.ko",
"sound/soc/codecs/snd-soc-max98927.ko",
"sound/soc/codecs/snd-soc-rl6231.ko",
"sound/soc/codecs/snd-soc-rt5663.ko",
"sound/soc/codecs/snd-soc-wcd-mbhc.ko",
"sound/soc/codecs/snd-soc-wcd9335.ko",
"sound/soc/codecs/snd-soc-wcd934x.ko",
"sound/soc/codecs/snd-soc-wsa881x.ko",
"sound/soc/qcom/qdsp6/q6adm.ko",
"sound/soc/qcom/qdsp6/q6afe.ko",
"sound/soc/qcom/qdsp6/q6afe-clocks.ko",
"sound/soc/qcom/qdsp6/q6afe-dai.ko",
"sound/soc/qcom/qdsp6/q6asm.ko",
"sound/soc/qcom/qdsp6/q6asm-dai.ko",
"sound/soc/qcom/qdsp6/q6core.ko",
"sound/soc/qcom/qdsp6/q6dsp-common.ko",
"sound/soc/qcom/qdsp6/q6routing.ko",
"sound/soc/qcom/snd-soc-qcom-common.ko",
"sound/soc/qcom/snd-soc-sdm845.ko",
"sound/soc/qcom/snd-soc-sm8250.ko",
module_outs = _DB845C_MODULE_OUTS + _DB845C_WATCHDOG_MODULE_OUTS,
strip_modules = True,
)
kernel_build(
name = "db845c_with_kgdb",
outs = [
"arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
"arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
],
# Enable mixed build.
base_kernel = ":kernel_aarch64",
build_config = "build.config.db845c",
make_goals = [
"modules",
"qcom/sdm845-db845c.dtb",
"qcom/qrb5165-rb5.dtb",
],
module_outs = _DB845C_MODULE_OUTS,
strip_modules = True,
)
alias(
name = "db845c",
actual = select({
"//build/kernel/kleaf:kgdb_is_true": "db845c_with_kgdb",
"//conditions:default": "db845c_no_kgdb",
}),
)
kernel_abi(
name = "db845c_abi",
kernel_build = ":db845c",
kmi_symbol_list_add_only = True,
)
kernel_modules_install(
name = "db845c_modules_install",
kernel_build = ":db845c",
)
merged_kernel_uapi_headers(
name = "db845c_merged_kernel_uapi_headers",
kernel_build = ":db845c",
)
kernel_images(
name = "db845c_images",
build_initramfs = True,
kernel_build = ":db845c",
kernel_modules_install = ":db845c_modules_install",
)
copy_to_dist_dir(
name = "db845c_dist",
data = [
":db845c",
":db845c_images",
":db845c_modules_install",
":db845c_merged_kernel_uapi_headers",
# Mixed build: Additional GKI artifacts.
":kernel_aarch64",
":kernel_aarch64_modules",
":kernel_aarch64_additional_artifacts",
],
dist_dir = "out/db845/dist",
flat = True,
log = "info",
)
_ROCKPI4_MODULE_OUTS = [
# keep sorted
"drivers/block/virtio_blk.ko",
"drivers/char/hw_random/virtio-rng.ko",
"drivers/clk/clk-rk808.ko",
"drivers/cpufreq/cpufreq-dt.ko",
"drivers/dma/pl330.ko",
"drivers/gpu/drm/bridge/analogix/analogix_dp.ko",
"drivers/gpu/drm/bridge/synopsys/dw-hdmi.ko",
"drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.ko",
"drivers/gpu/drm/rockchip/rockchipdrm.ko",
"drivers/i2c/busses/i2c-rk3x.ko",
"drivers/iio/adc/rockchip_saradc.ko",
"drivers/iio/buffer/industrialio-triggered-buffer.ko",
"drivers/iio/buffer/kfifo_buf.ko",
"drivers/mfd/rk808.ko",
"drivers/mmc/core/pwrseq_simple.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/dw_mmc.ko",
"drivers/mmc/host/dw_mmc-pltfm.ko",
"drivers/mmc/host/dw_mmc-rockchip.ko",
"drivers/mmc/host/sdhci-of-arasan.ko",
"drivers/net/ethernet/stmicro/stmmac/dwmac-rk.ko",
"drivers/net/ethernet/stmicro/stmmac/stmmac.ko",
"drivers/net/ethernet/stmicro/stmmac/stmmac-platform.ko",
"drivers/net/net_failover.ko",
"drivers/net/pcs/pcs_xpcs.ko",
"drivers/net/virtio_net.ko",
"drivers/nvmem/nvmem_rockchip_efuse.ko",
"drivers/pci/controller/pcie-rockchip-host.ko",
"drivers/phy/rockchip/phy-rockchip-emmc.ko",
"drivers/phy/rockchip/phy-rockchip-inno-usb2.ko",
"drivers/phy/rockchip/phy-rockchip-pcie.ko",
"drivers/phy/rockchip/phy-rockchip-typec.ko",
"drivers/pwm/pwm-rockchip.ko",
"drivers/regulator/fan53555.ko",
"drivers/regulator/pwm-regulator.ko",
"drivers/regulator/rk808-regulator.ko",
"drivers/rtc/rtc-rk808.ko",
"drivers/soc/rockchip/io-domain.ko",
"drivers/thermal/rockchip_thermal.ko",
"drivers/usb/host/ohci-hcd.ko",
"drivers/usb/host/ohci-platform.ko",
"drivers/virtio/virtio_pci.ko",
"drivers/virtio/virtio_pci_modern_dev.ko",
"net/core/failover.ko",
]
_ROCKPI4_WATCHDOG_MODULE_OUTS = [
# keep sorted
"drivers/watchdog/dw_wdt.ko",
]
# TODO(b/258259749): Convert rockpi4 to mixed build
kernel_build(
name = "rockpi4_no_kgdb",
outs = [
"Image",
"System.map",
"modules.builtin",
"modules.builtin.modinfo",
"rk3399-rock-pi-4b.dtb",
"vmlinux",
"vmlinux.symvers",
],
build_config = "build.config.rockpi4",
collect_unstripped_modules = True,
kmi_symbol_list = "android/abi_gki_rockpi4",
make_goals = [
"Image",
"modules",
"rockchip/rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS + _ROCKPI4_WATCHDOG_MODULE_OUTS,
visibility = ["//visibility:private"],
)
# TODO(b/258259749): Convert rockpi4 to mixed build
kernel_build(
name = "rockpi4",
name = "rockpi4_with_kgdb",
outs = [
"Image",
"System.map",
@@ -291,65 +463,27 @@ kernel_build(
"modules",
"rockchip/rk3399-rock-pi-4b.dtb",
],
module_outs = COMMON_GKI_MODULES_LIST + [
# keep sorted
"drivers/block/virtio_blk.ko",
"drivers/char/hw_random/virtio-rng.ko",
"drivers/clk/clk-rk808.ko",
"drivers/cpufreq/cpufreq-dt.ko",
"drivers/dma/pl330.ko",
"drivers/gpu/drm/bridge/analogix/analogix_dp.ko",
"drivers/gpu/drm/bridge/synopsys/dw-hdmi.ko",
"drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.ko",
"drivers/gpu/drm/rockchip/rockchipdrm.ko",
"drivers/i2c/busses/i2c-rk3x.ko",
"drivers/iio/adc/rockchip_saradc.ko",
"drivers/iio/buffer/industrialio-triggered-buffer.ko",
"drivers/iio/buffer/kfifo_buf.ko",
"drivers/mfd/rk808.ko",
"drivers/mmc/core/pwrseq_simple.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/dw_mmc.ko",
"drivers/mmc/host/dw_mmc-pltfm.ko",
"drivers/mmc/host/dw_mmc-rockchip.ko",
"drivers/mmc/host/sdhci-of-arasan.ko",
"drivers/net/ethernet/stmicro/stmmac/dwmac-rk.ko",
"drivers/net/ethernet/stmicro/stmmac/stmmac.ko",
"drivers/net/ethernet/stmicro/stmmac/stmmac-platform.ko",
"drivers/net/net_failover.ko",
"drivers/net/pcs/pcs_xpcs.ko",
"drivers/net/virtio_net.ko",
"drivers/nvmem/nvmem_rockchip_efuse.ko",
"drivers/pci/controller/pcie-rockchip-host.ko",
"drivers/phy/rockchip/phy-rockchip-emmc.ko",
"drivers/phy/rockchip/phy-rockchip-inno-usb2.ko",
"drivers/phy/rockchip/phy-rockchip-pcie.ko",
"drivers/phy/rockchip/phy-rockchip-typec.ko",
"drivers/pwm/pwm-rockchip.ko",
"drivers/regulator/fan53555.ko",
"drivers/regulator/pwm-regulator.ko",
"drivers/regulator/rk808-regulator.ko",
"drivers/rtc/rtc-rk808.ko",
"drivers/soc/rockchip/io-domain.ko",
"drivers/thermal/rockchip_thermal.ko",
"drivers/usb/host/ohci-hcd.ko",
"drivers/usb/host/ohci-platform.ko",
"drivers/virtio/virtio_pci.ko",
"drivers/virtio/virtio_pci_modern_dev.ko",
"drivers/watchdog/dw_wdt.ko",
"net/core/failover.ko",
],
module_outs = COMMON_GKI_MODULES_LIST + _ROCKPI4_MODULE_OUTS,
visibility = ["//visibility:private"],
)
alias(
name = "rockpi4",
actual = select({
"//build/kernel/kleaf:kgdb_is_true": "rockpi4_with_kgdb",
"//conditions:default": "rockpi4_no_kgdb",
}),
)
kernel_abi(
name = "rockpi4_abi",
kernel_build = "//common:rockpi4",
kernel_build = ":rockpi4",
kmi_symbol_list_add_only = True,
)
kernel_modules_install(
name = "rockpi4_modules_install",
kernel_build = "//common:rockpi4",
kernel_build = ":rockpi4",
)
kernel_unstripped_modules_archive(
@@ -360,8 +494,8 @@ kernel_unstripped_modules_archive(
kernel_images(
name = "rockpi4_images",
build_initramfs = True,
kernel_build = "//common:rockpi4",
kernel_modules_install = "//common:rockpi4_modules_install",
kernel_build = ":rockpi4",
kernel_modules_install = ":rockpi4_modules_install",
)
copy_to_dist_dir(
@@ -381,13 +515,13 @@ kernel_build(
outs = [],
base_kernel = ":kernel_aarch64",
build_config = "build.config.gki.aarch64.fips140",
kmi_symbol_list = "//common:android/abi_gki_aarch64_fips140",
kmi_symbol_list = "android/abi_gki_aarch64_fips140",
module_outs = ["crypto/fips140.ko"],
)
kernel_abi(
name = "fips140_abi",
kernel_build = "//common:fips140",
kernel_build = ":fips140",
kmi_symbol_list_add_only = True,
)

View File

@@ -0,0 +1,175 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2023 Realtek Semiconductor Corporation
%YAML 1.2
---
$id: http://devicetree.org/schemas/phy/realtek,usb2phy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek DHC SoCs USB 2.0 PHY
maintainers:
- Stanley Chang <stanley_chang@realtek.com>
description: |
Realtek USB 2.0 PHY support the digital home center (DHC) RTD series SoCs.
The USB 2.0 PHY driver is designed to support the XHCI controller. The SoCs
support multiple XHCI controllers. One PHY device node maps to one XHCI
controller.
RTD1295/RTD1619 SoCs USB
The USB architecture includes three XHCI controllers.
Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on some
controllers.
XHCI controller#0 -- usb2phy -- phy#0
|- usb3phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
|- usb3phy -- phy#0
RTD1395 SoCs USB
The USB architecture includes two XHCI controllers.
The controller#0 has one USB 2.0 PHY. The controller#1 includes two USB 2.0
PHY.
XHCI controller#0 -- usb2phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
|- phy#1
RTD1319/RTD1619b SoCs USB
The USB architecture includes three XHCI controllers.
Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#2.
XHCI controller#0 -- usb2phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
|- usb3phy -- phy#0
RTD1319d SoCs USB
The USB architecture includes three XHCI controllers.
Each xhci maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#0.
XHCI controller#0 -- usb2phy -- phy#0
|- usb3phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
RTD1312c/RTD1315e SoCs USB
The USB architecture includes three XHCI controllers.
Each XHCI maps to one USB 2.0 PHY.
XHCI controller#0 -- usb2phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
properties:
compatible:
enum:
- realtek,rtd1295-usb2phy
- realtek,rtd1312c-usb2phy
- realtek,rtd1315e-usb2phy
- realtek,rtd1319-usb2phy
- realtek,rtd1319d-usb2phy
- realtek,rtd1395-usb2phy
- realtek,rtd1395-usb2phy-2port
- realtek,rtd1619-usb2phy
- realtek,rtd1619b-usb2phy
reg:
items:
- description: PHY data registers
- description: PHY control registers
"#phy-cells":
const: 0
nvmem-cells:
maxItems: 2
description:
Phandles to nvmem cell that contains the trimming data.
If unspecified, default value is used.
nvmem-cell-names:
items:
- const: usb-dc-cal
- const: usb-dc-dis
description:
The following names, which correspond to each nvmem-cells.
usb-dc-cal is the driving level for each phy specified via efuse.
usb-dc-dis is the disconnection level for each phy specified via efuse.
realtek,inverse-hstx-sync-clock:
description:
For one of the phys of RTD1619b SoC, the synchronous clock of the
high-speed tx must be inverted.
type: boolean
realtek,driving-level:
description:
Control the magnitude of High speed Dp/Dm output swing (mV).
For a different board or port, the original magnitude maybe not meet
the specification. In this situation we can adjust the value to meet
the specification.
$ref: /schemas/types.yaml#/definitions/uint32
default: 8
minimum: 0
maximum: 31
realtek,driving-level-compensate:
description:
For RTD1315e SoC, the driving level can be adjusted by reading the
efuse table. This property provides drive compensation.
If the magnitude of High speed Dp/Dm output swing still not meet the
specification, then we can set this value to meet the specification.
$ref: /schemas/types.yaml#/definitions/int32
default: 0
minimum: -8
maximum: 8
realtek,disconnection-compensate:
description:
This adjusts the disconnection level compensation for the different
boards with different disconnection level.
$ref: /schemas/types.yaml#/definitions/int32
default: 0
minimum: -8
maximum: 8
required:
- compatible
- reg
- "#phy-cells"
allOf:
- if:
not:
properties:
compatible:
contains:
enum:
- realtek,rtd1619b-usb2phy
then:
properties:
realtek,inverse-hstx-sync-clock: false
- if:
not:
properties:
compatible:
contains:
enum:
- realtek,rtd1315e-usb2phy
then:
properties:
realtek,driving-level-compensate: false
additionalProperties: false
examples:
- |
usb-phy@13214 {
compatible = "realtek,rtd1619b-usb2phy";
reg = <0x13214 0x4>, <0x28280 0x4>;
#phy-cells = <0>;
nvmem-cells = <&otp_usb_port0_dc_cal>, <&otp_usb_port0_dc_dis>;
nvmem-cell-names = "usb-dc-cal", "usb-dc-dis";
realtek,inverse-hstx-sync-clock;
realtek,driving-level = <0xa>;
realtek,disconnection-compensate = <(-1)>;
};

View File

@@ -0,0 +1,107 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
# Copyright 2023 Realtek Semiconductor Corporation
%YAML 1.2
---
$id: http://devicetree.org/schemas/phy/realtek,usb3phy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek DHC SoCs USB 3.0 PHY
maintainers:
- Stanley Chang <stanley_chang@realtek.com>
description: |
Realtek USB 3.0 PHY support the digital home center (DHC) RTD series SoCs.
The USB 3.0 PHY driver is designed to support the XHCI controller. The SoCs
support multiple XHCI controllers. One PHY device node maps to one XHCI
controller.
RTD1295/RTD1619 SoCs USB
The USB architecture includes three XHCI controllers.
Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on some
controllers.
XHCI controller#0 -- usb2phy -- phy#0
|- usb3phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
|- usb3phy -- phy#0
RTD1319/RTD1619b SoCs USB
The USB architecture includes three XHCI controllers.
Each XHCI maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#2.
XHCI controller#0 -- usb2phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
|- usb3phy -- phy#0
RTD1319d SoCs USB
The USB architecture includes three XHCI controllers.
Each xhci maps to one USB 2.0 PHY and map one USB 3.0 PHY on controllers#0.
XHCI controller#0 -- usb2phy -- phy#0
|- usb3phy -- phy#0
XHCI controller#1 -- usb2phy -- phy#0
XHCI controller#2 -- usb2phy -- phy#0
properties:
compatible:
enum:
- realtek,rtd1295-usb3phy
- realtek,rtd1319-usb3phy
- realtek,rtd1319d-usb3phy
- realtek,rtd1619-usb3phy
- realtek,rtd1619b-usb3phy
reg:
maxItems: 1
"#phy-cells":
const: 0
nvmem-cells:
maxItems: 1
description: A phandle to the tx lfps swing trim data provided by
a nvmem device, if unspecified, default values shall be used.
nvmem-cell-names:
items:
- const: usb_u3_tx_lfps_swing_trim
realtek,amplitude-control-coarse-tuning:
description:
This adjusts the signal amplitude for normal operation and beacon LFPS.
This value is a parameter for coarse tuning.
For different boards, if the default value is inappropriate, this
property can be assigned to adjust.
$ref: /schemas/types.yaml#/definitions/uint32
default: 255
minimum: 0
maximum: 255
realtek,amplitude-control-fine-tuning:
description:
This adjusts the signal amplitude for normal operation and beacon LFPS.
This value is used for fine-tuning parameters.
$ref: /schemas/types.yaml#/definitions/uint32
default: 65535
minimum: 0
maximum: 65535
required:
- compatible
- reg
- "#phy-cells"
additionalProperties: false
examples:
- |
usb-phy@13e10 {
compatible = "realtek,rtd1319d-usb3phy";
reg = <0x13e10 0x4>;
#phy-cells = <0>;
nvmem-cells = <&otp_usb_u3_tx_lfps_swing_trim>;
nvmem-cell-names = "usb_u3_tx_lfps_swing_trim";
realtek,amplitude-control-coarse-tuning = <0x77>;
};

File diff suppressed because it is too large Load Diff

View File

@@ -47,6 +47,7 @@
bitmap_find_next_zero_area_off
bitmap_free
__bitmap_or
bitmap_parselist
__bitmap_set
__bitmap_shift_left
__bitmap_shift_right
@@ -354,6 +355,7 @@
devm_kmemdup
devm_kstrdup
devm_kvasprintf
devm_mbox_controller_register
devm_nvmem_cell_get
devm_of_clk_add_hw_provider
devm_of_phy_get
@@ -708,6 +710,7 @@
_find_last_bit
_find_next_bit
__find_vma
find_vm_area
find_vpid
finish_wait
flow_block_cb_setup_simple
@@ -783,7 +786,9 @@
gen_pool_create
gen_pool_destroy
gen_pool_first_fit_align
gen_pool_first_fit_order_align
gen_pool_free_owner
gen_pool_has_addr
gen_pool_set_algo
gen_pool_size
gen_pool_virt_to_phys
@@ -933,6 +938,10 @@
invalidate_bdev
invalidate_inode_buffers
iomem_resource
iommu_device_register
iommu_device_sysfs_add
iommu_device_sysfs_remove
iommu_device_unregister
__ioremap
ioremap_cache
iounmap
@@ -1085,10 +1094,12 @@
mark_buffer_dirty
__mark_inode_dirty
mbox_chan_received_data
mbox_chan_txdone
mbox_controller_register
mbox_controller_unregister
mbox_free_channel
mbox_request_channel
mbox_request_channel_byname
mbox_send_message
mdiobus_alloc_size
mdiobus_free
@@ -1098,6 +1109,10 @@
mdiobus_write
mdio_device_create
mdio_device_free
media_create_pad_link
media_entity_remote_pad
media_pipeline_start
media_pipeline_stop
memchr
memcmp
memcpy
@@ -1192,6 +1207,7 @@
netif_schedule_queue
netif_set_real_num_rx_queues
netif_set_real_num_tx_queues
netif_set_xps_queue
netif_tx_stop_all_queues
netif_tx_wake_queue
netlink_broadcast
@@ -1313,6 +1329,9 @@
panic_notifier_list
param_array_ops
param_get_charp
param_get_hexint
param_get_int
param_get_short
param_get_string
param_ops_bool
param_ops_byte
@@ -1327,6 +1346,9 @@
param_ops_ushort
param_set_charp
param_set_copystring
param_set_hexint
param_set_int
pci_choose_state
pci_disable_device
pci_enable_device
pci_find_next_bus
@@ -1342,9 +1364,11 @@
pci_read_config_dword
pci_remove_root_bus
pci_rescan_bus
pci_set_power_state
pci_stop_and_remove_bus_device_locked
pci_stop_root_bus
pci_unlock_rescan_remove
pci_write_config_byte
pci_write_config_dword
PDE_DATA
__per_cpu_offset
@@ -1398,6 +1422,7 @@
pin_user_pages
pin_user_pages_fast
pin_user_pages_remote
platform_bus_type
platform_device_add
platform_device_add_data
platform_device_add_resources
@@ -1514,6 +1539,7 @@
rdev_get_drvdata
read_cache_page
read_sanitised_ftr_reg
rebuild_sched_domains
refcount_warn_saturate
__refrigerator
regcache_cache_only
@@ -1588,6 +1614,7 @@
rfkill_set_hw_state_reason
rfkill_set_sw_state
rfkill_unregister
rfs_needed
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
@@ -1597,6 +1624,8 @@
round_jiffies
round_jiffies_relative
round_jiffies_up
rps_cpu_mask
rps_sock_flow_table
rtc_add_group
rtc_time64_to_tm
rtc_tm_to_time64
@@ -1877,6 +1906,7 @@
system_freezing_cnt
system_highpri_wq
system_power_efficient_wq
system_unbound_wq
system_wq
sys_tz
__tasklet_hi_schedule
@@ -1907,11 +1937,14 @@
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_do_sea
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_iommu_setup_dma_ops
__traceiter_android_rvh_place_entity
__traceiter_android_rvh_replace_next_task_fair
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_tick_entry
__traceiter_android_vh_cma_drain_all_pages_bypass
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
@@ -1927,17 +1960,21 @@
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_sched_switch
__traceiter_xdp_exception
trace_output_call
__tracepoint_android_rvh_arm64_serror_panic
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_do_sea
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_iommu_setup_dma_ops
__tracepoint_android_rvh_place_entity
__tracepoint_android_rvh_replace_next_task_fair
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_tick_entry
__tracepoint_android_vh_cma_drain_all_pages_bypass
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
@@ -1955,6 +1992,7 @@
__tracepoint_mmap_lock_start_locking
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_sched_switch
__tracepoint_xdp_exception
trace_print_array_seq
trace_print_flags_seq
@@ -2055,17 +2093,22 @@
utf8_to_utf32
uuid_null
v4l2_ctrl_add_handler
v4l2_ctrl_find
v4l2_ctrl_handler_free
v4l2_ctrl_handler_init_class
v4l2_ctrl_handler_setup
v4l2_ctrl_new_custom
v4l2_ctrl_new_int_menu
v4l2_ctrl_new_std
__v4l2_ctrl_s_ctrl
__v4l2_ctrl_s_ctrl_int64
v4l2_ctrl_subscribe_event
v4l2_device_register
v4l2_device_register_subdev
v4l2_device_unregister
v4l2_device_unregister_subdev
v4l2_event_queue_fh
v4l2_event_subdev_unsubscribe
v4l2_event_subscribe
v4l2_event_unsubscribe
v4l2_fh_add
@@ -2073,6 +2116,7 @@
v4l2_fh_exit
v4l2_fh_init
v4l2_fh_open
__v4l2_find_nearest_size
v4l2_i2c_subdev_init
v4l2_m2m_buf_queue
v4l2_m2m_buf_remove
@@ -2095,6 +2139,7 @@
v4l2_m2m_release
v4l2_m2m_try_schedule
v4l2_src_change_event_subscribe
v4l2_subdev_link_validate
v4l_bound_align_image
vabits_actual
vb2_buffer_done
@@ -2107,6 +2152,7 @@
vb2_ioctl_create_bufs
vb2_ioctl_dqbuf
vb2_ioctl_expbuf
vb2_ioctl_prepare_buf
vb2_ioctl_qbuf
vb2_ioctl_querybuf
vb2_ioctl_reqbufs
@@ -2151,6 +2197,7 @@
wait_for_completion_timeout
__wait_on_buffer
wait_on_page_bit
wait_woken
__wake_up
wake_up_process
wakeup_source_register

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -226,6 +226,8 @@
cpuhp_tasks_frozen
cpu_hwcap_keys
cpu_hwcaps
cpuidle_driver_state_disabled
cpuidle_get_driver
cpu_latency_qos_add_request
cpu_latency_qos_remove_request
cpu_latency_qos_update_request
@@ -534,6 +536,7 @@
down_read_trylock
down_trylock
down_write
d_path
drain_workqueue
driver_create_file
driver_register
@@ -543,6 +546,8 @@
drm_add_modes_noedid
drm_atomic_add_affected_connectors
drm_atomic_add_affected_planes
drm_atomic_bridge_chain_disable
drm_atomic_bridge_chain_post_disable
drm_atomic_commit
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
@@ -553,6 +558,7 @@
drm_atomic_helper_bridge_destroy_state
drm_atomic_helper_bridge_duplicate_state
drm_atomic_helper_bridge_reset
drm_atomic_helper_calc_timestamping_constants
drm_atomic_helper_check_modeset
drm_atomic_helper_check_planes
drm_atomic_helper_check_plane_state
@@ -584,6 +590,7 @@
drm_atomic_helper_setup_commit
drm_atomic_helper_shutdown
drm_atomic_helper_swap_state
drm_atomic_helper_update_legacy_modeset_state
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_dependencies
drm_atomic_helper_wait_for_fences
@@ -605,6 +612,7 @@
drm_bridge_remove
drm_compat_ioctl
drm_connector_attach_encoder
drm_connector_attach_max_bpc_property
drm_connector_cleanup
drm_connector_init
drm_connector_list_iter_begin
@@ -646,6 +654,7 @@
drm_dp_read_sink_count
drm_dsc_pps_payload_pack
drm_edid_get_monitor_name
drm_edid_is_valid
drm_edid_to_sad
drm_encoder_cleanup
drm_encoder_init
@@ -872,6 +881,7 @@
get_random_bytes
get_random_u32
get_sg_io_hdr
__get_task_comm
get_task_cred
get_task_mm
get_thermal_instance
@@ -1578,6 +1588,7 @@
pskb_expand_head
__pskb_pull_tail
___pskb_trim
push_cpu_stop
__put_cred
put_device
put_iova_domain
@@ -1949,6 +1960,7 @@
static_key_slow_dec
static_key_slow_inc
stop_machine
stop_one_cpu_nowait
strcasecmp
strcat
strchr
@@ -2041,6 +2053,8 @@
tcpm_sourcing_vbus
tcpm_update_sink_capabilities
tcpm_vbus_change
teo_cpu_get_util_threshold
teo_cpu_set_util_threshold
thermal_cdev_update
thermal_cooling_device_unregister
thermal_of_cooling_device_register
@@ -2070,6 +2084,7 @@
__traceiter_android_rvh_arm64_serror_panic
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_audio_usb_offload_disconnect
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_cgroup_force_kthread_migration
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_cpu_cgroup_online
@@ -2087,6 +2102,7 @@
__traceiter_android_rvh_enqueue_task_fair
__traceiter_android_rvh_find_busiest_group
__traceiter_android_rvh_find_energy_efficient_cpu
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_panic_unhandled
@@ -2125,6 +2141,7 @@
__traceiter_android_vh_cma_alloc_start
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_do_madvise_blk_plug
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_dup_task_struct
__traceiter_android_vh_early_resume_begin
@@ -2135,10 +2152,14 @@
__traceiter_android_vh_mm_compaction_begin
__traceiter_android_vh_mm_compaction_end
__traceiter_android_vh_pagecache_get_page
__traceiter_android_vh_reclaim_pages_plug
__traceiter_android_vh_resume_end
__traceiter_android_vh_rmqueue
__traceiter_android_vh_sched_setaffinity_early
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_shrink_inactive_list_blk_plug
__traceiter_android_vh_shrink_lruvec_blk_plug
__traceiter_android_vh_si_meminfo
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_sysrq_crash
@@ -2194,6 +2215,7 @@
__tracepoint_android_rvh_arm64_serror_panic
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_audio_usb_offload_disconnect
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_cgroup_force_kthread_migration
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_cpu_cgroup_online
@@ -2211,6 +2233,7 @@
__tracepoint_android_rvh_enqueue_task_fair
__tracepoint_android_rvh_find_busiest_group
__tracepoint_android_rvh_find_energy_efficient_cpu
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_panic_unhandled
@@ -2249,6 +2272,7 @@
__tracepoint_android_vh_cma_alloc_start
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_do_madvise_blk_plug
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_dup_task_struct
__tracepoint_android_vh_early_resume_begin
@@ -2259,10 +2283,14 @@
__tracepoint_android_vh_mm_compaction_begin
__tracepoint_android_vh_mm_compaction_end
__tracepoint_android_vh_pagecache_get_page
__tracepoint_android_vh_reclaim_pages_plug
__tracepoint_android_vh_resume_end
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_sched_setaffinity_early
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_shrink_inactive_list_blk_plug
__tracepoint_android_vh_shrink_lruvec_blk_plug
__tracepoint_android_vh_si_meminfo
__tracepoint_android_vh_sound_usb_support_cpu_suspend
__tracepoint_android_vh_sysrq_crash

View File

@@ -11,13 +11,20 @@
__arm_smccc_smc
bpf_trace_run1
bpf_trace_run2
cancel_delayed_work
cancel_delayed_work_sync
cancel_work_sync
cdev_add
cdev_alloc
cdev_del
cdev_init
cec_notifier_conn_register
cec_notifier_set_phys_addr
cec_notifier_set_phys_addr_from_edid
__cfi_slowpath_diag
__check_object_size
__class_create
class_create_file_ns
class_destroy
clk_disable
clk_enable
@@ -41,7 +48,10 @@
__cpu_online_mask
debugfs_create_dir
debugfs_create_file
debugfs_lookup
debugfs_remove
delayed_work_timer_fn
del_timer
del_timer_sync
desc_to_gpio
destroy_workqueue
@@ -83,6 +93,7 @@
dma_buf_detach
dma_buf_export
dma_buf_fd
dma_buf_get
dma_buf_map_attachment
dma_buf_put
dma_buf_set_name
@@ -103,8 +114,10 @@
dma_unmap_page_attrs
dma_unmap_sg_attrs
down
down_interruptible
event_triggers_call
fasync_helper
_find_first_bit
_find_next_bit
find_pid_ns
finish_wait
@@ -118,14 +131,23 @@
gen_pool_create
gen_pool_free_owner
gen_pool_set_algo
get_device
get_zeroed_page
gic_nonsecure_priorities
gpiod_direction_input
gpiod_direction_output
gpiod_direction_output_raw
gpiod_get_value
gpiod_put
gpiod_set_debounce
gpiod_to_irq
gpio_to_desc
hrtimer_cancel
hrtimer_forward
__hrtimer_get_remaining
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
__hwspin_unlock
i2c_del_driver
i2c_get_adapter
@@ -140,6 +162,7 @@
init_timer_key
init_wait_entry
__init_waitqueue_head
__ioremap
iounmap
__irq_domain_add
irq_get_irq_data
@@ -166,10 +189,16 @@
kobject_put
kobject_uevent
kobject_uevent_env
kstrtoint
kstrtoll
kstrtouint
kstrtoull
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get
ktime_get_mono_fast_ns
ktime_get_ts64
__list_add_valid
__list_del_entry_valid
memcpy
@@ -179,7 +208,13 @@
memstart_addr
misc_deregister
misc_register
mmc_add_host
mmc_alloc_host
mmc_free_host
mmc_gpio_get_cd
mmc_of_parse
mmc_remove_host
mmc_request_done
mmc_send_tuning
mmc_wait_for_cmd
mod_timer
@@ -189,22 +224,27 @@
__mutex_init
mutex_lock
mutex_unlock
__ndelay
nonseekable_open
nvmem_cell_get
nvmem_cell_put
nvmem_cell_read
of_address_to_resource
of_alias_get_id
of_clk_add_provider
of_clk_del_provider
of_clk_get
of_clk_src_simple_get
of_device_get_match_data
of_device_is_available
of_device_is_compatible
of_find_compatible_node
of_find_device_by_node
of_find_node_opts_by_path
of_find_property
of_get_child_by_name
of_get_compatible_child
of_get_named_gpio_flags
of_get_next_child
of_get_property
of_get_regulator_init_data
@@ -214,30 +254,39 @@
of_match_node
of_nvmem_cell_get
of_parse_phandle
of_platform_depopulate
of_platform_populate
of_property_count_elems_of_size
of_property_read_string
of_property_read_string_helper
of_property_read_u32_index
of_property_read_variable_u32_array
of_property_read_variable_u8_array
panic
param_ops_int
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pid_task
pinctrl_lookup_state
pinctrl_select_state
platform_bus_type
platform_device_register_full
platform_device_unregister
__platform_driver_register
platform_driver_unregister
platform_get_irq
platform_get_resource
platform_get_resource_byname
__pm_runtime_disable
pm_runtime_enable
pm_runtime_force_resume
pm_runtime_force_suspend
__pm_runtime_idle
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_set_status
__pm_runtime_suspend
__pm_runtime_use_autosuspend
power_supply_register
power_supply_unregister
preempt_schedule_notrace
@@ -250,9 +299,12 @@
queue_work_on
raw_notifier_call_chain
raw_notifier_chain_register
raw_notifier_chain_unregister
_raw_spin_lock
_raw_spin_lock_bh
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_bh
_raw_spin_unlock_irqrestore
rdev_get_drvdata
refcount_warn_saturate
@@ -275,6 +327,7 @@
rpmsg_send
rtc_time64_to_tm
rtc_tm_to_time64
schedule
schedule_timeout
scnprintf
sdhci_add_host
@@ -291,23 +344,40 @@
seq_puts
seq_read
sg_free_table
sg_init_one
sg_next
single_open
single_release
skb_dequeue
skb_put
skb_queue_tail
snd_card_free
snd_card_new
snd_card_register
snd_ctl_add
snd_ctl_new1
snd_pcm_add_chmap_ctls
snd_pcm_alt_chmaps
snd_pcm_lib_ioctl
snd_pcm_new
snd_pcm_period_elapsed
snd_pcm_set_ops
snd_pcm_suspend_all
snprintf
soc_device_match
sprintf
sscanf
__stack_chk_fail
strcmp
strlen
strncmp
strncpy
strscpy
strstr
syscon_node_to_regmap
syscon_regmap_lookup_by_phandle
syscon_regmap_lookup_by_phandle_args
sysfs_create_files
sysfs_create_group
sysfs_remove_group
system_state
@@ -321,11 +391,17 @@
trace_event_raw_init
trace_event_reg
trace_handle_return
__traceiter_dwc3_readl
__tracepoint_dwc3_readl
trace_raw_output_prep
__ubsan_handle_cfi_check_fail_abort
unregister_chrdev_region
unregister_reboot_notifier
up
usb_add_phy_dev
usb_debug_root
usb_remove_phy
usb_role_switch_set_role
usleep_range_state
vabits_actual
vfree
@@ -336,6 +412,7 @@
wait_for_completion
wait_for_completion_timeout
__wake_up
wake_up_process
__warn_printk
# required by apw8889-regulator.ko
@@ -356,15 +433,16 @@
regulator_set_voltage_sel_regmap
strcasecmp
# required by buflock.ko
__get_task_comm
simple_read_from_buffer
# required by clk-det.ko
__clk_get_hw
devm_clk_hw_unregister
devm_of_clk_del_provider
of_clk_hw_simple_get
# required by clk-rtd1619b-cc.ko
of_device_is_compatible
# required by clk-rtk.ko
clk_hw_get_name
clk_hw_get_num_parents
@@ -386,6 +464,69 @@
dma_contiguous_default_area
sg_alloc_table_from_pages_segment
# required by cqhci.ko
devm_blk_crypto_profile_init
dmam_alloc_attrs
dmam_free_coherent
mmc_cqe_request_done
# required by dpi_core.ko
mod_delayed_work_on
round_jiffies
system_freezable_wq
thermal_zone_get_temp
thermal_zone_get_zone_by_name
# required by dw_mmc_cqe-rtk.ko
clk_set_phase
__mmc_claim_host
mmc_cmdq_disable
mmc_cmdq_enable
mmc_hw_reset
mmc_release_host
mmc_switch
mmc_wait_for_req
# required by dw_mmc_cqe.ko
device_property_read_u32_array
down_write
__init_rwsem
mmc_gpio_get_ro
mmc_regulator_get_supply
up_write
# required by dwc3-rtk-debugfs.ko
__of_get_address
of_translate_address
# required by dwc3-rtk.ko
dev_fwnode
strchrnul
__traceiter_dwc3_writel
__tracepoint_dwc3_writel
usb_get_dr_mode
usb_role_switch_get_drvdata
usb_role_switch_get_role
usb_role_switch_register
usb_role_switch_unregister
# required by extcon-rtk-type-c.ko
devm_extcon_dev_allocate
devm_extcon_dev_register
extcon_set_property
extcon_set_property_capability
extcon_set_state
extcon_sync
flush_delayed_work
gpio_free
gpio_request
skip_spaces
typec_get_drvdata
typec_register_port
typec_set_data_role
typec_set_pwr_role
typec_unregister_port
# required by gpio-rtd.ko
gpiochip_add_data_with_key
handle_simple_irq
@@ -395,21 +536,14 @@
pinctrl_gpio_request
pinctrl_gpio_set_config
# required by hdcp.ko
cancel_delayed_work
# required by hdmitx.ko
bpf_trace_run4
cec_notifier_conn_register
cec_notifier_set_phys_addr
cec_notifier_set_phys_addr_from_edid
disable_irq
driver_for_each_device
enable_irq
gpiod_set_value
krealloc
reset_control_release
schedule
trace_print_symbols_seq
# required by i2c-rtk.ko
@@ -420,7 +554,6 @@
i2c_generic_scl_recovery
i2c_parse_fw_timings
i2c_recover_bus
of_alias_get_id
# required by irq-realtek-mux.ko
handle_level_irq
@@ -509,6 +642,9 @@
devm_phy_create
of_phy_simple_xlate
# required by phy-rtk-usb3.ko
of_property_read_variable_u16_array
# required by pinctrl-rtd.ko
pinconf_generic_dt_node_to_map
pinctrl_dev_get_drvdata
@@ -516,10 +652,8 @@
pinctrl_utils_free_map
# required by pwm-rtk.ko
kstrtoint
pwmchip_add
pwmchip_remove
sscanf
# required by r8169soc.ko
alloc_etherdev_mqs
@@ -576,7 +710,6 @@
__memcpy_toio
of_get_next_available_child
rpmsg_register_device
strstr
# required by rtc-rtk.ko
device_init_wakeup
@@ -589,6 +722,10 @@
hwrng_register
hwrng_unregister
# required by rtd13xx-ve1.ko
dev_pm_domain_attach_by_id
dev_pm_domain_detach
# required by rtk-gpu_wrap.ko
devm_of_platform_populate
devm_platform_ioremap_resource_byname
@@ -601,30 +738,35 @@
rc_repeat
register_pm_notifier
# required by rtk-jpu.ko
panic
# required by rtk-reboot.ko
register_restart_handler
# required by rtk-sdmmc.ko
del_timer
down_trylock
flush_workqueue
kmalloc_order_trace
mmc_add_host
mmc_alloc_host
mmc_detect_change
mmc_free_host
mmc_remove_host
mmc_request_done
sg_copy_from_buffer
sg_copy_to_buffer
sg_init_one
sg_init_table
# required by rtk-usb-manager.ko
class_compat_create_link
class_compat_register
class_compat_remove_link
class_compat_unregister
device_attach
device_release_driver
extcon_find_edev_by_node
extcon_get_property
extcon_get_state
extcon_register_notifier
gpiod_get_from_of_node
usb_register_notify
usb_role_switch_find_by_fwnode
usb_unregister_notify
# required by rtk_bootstatus.ko
syscon_regmap_lookup_by_phandle_args
sysfs_create_link
# required by rtk_cec.ko
@@ -648,10 +790,8 @@
dev_pm_opp_put_prop_name
dev_pm_opp_set_prop_name
get_cpu_device
platform_bus_type
# required by rtk_drm.ko
cancel_delayed_work_sync
component_add
component_bind_all
component_del
@@ -767,25 +907,30 @@
drm_universal_plane_init
drm_vblank_init
gpiod_get_raw_value
gpio_to_desc
hdmi_avi_infoframe_pack
kmemdup
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get_raw
of_get_named_gpio_flags
__platform_register_drivers
platform_unregister_drivers
_raw_spin_lock_irq
_raw_spin_unlock_irq
schedule_timeout_uninterruptible
__sw_hweight8
sysfs_create_files
wake_up_process
# required by rtk_fwdbg.ko
__register_chrdev
# required by rtk_fan.ko
__platform_driver_probe
pwm_apply_state
pwm_get
# required by rtk_fss.ko
devm_platform_ioremap_resource
of_get_parent
# required by rtk_gic_extension.ko
__cpuhp_remove_state
__cpuhp_setup_state
cpu_pm_register_notifier
cpu_pm_unregister_notifier
# required by rtk_gpc.ko
atomic_notifier_chain_register
@@ -796,7 +941,6 @@
devm_clk_bulk_get_all
of_genpd_add_provider_simple
panic_notifier_list
platform_get_resource_byname
pm_genpd_init
pm_genpd_remove
@@ -809,7 +953,6 @@
devm_platform_get_and_ioremap_resource
# required by rtk_krpc_agent.ko
get_device
rpmsg_create_ept
rpmsg_destroy_ept
@@ -834,7 +977,6 @@
__bitmap_set
__bitmap_weight
bitmap_zalloc
class_create_file_ns
debugfs_attr_read
debugfs_attr_write
debugfs_create_symlink
@@ -842,11 +984,9 @@
__devres_alloc_node
devres_find
devres_release
dma_buf_get
dma_heap_get_dev
dma_set_coherent_mask
dma_set_mask
_find_first_bit
_find_last_bit
generic_file_llseek
gen_pool_avail
@@ -882,13 +1022,9 @@
regulator_suspend_enable
# required by rtk_pm_suspend.ko
__ioremap
memchr
# required by rtk_rpc_mem.ko
of_platform_depopulate
_raw_spin_lock_bh
_raw_spin_unlock_bh
__register_rpmsg_driver
unregister_rpmsg_driver
@@ -897,14 +1033,12 @@
# required by rtk_tee_mem_api.ko
platform_find_device_by_driver
raw_notifier_chain_unregister
# required by rtk_tp.ko
of_n_addr_cells
__pm_runtime_idle
# required by rtk_urpc_service.ko
cdev_alloc
# required by rtk_ve3_uart.ko
devm_clk_put
# required by rtk_wdt.ko
platform_get_irq_optional
@@ -914,7 +1048,6 @@
watchdog_unregister_device
# required by sdhci-of-rtkstb.ko
mmc_gpio_get_cd
sdhci_set_clock
sdhci_set_ios
@@ -932,28 +1065,8 @@
pm_clk_destroy
# required by snd-soc-realtek.ko
hrtimer_cancel
hrtimer_forward
__hrtimer_get_remaining
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
ktime_get_ts64
__ndelay
param_array_ops
param_ops_bool
snd_card_free
snd_card_new
snd_card_register
snd_ctl_add
snd_ctl_new1
snd_pcm_add_chmap_ctls
snd_pcm_alt_chmaps
snd_pcm_lib_ioctl
snd_pcm_new
snd_pcm_period_elapsed
snd_pcm_set_ops
snd_pcm_suspend_all
# required by tee.ko
add_uevent_var

View File

@@ -252,6 +252,7 @@
dmabuf_page_pool_get_size
dma_buf_put
dma_contiguous_default_area
dma_fence_array_ops
dma_fence_context_alloc
dma_fence_default_wait
dma_fence_init
@@ -617,6 +618,7 @@
kobject_create_and_add
kobject_put
kobject_uevent_env
ksize
kstrdup
kstrtobool
kstrtoint
@@ -631,6 +633,7 @@
kthread_should_stop
kthread_stop
kthread_unpark
ktime_add_safe
ktime_get
ktime_get_coarse_with_offset
ktime_get_mono_fast_ns
@@ -803,6 +806,7 @@
param_ops_string
param_ops_uint
param_ops_ulong
pci_choose_state
__per_cpu_offset
perf_trace_buf_alloc
perf_trace_run_bpf_submit
@@ -977,6 +981,7 @@
seq_printf
seq_puts
seq_read
seq_vprintf
setattr_copy
setattr_prepare
set_bh_page

View File

@@ -1,59 +0,0 @@
drivers/block/zram/zram.ko
drivers/bluetooth/btbcm.ko
drivers/bluetooth/btqca.ko
drivers/bluetooth/btsdio.ko
drivers/bluetooth/hci_uart.ko
drivers/net/can/dev/can-dev.ko
drivers/net/can/slcan.ko
drivers/net/can/vcan.ko
drivers/net/mii.ko
drivers/net/ppp/bsd_comp.ko
drivers/net/ppp/ppp_deflate.ko
drivers/net/ppp/ppp_generic.ko
drivers/net/ppp/ppp_mppe.ko
drivers/net/ppp/pppox.ko
drivers/net/ppp/pptp.ko
drivers/net/slip/slhc.ko
drivers/net/usb/aqc111.ko
drivers/net/usb/asix.ko
drivers/net/usb/ax88179_178a.ko
drivers/net/usb/cdc_eem.ko
drivers/net/usb/cdc_ether.ko
drivers/net/usb/cdc_ncm.ko
drivers/net/usb/r8152.ko
drivers/net/usb/r8153_ecm.ko
drivers/net/usb/rtl8150.ko
drivers/net/usb/usbnet.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko
kernel/kheaders.ko
lib/crypto/libarc4.ko
mm/zsmalloc.ko
net/6lowpan/6lowpan.ko
net/6lowpan/nhc_dest.ko
net/6lowpan/nhc_fragment.ko
net/6lowpan/nhc_hop.ko
net/6lowpan/nhc_ipv6.ko
net/6lowpan/nhc_mobility.ko
net/6lowpan/nhc_routing.ko
net/6lowpan/nhc_udp.ko
net/8021q/8021q.ko
net/bluetooth/bluetooth.ko
net/bluetooth/hidp/hidp.ko
net/bluetooth/rfcomm/rfcomm.ko
net/can/can.ko
net/can/can-bcm.ko
net/can/can-gw.ko
net/can/can-raw.ko
net/ieee802154/6lowpan/ieee802154_6lowpan.ko
net/ieee802154/ieee802154.ko
net/ieee802154/ieee802154_socket.ko
net/l2tp/l2tp_core.ko
net/l2tp/l2tp_ppp.ko
net/mac802154/mac802154.ko
net/nfc/nfc.ko
net/rfkill/rfkill.ko
net/tipc/diag.ko
net/tipc/tipc.ko

View File

@@ -206,6 +206,11 @@ struct kvm_arch {
/* Mandated version of PSCI */
u32 psci_version;
#ifndef __GENKSYMS__
/* Protects VM-scoped configuration data */
struct mutex config_lock;
#endif
/*
* If we encounter a data abort without valid instruction syndrome
* information, report this to user space. User space can (and
@@ -353,7 +358,11 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];
#ifdef __GENKSYMS__
struct kvm_vcpu *__hyp_running_vcpu;
#else
void *__hyp_running_vcpu;
#endif
};
struct kvm_host_data {
@@ -512,6 +521,9 @@ struct kvm_vcpu_arch {
/* vcpu power state */
struct kvm_mp_state mp_state;
#ifndef __GENKSYMS__
spinlock_t mp_state_lock;
#endif
union {
/* Cache some mmu pages needed inside spinlock regions */

View File

@@ -249,9 +249,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
#define arch_enable_tagging_sync() mte_enable_kernel_sync()
#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_enable_tagging_asymm() mte_enable_kernel_asymm()
#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
#define arch_enable_tag_checks_async() mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
#define arch_suppress_tag_checks_start() mte_enable_tco()
#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)

View File

@@ -12,8 +12,73 @@
#include <linux/types.h>
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return static_branch_unlikely(&mte_async_or_asymm_mode);
}
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_ARM64_MTE
/*
* The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
* affects EL0 and TCF affects EL1 irrespective of which TTBR is
* used.
* The kernel accesses TTBR0 usually with LDTR/STTR instructions
* when UAO is available, so these would act as EL0 accesses using
* TCF0.
* However futex.h code uses exclusives which would be executed as
* EL1, this can potentially cause a tag check fault even if the
* user disables TCF0.
*
* To address the problem we set the PSTATE.TCO bit in uaccess_enable()
* and reset it in uaccess_disable().
*
* The Tag check override (TCO) bit disables temporarily the tag checking
* preventing the issue.
*/
static inline void mte_disable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
static inline void mte_enable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
/*
* These functions disable tag checking only if in MTE async mode
* since the sync mode generates exceptions synchronously and the
* nofault or load_unaligned_zeropad can handle them.
*/
static inline void __mte_disable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
mte_disable_tco();
}
static inline void __mte_enable_tco_async(void)
{
if (system_uses_mte_async_or_asymm_mode())
mte_enable_tco();
}
/*
* These functions are meant to be only used from KASAN runtime through
* the arch_*() interface defined in asm/memory.h.
@@ -137,6 +202,22 @@ void mte_enable_kernel_asymm(void);
#else /* CONFIG_ARM64_MTE */
static inline void mte_disable_tco(void)
{
}
static inline void mte_enable_tco(void)
{
}
static inline void __mte_disable_tco_async(void)
{
}
static inline void __mte_enable_tco_async(void)
{
}
static inline u8 mte_get_ptr_tag(void *ptr)
{
return 0xFF;

View File

@@ -145,14 +145,6 @@ static inline void mte_disable_tco_entry(struct task_struct *task)
}
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return static_branch_unlikely(&mte_async_or_asymm_mode);
}
void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
@@ -179,10 +171,6 @@ static inline void mte_check_tfsr_exit(void)
mte_check_tfsr_el1();
}
#else
static inline bool system_uses_mte_async_or_asymm_mode(void)
{
return false;
}
static inline void mte_check_tfsr_el1(void)
{
}

View File

@@ -208,7 +208,7 @@ static inline void __uaccess_enable_tco_async(void)
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
mte_disable_tco();
if (uaccess_ttbr0_disable())
return;
@@ -218,7 +218,7 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void)
{
__uaccess_enable_tco();
mte_enable_tco();
if (uaccess_ttbr0_enable())
return;
@@ -332,8 +332,8 @@ do { \
#define get_user __get_user
/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* We must not call into the scheduler between __mte_enable_tco_async() and
* __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __get_kernel_nofault(dst, src, type, err_label) \
@@ -342,10 +342,10 @@ do { \
__typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__mte_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
(__force type *)(__gkn_src), __gkn_err); \
__uaccess_disable_tco_async(); \
__mte_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
@@ -423,8 +423,8 @@ do { \
#define put_user __put_user
/*
* We must not call into the scheduler between __uaccess_enable_tco_async() and
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
* We must not call into the scheduler between __mte_enable_tco_async() and
* __mte_disable_tco_async(). As `dst` and `src` may contain blocking
* functions, we must evaluate these outside of the critical section.
*/
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -433,10 +433,10 @@ do { \
__typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
__mte_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
(__force type *)(__pkn_dst), __pkn_err); \
__uaccess_disable_tco_async(); \
__mte_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
goto err_label; \

View File

@@ -55,7 +55,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, tmp;
__uaccess_enable_tco_async();
__mte_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
@@ -78,7 +78,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
: "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
__uaccess_disable_tco_async();
__mte_disable_tco_async();
return ret;
}

View File

@@ -155,6 +155,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (type & ~KVM_VM_TYPE_MASK)
return -EINVAL;
mutex_init(&kvm->arch.config_lock);
#ifdef CONFIG_LOCKDEP
/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->lock);
#endif
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
@@ -431,6 +441,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err;
spin_lock_init(&vcpu->arch.mp_state_lock);
#ifdef CONFIG_LOCKDEP
/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
mutex_lock(&vcpu->mutex);
mutex_lock(&vcpu->kvm->arch.config_lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
mutex_unlock(&vcpu->mutex);
#endif
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -574,34 +594,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
}
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
spin_lock(&vcpu->arch.mp_state_lock);
__kvm_arm_vcpu_power_off(vcpu);
spin_unlock(&vcpu->arch.mp_state_lock);
}
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}
static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
kvm_make_request(KVM_REQ_SUSPEND, vcpu);
kvm_vcpu_kick(vcpu);
}
static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
*mp_state = vcpu->arch.mp_state;
*mp_state = READ_ONCE(vcpu->arch.mp_state);
return 0;
}
@@ -611,12 +638,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int ret = 0;
spin_lock(&vcpu->arch.mp_state_lock);
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.mp_state = *mp_state;
WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
break;
case KVM_MP_STATE_STOPPED:
kvm_arm_vcpu_power_off(vcpu);
__kvm_arm_vcpu_power_off(vcpu);
break;
case KVM_MP_STATE_SUSPENDED:
kvm_arm_vcpu_suspend(vcpu);
@@ -625,6 +654,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
ret = -EINVAL;
}
spin_unlock(&vcpu->arch.mp_state_lock);
return ret;
}
@@ -717,9 +748,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
static_branch_inc(&userspace_irqchip_in_use);
}
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
@@ -1333,7 +1364,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
kvm_arm_vcpu_power_off(vcpu);
else
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
return 0;
}

View File

@@ -953,7 +953,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
switch (attr->group) {
case KVM_ARM_VCPU_PMU_V3_CTRL:
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);

View File

@@ -332,17 +332,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {

View File

@@ -39,7 +39,12 @@ static bool (*default_trap_handler)(struct kvm_cpu_context *host_ctxt);
int __pkvm_register_host_smc_handler(bool (*cb)(struct kvm_cpu_context *))
{
return cmpxchg(&default_host_smc_handler, NULL, cb) ? -EBUSY : 0;
/*
* Paired with smp_load_acquire(&default_host_smc_handler) in
* handle_host_smc(). Ensure memory stores happening during a pKVM module
* init are observed before executing the callback.
*/
return cmpxchg_release(&default_host_smc_handler, NULL, cb) ? -EBUSY : 0;
}
int __pkvm_register_default_trap_handler(bool (*cb)(struct kvm_cpu_context *))
@@ -1376,7 +1381,7 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
handled = kvm_host_psci_handler(host_ctxt);
if (!handled)
handled = kvm_host_ffa_handler(host_ctxt);
if (!handled && READ_ONCE(default_host_smc_handler))
if (!handled && smp_load_acquire(&default_host_smc_handler))
handled = default_host_smc_handler(host_ctxt);
if (!handled)
__kvm_hyp_host_forward_smc(host_ctxt);

View File

@@ -28,14 +28,19 @@ struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
static void (*pkvm_psci_notifier)(enum pkvm_psci_notification, struct kvm_cpu_context *);
static void pkvm_psci_notify(enum pkvm_psci_notification notif, struct kvm_cpu_context *host_ctxt)
{
if (READ_ONCE(pkvm_psci_notifier))
if (smp_load_acquire(&pkvm_psci_notifier))
pkvm_psci_notifier(notif, host_ctxt);
}
#ifdef CONFIG_MODULES
int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *))
{
return cmpxchg(&pkvm_psci_notifier, NULL, cb) ? -EBUSY : 0;
/*
* Paired with smp_load_acquire(&pkvm_psci_notifier) in
* pkvm_psci_notify(). Ensure memory stores hapenning during a pKVM module
* init are observed before executing the callback.
*/
return cmpxchg_release(&pkvm_psci_notifier, NULL, cb) ? -EBUSY : 0;
}
#endif

View File

@@ -35,7 +35,8 @@ static inline void __hyp_putx4n(unsigned long x, int n)
static inline bool hyp_serial_enabled(void)
{
return !!READ_ONCE(__hyp_putc);
/* Paired with __pkvm_register_serial_driver()'s cmpxchg */
return !!smp_load_acquire(&__hyp_putc);
}
void hyp_puts(const char *s)
@@ -64,5 +65,10 @@ void hyp_putc(char c)
int __pkvm_register_serial_driver(void (*cb)(char))
{
return cmpxchg(&__hyp_putc, NULL, cb) ? -EBUSY : 0;
/*
* Paired with smp_load_acquire(&__hyp_putc) in
* hyp_serial_enabled(). Ensure memory stores hapenning during a pKVM
* module init are observed before executing the callback.
*/
return cmpxchg_release(&__hyp_putc, NULL, cb) ? -EBUSY : 0;
}

View File

@@ -223,6 +223,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};
@@ -234,6 +235,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};

View File

@@ -32,13 +32,19 @@ static void enter_vmid_context(struct kvm_s2_mmu *mmu,
* to do.
*/
if (vcpu) {
/* We're in guest context */
if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
return;
} else if (mmu == host_s2_mmu) {
return;
cxt->mmu = vcpu->arch.hw_mmu;
} else {
/* We're in host context */
if (mmu == host_s2_mmu)
return;
cxt->mmu = host_s2_mmu;
}
cxt->mmu = mmu;
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;

View File

@@ -125,6 +125,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};

View File

@@ -409,7 +409,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
if (val & ~fw_reg_features)
return -EINVAL;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
val != *fw_reg_bmap) {
@@ -419,7 +419,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
WRITE_ONCE(*fw_reg_bmap, val);
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}

View File

@@ -976,7 +976,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
struct arm_pmu *arm_pmu;
int ret = -ENXIO;
mutex_lock(&kvm->lock);
lockdep_assert_held(&kvm->arch.config_lock);
mutex_lock(&arm_pmus_lock);
list_for_each_entry(entry, &arm_pmus, entry) {
@@ -996,7 +996,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
}
mutex_unlock(&arm_pmus_lock);
mutex_unlock(&kvm->lock);
return ret;
}
@@ -1004,22 +1003,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held(&kvm->arch.config_lock);
if (!kvm_vcpu_has_pmu(vcpu))
return -ENODEV;
if (vcpu->arch.pmu.created)
return -EBUSY;
mutex_lock(&kvm->lock);
if (!kvm->arch.arm_pmu) {
/* No PMU set, get the default one */
kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
if (!kvm->arch.arm_pmu) {
mutex_unlock(&kvm->lock);
if (!kvm->arch.arm_pmu)
return -ENODEV;
}
}
mutex_unlock(&kvm->lock);
switch (attr->attr) {
case KVM_ARM_VCPU_PMU_V3_IRQ: {
@@ -1063,19 +1060,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
filter.action != KVM_PMU_EVENT_DENY))
return -EINVAL;
mutex_lock(&kvm->lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
mutex_unlock(&kvm->lock);
if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
return -EBUSY;
}
if (!kvm->arch.pmu_filter) {
kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
if (!kvm->arch.pmu_filter) {
mutex_unlock(&kvm->lock);
if (!kvm->arch.pmu_filter)
return -ENOMEM;
}
/*
* The default depends on the first applied filter.
@@ -1094,8 +1085,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
else
bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
mutex_unlock(&kvm->lock);
return 0;
}
case KVM_ARM_VCPU_PMU_V3_SET_PMU: {

View File

@@ -46,6 +46,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
struct vcpu_reset_state *reset_state;
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
int ret = PSCI_RET_SUCCESS;
unsigned long cpu_id;
cpu_id = smccc_get_arg1(source_vcpu);
@@ -60,11 +61,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
spin_lock(&vcpu->arch.mp_state_lock);
if (!kvm_arm_vcpu_stopped(vcpu)) {
if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
ret = PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
ret = PSCI_RET_INVALID_PARAMS;
goto out_unlock;
}
reset_state = &vcpu->arch.reset_state;
@@ -80,7 +85,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
*/
reset_state->r0 = smccc_get_arg3(source_vcpu);
WRITE_ONCE(reset_state->reset, true);
reset_state->reset = true;
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
/*
@@ -92,7 +97,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_vcpu_wake_up(vcpu);
return PSCI_RET_SUCCESS;
out_unlock:
spin_unlock(&vcpu->arch.mp_state_lock);
return ret;
}
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
@@ -152,8 +159,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
kvm_for_each_vcpu(i, tmp, vcpu->kvm)
tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
spin_lock(&tmp->arch.mp_state_lock);
WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
spin_unlock(&tmp->arch.mp_state_lock);
}
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
@@ -200,7 +210,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
@@ -225,9 +234,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
kvm_psci_narrow_to_32bit(vcpu);
fallthrough;
case PSCI_0_2_FN64_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
kvm_psci_narrow_to_32bit(vcpu);
@@ -366,7 +373,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
@@ -376,9 +382,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;

View File

@@ -176,7 +176,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
lockdep_assert_held(&kvm->lock);
lockdep_assert_held(&kvm->arch.config_lock);
if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
/*
@@ -228,17 +228,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
int ret;
bool loaded;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
ret = kvm_set_vm_width(vcpu);
if (!ret) {
reset_state = vcpu->arch.reset_state;
WRITE_ONCE(vcpu->arch.reset_state.reset, false);
}
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
if (ret)
return ret;
spin_lock(&vcpu->arch.mp_state_lock);
reset_state = vcpu->arch.reset_state;
vcpu->arch.reset_state.reset = false;
spin_unlock(&vcpu->arch.mp_state_lock);
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);

View File

@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
if (iter) {
iter = ERR_PTR(-EBUSY);
@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
if (end_of_vgic(iter))
iter = NULL;
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return iter;
}
@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
if (IS_ERR(v))
return;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
iter = kvm->arch.vgic.iter;
kfree(iter->lpi_array);
kfree(iter);
kvm->arch.vgic.iter = NULL;
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)

View File

@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
unsigned long i;
int ret;
if (irqchip_in_kernel(kvm))
return -EEXIST;
/*
* This function is also called by the KVM_CREATE_IRQCHIP handler,
* which had no chance yet to check the availability of the GICv2
@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
!kvm_vgic_global_state.can_emulate_gicv2)
return -ENODEV;
/* Must be held to avoid race with vCPU creation */
lockdep_assert_held(&kvm->lock);
ret = -EBUSY;
if (!lock_all_vcpus(kvm))
return ret;
mutex_lock(&kvm->arch.config_lock);
if (irqchip_in_kernel(kvm)) {
ret = -EEXIST;
goto out_unlock;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_has_run_once(vcpu))
goto out_unlock;
@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
return ret;
}
@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
* KVM io device for the redistributor that belongs to this VCPU.
*/
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->slots_lock);
ret = vgic_register_redist_iodev(vcpu);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->slots_lock);
}
return ret;
}
@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
* The function is generally called when nr_spis has been explicitly set
* by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
* vgic_initialized() returns true when this function has succeeded.
* Must be called with kvm->lock held!
*/
int vgic_init(struct kvm *kvm)
{
@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
int ret = 0, i;
unsigned long idx;
lockdep_assert_held(&kvm->arch.config_lock);
if (vgic_initialized(kvm))
return 0;
@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
/* To be called with kvm->lock held */
static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_destroy(struct kvm *kvm)
{
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
/**
@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
return -EBUSY;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
ret = vgic_init(kvm);
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
}
return ret;
@@ -436,30 +446,48 @@ int vgic_lazy_init(struct kvm *kvm)
int kvm_vgic_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
enum vgic_type type;
gpa_t dist_base;
int ret = 0;
if (likely(vgic_ready(kvm)))
return 0;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
goto out;
if (!irqchip_in_kernel(kvm))
goto out;
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
ret = vgic_v2_map_resources(kvm);
else
type = VGIC_V2;
} else {
ret = vgic_v3_map_resources(kvm);
type = VGIC_V3;
}
if (ret)
if (ret) {
__kvm_vgic_destroy(kvm);
else
dist->ready = true;
goto out;
}
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
return ret;
}

View File

@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
static int vgic_its_create(struct kvm_device *dev, u32 type)
{
int ret;
struct vgic_its *its;
if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
if (!its)
return -ENOMEM;
mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_initialized(dev->kvm)) {
int ret = vgic_v4_init(dev->kvm);
ret = vgic_v4_init(dev->kvm);
if (ret < 0) {
mutex_unlock(&dev->kvm->arch.config_lock);
kfree(its);
return ret;
}
@@ -1958,6 +1962,14 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);
/* Yep, even more trickery for lock ordering... */
#ifdef CONFIG_LOCKDEP
mutex_lock(&its->cmd_lock);
mutex_lock(&its->its_lock);
mutex_unlock(&its->its_lock);
mutex_unlock(&its->cmd_lock);
#endif
its->vgic_its_base = VGIC_ADDR_UNDEF;
INIT_LIST_HEAD(&its->device_list);
@@ -1976,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
dev->private = its;
return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
static void vgic_its_destroy(struct kvm_device *kvm_dev)
@@ -2045,6 +2061,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
ret = -ENXIO;
goto out;
@@ -2058,11 +2081,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
goto out;
}
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
addr = its->vgic_its_base + offset;
len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
@@ -2076,8 +2094,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
} else {
*reg = region->its_read(dev->kvm, its, addr, len);
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return ret;
}
@@ -2187,7 +2206,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@@ -2339,7 +2358,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@@ -2526,7 +2545,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@@ -2607,7 +2626,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@@ -2749,14 +2768,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
return 0;
mutex_lock(&kvm->lock);
mutex_lock(&its->its_lock);
if (!lock_all_vcpus(kvm)) {
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->lock);
return -EBUSY;
}
mutex_lock(&kvm->arch.config_lock);
mutex_lock(&its->its_lock);
switch (attr) {
case KVM_DEV_ARM_ITS_CTRL_RESET:
vgic_its_reset(kvm, its);
@@ -2769,8 +2789,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
break;
}
unlock_all_vcpus(kvm);
mutex_unlock(&its->its_lock);
mutex_unlock(&kvm->arch.config_lock);
unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);
return ret;
}

View File

@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
struct vgic_dist *vgic = &kvm->arch.vgic;
int r;
mutex_lock(&kvm->lock);
mutex_lock(&kvm->arch.config_lock);
switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
r = -ENODEV;
}
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->arch.config_lock);
return r;
}
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (get_user(addr, uaddr))
return -EFAULT;
mutex_lock(&kvm->lock);
/*
* Since we can't hold config_lock while registering the redistributor
* iodevs, take the slots_lock immediately.
*/
mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_VGIC_V2_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
if (r)
goto out;
mutex_lock(&kvm->arch.config_lock);
if (write) {
r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
} else {
addr = *addr_ptr;
}
mutex_unlock(&kvm->arch.config_lock);
out:
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->slots_lock);
if (!r && !write)
r = put_user(addr, uaddr);
@@ -227,7 +233,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
(val & 31))
return -EINVAL;
mutex_lock(&dev->kvm->lock);
mutex_lock(&dev->kvm->arch.config_lock);
if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
ret = -EBUSY;
@@ -235,16 +241,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
dev->kvm->arch.vgic.nr_spis =
val - VGIC_NR_PRIVATE_IRQS;
mutex_unlock(&dev->kvm->lock);
mutex_unlock(&dev->kvm->arch.config_lock);
return ret;
}
case KVM_DEV_ARM_VGIC_GRP_CTRL: {
switch (attr->attr) {
case KVM_DEV_ARM_VGIC_CTRL_INIT:
mutex_lock(&dev->kvm->lock);
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_init(dev->kvm);
mutex_unlock(&dev->kvm->lock);
mutex_unlock(&dev->kvm->arch.config_lock);
return r;
case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
/*
@@ -260,7 +266,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
r = vgic_v3_save_pending_tables(dev->kvm);
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
return r;
@@ -411,15 +420,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
mutex_lock(&dev->kvm->arch.config_lock);
ret = vgic_init(dev->kvm);
if (ret)
goto out;
if (!lock_all_vcpus(dev->kvm)) {
ret = -EBUSY;
goto out;
}
switch (attr->group) {
case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
@@ -432,8 +443,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
break;
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && !is_write)
@@ -569,12 +581,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->lock);
if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
if (!lock_all_vcpus(dev->kvm)) {
mutex_unlock(&dev->kvm->lock);
return -EBUSY;
}
if (!lock_all_vcpus(dev->kvm)) {
mutex_lock(&dev->kvm->arch.config_lock);
if (unlikely(!vgic_initialized(dev->kvm))) {
ret = -EBUSY;
goto out;
}
@@ -609,8 +623,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
break;
}
unlock_all_vcpus(dev->kvm);
out:
mutex_unlock(&dev->kvm->arch.config_lock);
unlock_all_vcpus(dev->kvm);
mutex_unlock(&dev->kvm->lock);
if (!ret && uaccess && !is_write) {

View File

@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
case GICD_CTLR: {
bool was_enabled, is_hwsgi;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
was_enabled = dist->enabled;
is_hwsgi = dist->nassgireq;
@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
else if (!was_enabled && dist->enabled)
vgic_kick_vcpus(vcpu->kvm);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
break;
}
case GICD_TYPER:
@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
struct vgic_redist_region *rdreg;
gpa_t rd_base;
int ret;
int ret = 0;
lockdep_assert_held(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
return 0;
goto out_unlock;
/*
* We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
*/
rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
if (!rdreg)
return 0;
goto out_unlock;
if (!vgic_v3_check_base(kvm))
return -EINVAL;
if (!vgic_v3_check_base(kvm)) {
ret = -EINVAL;
goto out_unlock;
}
vgic_cpu->rdreg = rdreg;
vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
rd_dev->redist_vcpu = vcpu;
mutex_lock(&kvm->slots_lock);
mutex_unlock(&kvm->arch.config_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
2 * SZ_64K, &rd_dev->dev);
mutex_unlock(&kvm->slots_lock);
if (ret)
return ret;
/* Protected by slots_lock */
rdreg->free_index++;
return 0;
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
/* The current c failed, so iterate over the previous ones. */
int i;
mutex_lock(&kvm->slots_lock);
for (i = 0; i < c; i++) {
vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu);
}
mutex_unlock(&kvm->slots_lock);
}
return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
int ret;
mutex_lock(&kvm->arch.config_lock);
ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
mutex_unlock(&kvm->arch.config_lock);
if (ret)
return ret;
@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
if (ret) {
struct vgic_redist_region *rdreg;
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
vgic_v3_free_redist_region(rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}

View File

@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
u32 val;
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
val = __vgic_mmio_read_active(vcpu, addr, len);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
return val;
}
@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_cactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
mutex_lock(&vcpu->kvm->lock);
mutex_lock(&vcpu->kvm->arch.config_lock);
vgic_access_active_prepare(vcpu, intid);
__vgic_mmio_write_sactive(vcpu, addr, len, val);
vgic_access_active_finish(vcpu, intid);
mutex_unlock(&vcpu->kvm->lock);
mutex_unlock(&vcpu->kvm->arch.config_lock);
}
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
@@ -1093,7 +1093,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
enum vgic_type type)
{
struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
int ret = 0;
unsigned int len;
switch (type) {
@@ -1111,10 +1110,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
io_device->iodev_type = IODEV_DIST;
io_device->redist_vcpu = NULL;
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
len, &io_device->dev);
mutex_unlock(&kvm->slots_lock);
return ret;
return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
len, &io_device->dev);
}

View File

@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm_vgic_global_state.vcpu_base,

View File

@@ -538,7 +538,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0;
unsigned long c;
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -568,12 +567,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);

View File

@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
* @kvm: Pointer to the VM being initialized
*
* We may be called each time a vITS is created, or when the
* vgic is initialized. This relies on kvm->lock to be
* held. In both cases, the number of vcpus should now be
* fixed.
* vgic is initialized. In both cases, the number of vcpus
* should now be fixed.
*/
int vgic_v4_init(struct kvm *kvm)
{
@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
int nr_vcpus, ret;
unsigned long i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
/**
* vgic_v4_teardown - Free the GICv4 data structures
* @kvm: Pointer to the VM being destroyed
*
* Relies on kvm->lock to be held.
*/
void vgic_v4_teardown(struct kvm *kvm)
{
struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
int i;
lockdep_assert_held(&kvm->arch.config_lock);
if (!its_vm->vpes)
return;

View File

@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/*
* Locking order is always:
* kvm->lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
* vcpu->mutex (mutex)
* kvm->arch.config_lock (mutex)
* its->cmd_lock (mutex)
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower

View File

@@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@@ -131,6 +132,16 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
const void *data, unsigned long len)
{
int ret;
ret = kvm_write_guest_lock(kvm, gpa, data, len);
return ret;
}
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC

View File

@@ -77,7 +77,7 @@ static struct blk_crypto_fallback_keyslot {
struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;
static struct blk_crypto_profile blk_crypto_fallback_profile;
static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;
@@ -293,7 +293,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
src_bio->bi_status = blk_st;
@@ -396,7 +396,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
bio->bi_status = blk_st;
@@ -500,7 +500,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
return false;
}
if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
&bc->bc_key->crypto_cfg)) {
bio->bi_status = BLK_STS_NOTSUPP;
return false;
@@ -527,7 +527,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}
static bool blk_crypto_fallback_inited;
@@ -535,7 +535,6 @@ static int blk_crypto_fallback_init(void)
{
int i;
int err;
struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
if (blk_crypto_fallback_inited)
return 0;
@@ -546,19 +545,28 @@ static int blk_crypto_fallback_init(void)
if (err)
goto out;
err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
if (err)
/* Dynamic allocation is needed because of lockdep_register_key(). */
blk_crypto_fallback_profile =
kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
if (!blk_crypto_fallback_profile) {
err = -ENOMEM;
goto fail_free_bioset;
}
err = blk_crypto_profile_init(blk_crypto_fallback_profile,
blk_crypto_num_keyslots);
if (err)
goto fail_free_profile;
err = -ENOMEM;
profile->ll_ops = blk_crypto_fallback_ll_ops;
profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
blk_crypto_fallback_profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;
/* All blk-crypto modes have a crypto API fallback. */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
profile->modes_supported[i] = 0xFFFFFFFF;
profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
WQ_UNBOUND | WQ_HIGHPRI |
@@ -599,7 +607,9 @@ fail_free_keyslots:
fail_free_wq:
destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
blk_crypto_profile_destroy(profile);
blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
kfree(blk_crypto_fallback_profile);
fail_free_bioset:
bioset_exit(&crypto_bio_split);
out:

View File

@@ -78,7 +78,18 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
unsigned int slot_hashtable_size;
memset(profile, 0, sizeof(*profile));
/*
* profile->lock of an underlying device can nest inside profile->lock
* of a device-mapper device, so use a dynamic lock class to avoid
* false-positive lockdep reports.
*/
#ifdef CONFIG_LOCKDEP
lockdep_register_key(&profile->lockdep_key);
__init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key);
#else
init_rwsem(&profile->lock);
#endif
if (num_slots == 0)
return 0;
@@ -88,7 +99,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile,
profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
GFP_KERNEL);
if (!profile->slots)
return -ENOMEM;
goto err_destroy;
profile->num_slots = num_slots;
@@ -442,6 +453,9 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
{
if (!profile)
return;
#ifdef CONFIG_LOCKDEP
lockdep_unregister_key(&profile->lockdep_key);
#endif
kvfree(profile->slot_hashtable);
kvfree_sensitive(profile->slots,
sizeof(profile->slots[0]) * profile->num_slots);

View File

@@ -59,7 +59,10 @@ void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
* don't check dying flag for MQ because the request won't
* be reused after dying flag is set
*/
blk_mq_sched_insert_request(rq, at_head, true, false);
blk_mq_sched_insert_request(rq, at_head, true,
rq->mq_hctx->flags & BLK_MQ_F_BLOCKING &&
rq->cmd_flags & REQ_NOWAIT);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

View File

@@ -1561,11 +1561,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
return;
}
if (!async && cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
return;
}
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@@ -1768,7 +1766,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
blk_mq_run_hw_queue(hctx, false);
blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -1798,7 +1796,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
int i;
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_start_stopped_hw_queue(hctx, async);
blk_mq_start_stopped_hw_queue(hctx, async ||
(hctx->flags & BLK_MQ_F_BLOCKING));
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

View File

@@ -537,8 +537,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
break;
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
if (!blk_queue_pipeline_zoned_writes(q) &&
!args->seq_zones_wlock) {
if (!args->seq_zones_wlock) {
args->seq_zones_wlock =
blk_alloc_zone_bitmap(q->node, args->nr_zones);
if (!args->seq_zones_wlock)

View File

@@ -340,6 +340,16 @@ static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
return rq;
}
/*
* Use write locking if either QUEUE_FLAG_NO_ZONE_WRITE_LOCK has not been set.
* Not using zone write locking is only safe if the block driver preserves the
* request order.
*/
static bool dd_use_zone_write_locking(struct request_queue *q)
{
return blk_queue_is_zoned(q) && !blk_queue_no_zone_write_lock(q);
}
/*
* For the specified data direction, return the next request to
* dispatch using arrival ordered lists.
@@ -355,8 +365,7 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
return NULL;
rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q) ||
blk_queue_pipeline_zoned_writes(rq->q))
if (data_dir == DD_READ || !dd_use_zone_write_locking(rq->q))
return rq;
/*
@@ -401,8 +410,7 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
if (!rq)
return NULL;
if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q) ||
blk_queue_pipeline_zoned_writes(rq->q))
if (data_dir == DD_READ || !dd_use_zone_write_locking(rq->q))
return rq;
/*
@@ -557,7 +565,8 @@ done:
/*
* If the request needs its target zone locked, do it.
*/
blk_req_zone_write_lock(rq);
if (dd_use_zone_write_locking(rq->q))
blk_req_zone_write_lock(rq);
rq->rq_flags |= RQF_STARTED;
return rq;
}
@@ -934,8 +943,7 @@ static void dd_finish_request(struct request *rq)
atomic_inc(&per_prio->stats.completed);
if (blk_queue_is_zoned(rq->q) &&
!blk_queue_pipeline_zoned_writes(q)) {
if (dd_use_zone_write_locking(rq->q)) {
unsigned long flags;
spin_lock_irqsave(&dd->zone_lock, flags);

View File

@@ -8,7 +8,6 @@ arch/arm64/boot/Image.gz
"
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1

View File

@@ -1,5 +0,0 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
DEFCONFIG=16k_gki_defconfig
PRE_DEFCONFIG_CMDS="mkdir -p \${OUT_DIR}/arch/arm64/configs/ && cat ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/16k_gki.fragment > \${OUT_DIR}/arch/arm64/configs/${DEFCONFIG};"
POST_DEFCONFIG_CMDS=""

View File

@@ -3,7 +3,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
BUILD_SYSTEM_DLKM=1
MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
BUILD_GKI_CERTIFICATION_TOOLS=1

View File

@@ -202,6 +202,19 @@ struct teo_cpu {
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
unsigned long teo_cpu_get_util_threshold(int cpu)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, cpu);
return cpu_data->util_threshold;
}
EXPORT_SYMBOL_GPL(teo_cpu_get_util_threshold);
void teo_cpu_set_util_threshold(int cpu, unsigned long util)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, cpu);
cpu_data->util_threshold = util;
}
EXPORT_SYMBOL_GPL(teo_cpu_set_util_threshold);
/**
* teo_cpu_is_utilized - Check if the CPU's util is above the threshold
* @cpu: Target CPU

View File

@@ -611,6 +611,7 @@ static void dm_bow_dtr(struct dm_target *ti)
wait_for_completion(dm_get_completion_from_kobject(kobj));
}
mutex_lock(&bc->ranges_lock);
while (rb_first(&bc->ranges)) {
struct bow_range *br = container_of(rb_first(&bc->ranges),
struct bow_range, node);
@@ -618,6 +619,7 @@ static void dm_bow_dtr(struct dm_target *ti)
rb_erase(&br->node, &bc->ranges);
kfree(br);
}
mutex_unlock(&bc->ranges_lock);
mutex_destroy(&bc->ranges_lock);
kfree(bc->log_sector);
@@ -1191,6 +1193,7 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
return;
}
mutex_lock(&bc->ranges_lock);
for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
struct bow_range *br = container_of(i, struct bow_range, node);
@@ -1198,11 +1201,11 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
readable_type[br->type],
(unsigned long long)br->sector);
if (result >= end)
return;
goto unlock;
result += scnprintf(result, end - result, "\n");
if (result >= end)
return;
goto unlock;
if (br->type == TRIMMED)
++trimmed_range_count;
@@ -1224,19 +1227,22 @@ static void dm_bow_tablestatus(struct dm_target *ti, char *result,
if (!rb_next(i)) {
scnprintf(result, end - result,
"\nERROR: Last range not of type TOP");
return;
goto unlock;
}
if (br->sector > range_top(br)) {
scnprintf(result, end - result,
"\nERROR: sectors out of order");
return;
goto unlock;
}
}
if (trimmed_range_count != trimmed_list_length)
scnprintf(result, end - result,
"\nERROR: not all trimmed ranges in trimmed list");
unlock:
mutex_unlock(&bc->ranges_lock);
}
static void dm_bow_status(struct dm_target *ti, status_type_t type,

View File

@@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB
bool "pvrusb2 ATSC/DVB support"
default y
depends on VIDEO_PVRUSB2 && DVB_CORE
depends on VIDEO_PVRUSB2=m || DVB_CORE=y
select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT

View File

@@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;

View File

@@ -3411,7 +3411,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;

View File

@@ -1936,6 +1936,19 @@ static void quirk_radeon_pm(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
/*
* NVIDIA Ampere-based HDA controllers can wedge the whole device if a bus
* reset is performed too soon after transition to D0, extend d3hot_delay
* to previous effective default for all NVIDIA HDA controllers.
*/
static void quirk_nvidia_hda_pm(struct pci_dev *dev)
{
quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8,
quirk_nvidia_hda_pm);
/*
* Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
* https://bugzilla.kernel.org/show_bug.cgi?id=205587

View File

@@ -85,6 +85,7 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"

View File

@@ -26,6 +26,7 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
realtek/ \
renesas/ \
rockchip/ \
samsung/ \

View File

@@ -0,0 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
#
# Phy drivers for Realtek platforms
#
config PHY_RTK_RTD_USB2PHY
tristate "Realtek RTD USB2 PHY Transceiver Driver"
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
help
Enable this to support Realtek SoC USB2 phy transceiver.
The DHC (digital home center) RTD series SoCs used the Synopsys
DWC3 USB IP. This driver will do the PHY initialization
of the parameters.
config PHY_RTK_RTD_USB3PHY
tristate "Realtek RTD USB3 PHY Transceiver Driver"
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
help
Enable this to support Realtek SoC USB3 phy transceiver.
The DHC (digital home center) RTD series SoCs used the Synopsys
DWC3 USB IP. This driver will do the PHY initialization
of the parameters.

View File

@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,766 @@
// SPDX-License-Identifier: GPL-2.0
/*
* phy-rtk-usb3.c RTK usb3.0 phy driver
*
* copyright (c) 2023 realtek semiconductor corporation
*
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/nvmem-consumer.h>
#include <linux/regmap.h>
#include <linux/sys_soc.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/phy.h>
#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
#define MAX_USB_PHY_DATA_SIZE 0x30
#define PHY_ADDR_0X09 0x09
#define PHY_ADDR_0X0B 0x0b
#define PHY_ADDR_0X0D 0x0d
#define PHY_ADDR_0X10 0x10
#define PHY_ADDR_0X1F 0x1f
#define PHY_ADDR_0X20 0x20
#define PHY_ADDR_0X21 0x21
#define PHY_ADDR_0X30 0x30
#define REG_0X09_FORCE_CALIBRATION BIT(9)
#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
struct phy_reg {
void __iomem *reg_mdio_ctl;
};
struct phy_data {
u8 addr;
u16 data;
};
struct phy_cfg {
int param_size;
struct phy_data param[MAX_USB_PHY_DATA_SIZE];
bool check_efuse;
bool do_toggle;
bool do_toggle_once;
bool use_default_parameter;
bool check_rx_front_end_offset;
};
struct phy_parameter {
struct phy_reg phy_reg;
/* Get from efuse */
u8 efuse_usb_u3_tx_lfps_swing_trim;
/* Get from dts */
u32 amplitude_control_coarse;
u32 amplitude_control_fine;
};
struct rtk_phy {
struct usb_phy phy;
struct device *dev;
struct phy_cfg *phy_cfg;
int num_phy;
struct phy_parameter *phy_parameter;
struct dentry *debug_dir;
};
#define PHY_IO_TIMEOUT_USEC (50000)
#define PHY_IO_DELAY_US (100)
static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
int ret;
unsigned int val;
ret = read_poll_timeout(readl, val, ((val & mask) == result),
PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
if (ret) {
pr_err("%s can't program USB phy\n", __func__);
return -ETIMEDOUT;
}
return 0;
}
static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
{
return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
}
static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
{
unsigned int tmp;
u32 value;
tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
writel(tmp, phy_reg->reg_mdio_ctl);
rtk_phy3_wait_vbusy(phy_reg);
value = readl(phy_reg->reg_mdio_ctl);
value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
return (u16)value;
}
static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
{
unsigned int val;
val = USB_MDIO_CTRL_PHY_WRITE |
(addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
(data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
writel(val, phy_reg->reg_mdio_ctl);
rtk_phy3_wait_vbusy(phy_reg);
return 0;
}
static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
{
struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
struct phy_reg *phy_reg;
struct phy_parameter *phy_parameter;
struct phy_data *phy_data;
u8 addr;
u16 data;
int i;
phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
phy_reg = &phy_parameter->phy_reg;
if (!phy_cfg->do_toggle)
return;
i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
phy_data = phy_cfg->param + i;
addr = phy_data->addr;
data = phy_data->data;
if (!addr && !data) {
addr = PHY_ADDR_0X09;
data = rtk_phy_read(phy_reg, addr);
phy_data->addr = addr;
phy_data->data = data;
}
rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
mdelay(1);
rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
}
/*
 * Program one PHY instance:
 *  1. write every non-empty parameter-table entry (skipped entirely when
 *     the chip variant uses the hardware defaults),
 *  2. run the force-calibration toggle,
 *  3. for one-shot-toggle chips, verify calibration through debug mode
 *     (registers 0x0d/0x10; completion flag is bit 15 of register 0x30),
 *  4. optionally check the RX front-end offset and, after bumping the
 *     offset range, re-run the toggle via the do_toggle label.
 *
 * Always returns 0.
 */
static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
{
	struct phy_cfg *phy_cfg;
	struct phy_reg *phy_reg;
	struct phy_parameter *phy_parameter;
	int i = 0;

	phy_cfg = rtk_phy->phy_cfg;
	phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
	phy_reg = &phy_parameter->phy_reg;

	if (phy_cfg->use_default_parameter)
		goto do_toggle;

	for (i = 0; i < phy_cfg->param_size; i++) {
		struct phy_data *phy_data = phy_cfg->param + i;
		u8 addr = phy_data->addr;
		u16 data = phy_data->data;

		/* addr == 0 && data == 0 marks an unused table slot. */
		if (!addr && !data)
			continue;

		rtk_phy_write(phy_reg, addr, data);
	}

do_toggle:
	/* One-shot chips arm do_toggle here and disarm it again below. */
	if (phy_cfg->do_toggle_once)
		phy_cfg->do_toggle = true;

	do_rtk_usb3_phy_toggle(rtk_phy, index, false);

	if (phy_cfg->do_toggle_once) {
		u16 check_value = 0;
		int count = 10;
		u16 value_0x0d, value_0x10;

		/* Enable Debug mode by set 0x0D and 0x10 */
		value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
		value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);

		rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
			      value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
		rtk_phy_write(phy_reg, PHY_ADDR_0X10,
			      (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
			      REG_0X10_DEBUG_MODE_SETTING);

		/* Poll bit 15 of register 0x30 (at most ~11 iterations, 1 ms apart). */
		check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
		while (!(check_value & BIT(15))) {
			check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
			mdelay(1);
			if (count-- < 0)
				break;
		}

		if (!(check_value & BIT(15)))
			dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
				 PHY_ADDR_0X30, check_value);

		/* Disable Debug mode by set 0x0D and 0x10 to default*/
		rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
		rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);

		phy_cfg->do_toggle = false;
	}

	if (phy_cfg->check_rx_front_end_offset) {
		u16 rx_offset_code, rx_offset_range;
		u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
		u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
		bool do_update = false;

		/*
		 * An offset code saturated at all-0s or all-1s means the
		 * current range is too small and should be stepped up.
		 */
		rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
		if (((rx_offset_code & code_mask) == 0x0) ||
		    ((rx_offset_code & code_mask) == code_mask))
			do_update = true;

		rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
		if (((rx_offset_range & range_mask) == range_mask) && do_update) {
			/* Range already at its maximum: nowhere left to go. */
			dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
				 rx_offset_code, rx_offset_range);
			do_update = false;
		}

		if (do_update) {
			u16 tmp1, tmp2;

			/* Step the range field up by one (increment of 1 << 2
			 * suggests the field starts at bit 2 — TODO confirm). */
			tmp1 = rx_offset_range & (~range_mask);
			tmp2 = rx_offset_range & range_mask;
			tmp2 += (1 << 2);
			rx_offset_range = tmp1 | (tmp2 & range_mask);
			rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
			/* Re-toggle and re-check with the new range. */
			goto do_toggle;
		}
	}

	return 0;
}
/*
 * phy_ops ->init() hook: program the parameter set into every PHY instance.
 *
 * The original loop overwrote @ret on every iteration, so an error from
 * any PHY other than the last one was silently discarded; stop on the
 * first failure instead and report it.
 */
static int rtk_phy_init(struct phy *phy)
{
	struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
	unsigned long phy_init_time = jiffies;
	int ret = 0;
	int i;

	for (i = 0; i < rtk_phy->num_phy; i++) {
		ret = do_rtk_phy_init(rtk_phy, i);
		if (ret)
			break;
	}

	/* jiffies_to_msecs() returns unsigned int, so use %u (was %d). */
	dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %ums)\n",
		jiffies_to_msecs(jiffies - phy_init_time));

	return ret;
}
/* phy_ops ->exit() hook: no teardown is required for this PHY. */
static int rtk_phy_exit(struct phy *phy)
{
	return 0;
}
/* Generic PHY framework operations for this driver. */
static const struct phy_ops ops = {
	.init = rtk_phy_init,
	.exit = rtk_phy_exit,
	.owner = THIS_MODULE,
};
/*
 * Toggle (recalibrate) the PHY behind one root-hub port.
 *
 * @port indexes the phy_parameter array, whose valid range is
 * [0, num_phy - 1]. The original check used '>' and therefore let
 * port == num_phy through, indexing one element past the end of the
 * array; reject it with '>=' and also reject negative ports.
 */
static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
{
	int index = port;
	struct rtk_phy *rtk_phy = dev_get_drvdata(usb3_phy->dev);

	if (index < 0 || index >= rtk_phy->num_phy) {
		dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
			__func__, index, rtk_phy->num_phy);
		return;
	}

	do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
}
/*
 * usb_phy ->notify_port_status() hook: invoked by the USB core on hub
 * port status changes. Triggers a PHY recalibration toggle whenever the
 * connect state of @port changes. Always returns 0.
 */
static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
				      u16 portstatus, u16 portchange)
{
	bool connected = !!(portstatus & USB_PORT_STAT_CONNECTION);

	pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
		 __func__, port, (int)portstatus, (int)portchange);

	if (portchange & USB_PORT_STAT_C_CONNECTION)
		rtk_phy_toggle(x, connected, port);

	return 0;
}
#ifdef CONFIG_DEBUG_FS
/*
 * Find (or create) the shared "phy" directory under the USB debugfs root.
 *
 * NOTE(review): debugfs_lookup() takes a reference on the dentry it
 * returns which is never dropped here — presumably harmless for a
 * long-lived debugfs directory, but worth confirming (dput() or
 * debugfs_lookup_and_remove() is the usual remedy).
 */
static struct dentry *create_phy_debug_root(void)
{
	struct dentry *phy_debug_root;

	phy_debug_root = debugfs_lookup("phy", usb_debug_root);
	if (!phy_debug_root)
		phy_debug_root = debugfs_create_dir("phy", usb_debug_root);

	return phy_debug_root;
}
/*
 * debugfs "parameter" file: dump the driver-level properties and, for each
 * PHY instance, every parameter-table entry alongside the value currently
 * read back from the hardware register.
 */
static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
{
	struct rtk_phy *rtk_phy = s->private;
	struct phy_cfg *phy_cfg;
	int i, index;

	phy_cfg = rtk_phy->phy_cfg;

	seq_puts(s, "Property:\n");
	seq_printf(s, " check_efuse: %s\n",
		   phy_cfg->check_efuse ? "Enable" : "Disable");
	seq_printf(s, " do_toggle: %s\n",
		   phy_cfg->do_toggle ? "Enable" : "Disable");
	seq_printf(s, " do_toggle_once: %s\n",
		   phy_cfg->do_toggle_once ? "Enable" : "Disable");
	seq_printf(s, " use_default_parameter: %s\n",
		   phy_cfg->use_default_parameter ? "Enable" : "Disable");

	for (index = 0; index < rtk_phy->num_phy; index++) {
		struct phy_reg *phy_reg;
		struct phy_parameter *phy_parameter;

		phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
		phy_reg = &phy_parameter->phy_reg;

		seq_printf(s, "PHY %d:\n", index);

		for (i = 0; i < phy_cfg->param_size; i++) {
			struct phy_data *phy_data = phy_cfg->param + i;
			u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
			u16 data = phy_data->data;

			/* Empty table slots still show the live register value. */
			if (!phy_data->addr && !data)
				seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
					   addr, rtk_phy_read(phy_reg, addr));
			else
				seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
					   addr, data, rtk_phy_read(phy_reg, addr));
		}

		seq_puts(s, "PHY Property:\n");
		seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
			   (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
		seq_printf(s, " amplitude_control_coarse: 0x%x\n",
			   (int)phy_parameter->amplitude_control_coarse);
		seq_printf(s, " amplitude_control_fine: 0x%x\n",
			   (int)phy_parameter->amplitude_control_fine);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
/*
 * Create the per-device debugfs directory and its "parameter" file.
 *
 * debugfs creation functions return ERR_PTR() values, never NULL, on
 * failure, so the original '!ptr' checks could not actually detect an
 * error. Per kernel convention the return values of debugfs calls need
 * not be checked at all: an error pointer passed as a parent simply makes
 * the subsequent calls no-ops.
 */
static inline void create_debug_files(struct rtk_phy *rtk_phy)
{
	struct dentry *phy_debug_root = create_phy_debug_root();

	rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
						phy_debug_root);

	debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
			    &rtk_usb3_parameter_fops);
}
/* Remove this PHY's debugfs directory and everything beneath it. */
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
{
	debugfs_remove_recursive(rtk_phy->debug_dir);
}
#else
/* CONFIG_DEBUG_FS disabled: no-op stubs. */
static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
#endif /* CONFIG_DEBUG_FS */
/*
 * Read the USB3 TX LFPS swing trim value from the efuse (via nvmem) and
 * store it in @phy_parameter. A missing cell or failed read leaves the
 * default (0); a non-zero trim below 0x8 is clamped up to 0x8.
 *
 * Fix: nvmem_cell_read() returns an ERR_PTR() on failure, but the
 * original dereferenced the buffer unconditionally, crashing the kernel
 * on a failed read. Check with IS_ERR() before touching buf[0].
 *
 * Always returns 0.
 */
static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
				 struct phy_parameter *phy_parameter, int index)
{
	struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
	u8 value = 0;
	struct nvmem_cell *cell;

	if (!phy_cfg->check_efuse)
		goto out;

	cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
	if (IS_ERR(cell)) {
		dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
			__func__, PTR_ERR(cell));
	} else {
		unsigned char *buf;
		size_t buf_size;

		buf = nvmem_cell_read(cell, &buf_size);
		if (!IS_ERR(buf)) {
			value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
			kfree(buf);
		}
		nvmem_cell_put(cell);
	}

	if (value > 0 && value < 0x8)
		phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
	else
		phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;

out:
	return 0;
}
/*
 * Patch the parameter table with board/efuse specific amplitude tuning
 * before it is programmed into the hardware:
 *  - coarse amplitude replaces the masked low field of register 0x20,
 *  - the efuse TX LFPS swing trim replaces its field within register 0x20,
 *  - fine amplitude replaces register 0x21 outright.
 *
 * An unset 0x20 entry (addr == 0 && data == 0) is first seeded from the
 * live hardware value; an unset 0x21 entry only has its address claimed.
 *
 * NOTE(review): the table is indexed with the raw register address
 * (PHY_ADDR_0X20/0X21) rather than through PHY_ADDR_MAP_ARRAY_INDEX() as
 * elsewhere — presumably that mapping is the identity; confirm.
 */
static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
					   struct phy_parameter *phy_parameter)
{
	struct phy_cfg *phy_cfg;
	struct phy_reg *phy_reg;

	phy_reg = &phy_parameter->phy_reg;
	phy_cfg = rtk_phy->phy_cfg;

	if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
		u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
		u16 data;

		if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
			phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
			data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
		} else {
			data = phy_cfg->param[PHY_ADDR_0X20].data;
		}

		data &= (~val_mask);
		data |= (phy_parameter->amplitude_control_coarse & val_mask);

		phy_cfg->param[PHY_ADDR_0X20].data = data;
	}

	if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
		u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
		u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
		int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
		u16 data;

		if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
			phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
			data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
		} else {
			data = phy_cfg->param[PHY_ADDR_0X20].data;
		}

		data &= ~(val_mask << val_shift);
		data |= ((efuse_val & val_mask) << val_shift);

		phy_cfg->param[PHY_ADDR_0X20].data = data;
	}

	if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
		u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;

		if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
			phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;

		phy_cfg->param[PHY_ADDR_0X21].data =
			phy_parameter->amplitude_control_fine & val_mask;
	}
}
static int parse_phy_data(struct rtk_phy *rtk_phy)
{
struct device *dev = rtk_phy->dev;
struct phy_parameter *phy_parameter;
int ret = 0;
int index;
rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
rtk_phy->num_phy, GFP_KERNEL);
if (!rtk_phy->phy_parameter)
return -ENOMEM;
for (index = 0; index < rtk_phy->num_phy; index++) {
phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
/* Amplitude control address 0x20 bit 0 to bit 7 */
if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
&phy_parameter->amplitude_control_coarse))
phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
/* Amplitude control address 0x21 bit 0 to bit 16 */
if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
&phy_parameter->amplitude_control_fine))
phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
update_amplitude_control_value(rtk_phy, phy_parameter);
}
return ret;
}
/*
 * Platform driver ->probe() hook: allocate driver state, copy the per-SoC
 * match data into a writable config, parse DT/efuse parameters, register
 * the generic PHY, the PHY provider and the usb_phy, then create debugfs
 * entries. All allocations are devm-managed, so error paths just return.
 *
 * Fix: the devm_kzalloc() for the writable phy_cfg copy was not checked
 * before the memcpy(), which would oops on allocation failure.
 */
static int rtk_usb3phy_probe(struct platform_device *pdev)
{
	struct rtk_phy *rtk_phy;
	struct device *dev = &pdev->dev;
	struct phy *generic_phy;
	struct phy_provider *phy_provider;
	const struct phy_cfg *phy_cfg;
	int ret;

	phy_cfg = of_device_get_match_data(dev);
	if (!phy_cfg) {
		dev_err(dev, "phy config are not assigned!\n");
		return -EINVAL;
	}

	rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
	if (!rtk_phy)
		return -ENOMEM;

	rtk_phy->dev = &pdev->dev;
	rtk_phy->phy.dev = rtk_phy->dev;
	rtk_phy->phy.label = "rtk-usb3phy";
	rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;

	/* Writable copy of the const match data, patched at probe time. */
	rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
	if (!rtk_phy->phy_cfg)
		return -ENOMEM;

	memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));

	rtk_phy->num_phy = 1;

	ret = parse_phy_data(rtk_phy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, rtk_phy);

	generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
	if (IS_ERR(generic_phy))
		return PTR_ERR(generic_phy);

	phy_set_drvdata(generic_phy, rtk_phy);

	phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider))
		return PTR_ERR(phy_provider);

	ret = usb_add_phy_dev(&rtk_phy->phy);
	if (ret)
		return ret;

	create_debug_files(rtk_phy);

	return 0;
}
/*
 * Platform driver ->remove_new() hook: tear down the debugfs entries and
 * unregister the usb_phy. Everything else is devm-managed.
 */
static void rtk_usb3phy_remove(struct platform_device *pdev)
{
	struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);

	remove_debug_files(rtk_phy);

	usb_remove_phy(&rtk_phy->phy);
}
/*
 * RTD1295: full parameter table, runtime toggling on every connect change.
 *
 * NOTE(review): entries [0] and [1] both carry address 0x01 even though
 * the rest of the table maps index i to address i — presumably [0] was
 * meant to be {0x00, ...}; confirm against the vendor parameter sheet.
 */
static const struct phy_cfg rtd1295_phy_cfg = {
	.param_size = MAX_USB_PHY_DATA_SIZE,
	.param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
		   [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
		   [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
		   [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
		   [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
		   [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
		   [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
		   [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
		   [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
		   [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
		   [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
		   [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
		   [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
		   [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
		   [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
		   [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
		   [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
		   [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
		   [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
		   [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
		   [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
		   [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
		   [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
		   [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
	.check_efuse = false,
	.do_toggle = true,
	.do_toggle_once = false,
	.use_default_parameter = false,
	.check_rx_front_end_offset = false,
};
/* RTD1619: only three registers deviate from the hardware defaults. */
static const struct phy_cfg rtd1619_phy_cfg = {
	.param_size = MAX_USB_PHY_DATA_SIZE,
	.param = { [8] = {0x08, 0x3591},
		   [38] = {0x26, 0x840b},
		   [40] = {0x28, 0xf842}, },
	.check_efuse = false,
	.do_toggle = true,
	.do_toggle_once = false,
	.use_default_parameter = false,
	.check_rx_front_end_offset = false,
};
/* RTD1319: sparse parameter set; efuse-based TX LFPS trim enabled. */
static const struct phy_cfg rtd1319_phy_cfg = {
	.param_size = MAX_USB_PHY_DATA_SIZE,
	.param = { [1] = {0x01, 0xac86},
		   [6] = {0x06, 0x0003},
		   [9] = {0x09, 0x924c},
		   [10] = {0x0a, 0xa608},
		   [11] = {0x0b, 0xb905},
		   [14] = {0x0e, 0x2010},
		   [32] = {0x20, 0x705a},
		   [33] = {0x21, 0xf645},
		   [34] = {0x22, 0x0013},
		   [35] = {0x23, 0xcb66},
		   [41] = {0x29, 0xff00}, },
	.check_efuse = true,
	.do_toggle = true,
	.do_toggle_once = false,
	.use_default_parameter = false,
	.check_rx_front_end_offset = false,
};
/* RTD1619B: efuse trim enabled; one-shot calibration toggle at init only. */
static const struct phy_cfg rtd1619b_phy_cfg = {
	.param_size = MAX_USB_PHY_DATA_SIZE,
	.param = { [1] = {0x01, 0xac8c},
		   [6] = {0x06, 0x0017},
		   [9] = {0x09, 0x724c},
		   [10] = {0x0a, 0xb610},
		   [11] = {0x0b, 0xb90d},
		   [13] = {0x0d, 0xef2a},
		   [15] = {0x0f, 0x9050},
		   [16] = {0x10, 0x000c},
		   [32] = {0x20, 0x70ff},
		   [34] = {0x22, 0x0013},
		   [35] = {0x23, 0xdb66},
		   [38] = {0x26, 0x8609},
		   [41] = {0x29, 0xff13},
		   [42] = {0x2a, 0x3070}, },
	.check_efuse = true,
	.do_toggle = false,
	.do_toggle_once = true,
	.use_default_parameter = false,
	.check_rx_front_end_offset = false,
};
/*
 * RTD1319D: efuse trim and one-shot toggle like RTD1619B, plus the RX
 * front-end offset range check/adjustment after calibration.
 */
static const struct phy_cfg rtd1319d_phy_cfg = {
	.param_size = MAX_USB_PHY_DATA_SIZE,
	.param = { [1] = {0x01, 0xac89},
		   [4] = {0x04, 0xf2f5},
		   [6] = {0x06, 0x0017},
		   [9] = {0x09, 0x424c},
		   [10] = {0x0a, 0x9610},
		   [11] = {0x0b, 0x9901},
		   [12] = {0x0c, 0xf000},
		   [13] = {0x0d, 0xef2a},
		   [14] = {0x0e, 0x1000},
		   [15] = {0x0f, 0x9050},
		   [32] = {0x20, 0x7077},
		   [35] = {0x23, 0x0b62},
		   [37] = {0x25, 0x10ec},
		   [42] = {0x2a, 0x3070}, },
	.check_efuse = true,
	.do_toggle = false,
	.do_toggle_once = true,
	.use_default_parameter = false,
	.check_rx_front_end_offset = true,
};
/* Device-tree match table: per-SoC configuration selected via ->data. */
static const struct of_device_id usbphy_rtk_dt_match[] = {
	{ .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
	{ .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
	{ .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
	{ .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
	{ .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
static struct platform_driver rtk_usb3phy_driver = {
	.probe = rtk_usb3phy_probe,
	.remove_new = rtk_usb3phy_remove,
	.driver = {
		.name = "rtk-usb3phy",
		.of_match_table = usbphy_rtk_dt_match,
	},
};

module_platform_driver(rtk_usb3phy_driver);

MODULE_LICENSE("GPL");
/*
 * The modalias format for platform devices is "platform:<driver name>"
 * with no space after the colon; the original "platform: rtk-usb3phy"
 * could never match a platform modalias, breaking module autoloading.
 */
MODULE_ALIAS("platform:rtk-usb3phy");
MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");

View File

@@ -181,6 +181,7 @@ static const char *sdebug_version_date = "20200710";
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_CMD_ABORT 0x10000
#define SDEBUG_OPT_UNALIGNED_WRITE 0x20000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
@@ -188,7 +189,8 @@ static const char *sdebug_version_date = "20200710";
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \
SDEBUG_OPT_CMD_ABORT)
SDEBUG_OPT_CMD_ABORT | \
SDEBUG_OPT_UNALIGNED_WRITE)
#define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
@@ -830,6 +832,7 @@ static int dix_reads;
static int dif_errors;
/* ZBC global data */
static bool sdeb_no_zwrl;
static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
@@ -3494,6 +3497,14 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
rwlock_t *macc_lckp = &sip->macc_lck;
u8 *cmd = scp->cmnd;
if (unlikely(sdebug_opts & SDEBUG_OPT_UNALIGNED_WRITE &&
atomic_read(&sdeb_inject_pending))) {
atomic_set(&sdeb_inject_pending, 0);
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
UNALIGNED_WRITE_ASCQ);
return check_condition_result;
}
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -5114,9 +5125,13 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
struct request_queue *q = sdp->request_queue;
if (sdebug_verbose)
pr_info("slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdeb_no_zwrl)
blk_queue_flag_set(QUEUE_FLAG_NO_ZONE_WRITE_LOCK, q);
return 0;
}
@@ -5767,6 +5782,7 @@ module_param_named(medium_error_start, sdebug_medium_error_start, int,
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(no_zone_write_lock, sdeb_no_zwrl, bool, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
@@ -5840,6 +5856,8 @@ MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM er
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(no_zone_write_lock,
"Disable serialization of zoned writes (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");

View File

@@ -27,6 +27,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/list_sort.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -672,14 +673,15 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case ILLEGAL_REQUEST:
/*
* Unaligned write command. This indicates that zoned writes got
* reordered. Retry after all pending commands have completed.
* Unaligned write command. This may indicate that zoned writes
* have been received by the device in the wrong order. If zone
* write locking is disabled, retry after all pending commands
* have completed.
*/
if (sshdr.asc == 0x21 && sshdr.ascq == 0x04) {
return scsi_cmd_retry_allowed(scmd) &&
!scsi_noretry_cmd(scmd) ? NEEDS_DELAYED_RETRY :
SUCCESS;
}
if (sshdr.asc == 0x21 && sshdr.ascq == 0x04 &&
blk_queue_no_zone_write_lock(scsi_cmd_to_rq(scmd)->q))
return NEEDS_DELAYED_RETRY;
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
sshdr.asc == 0x22 || /* Invalid function */
@@ -2138,6 +2140,25 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
/*
* Returns a negative value if @_a has a lower starting sector than @_b, zero if
* both have the same starting sector and a positive value otherwise.
*/
static int scsi_cmp_sector(void *priv, const struct list_head *_a,
const struct list_head *_b)
{
struct scsi_cmnd *a = list_entry(_a, typeof(*a), eh_entry);
struct scsi_cmnd *b = list_entry(_b, typeof(*b), eh_entry);
const sector_t pos_a = blk_rq_pos(scsi_cmd_to_rq(a));
const sector_t pos_b = blk_rq_pos(scsi_cmd_to_rq(b));
if (pos_a < pos_b)
return -1;
if (pos_a > pos_b)
return 1;
return 0;
}
/**
* scsi_eh_flush_done_q - finish processed commands or retry them.
* @done_q: list_head of processed commands.
@@ -2146,6 +2167,13 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
/*
* Sort pending SCSI commands in starting sector order. This is
* important if one of the SCSI devices associated with @shost is a
* zoned block device for which zone write locking is disabled.
*/
list_sort(NULL, done_q, scsi_cmp_sector);
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&

View File

@@ -304,11 +304,6 @@ void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
cmd->budget_token = -1;
}
static void scsi_kick_queue(struct request_queue *q)
{
blk_mq_run_hw_queues(q, false);
}
/*
* Called for single_lun devices on IO completion. Clear starget_sdev_user,
* and call blk_run_queue for all the scsi_devices on the target -
@@ -333,7 +328,8 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
scsi_kick_queue(current_sdev->request_queue);
blk_mq_run_hw_queues(current_sdev->request_queue,
shost->queuecommand_may_block);
spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user)
@@ -346,7 +342,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(sdev->request_queue);
blk_mq_run_hw_queues(sdev->request_queue, false);
spin_lock_irqsave(shost->host_lock, flags);
scsi_device_put(sdev);
@@ -433,7 +429,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(slq);
blk_mq_run_hw_queues(slq, false);
blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags);
@@ -458,8 +454,8 @@ static void scsi_run_queue(struct request_queue *q)
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
/* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */
blk_mq_kick_requeue_list(q);
blk_mq_run_hw_queues(q, false);
}
void scsi_requeue_run_queue(struct work_struct *work)
@@ -1438,6 +1434,7 @@ static void scsi_complete(struct request *rq)
case ADD_TO_MLQUEUE:
scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
break;
case NEEDS_DELAYED_RETRY:
default:
scsi_eh_scmd_add(cmd);
break;

View File

@@ -1304,7 +1304,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = sdp->sector_size;
cmd->underflow = nr_blocks << 9;
cmd->allowed = sdkp->max_retries;
if (blk_queue_pipeline_zoned_writes(rq->q) &&
if (blk_queue_no_zone_write_lock(rq->q) &&
blk_rq_is_seq_zoned_write(rq))
cmd->allowed += rq->q->nr_requests;
cmd->sdb.length = nr_blocks * sdp->sector_size;

View File

@@ -2411,8 +2411,10 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
if (gsm->dlci[i]) {
gsm_dlci_release(gsm->dlci[i]);
gsm->dlci[i] = NULL;
}
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);

View File

@@ -299,11 +299,11 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
unsigned long completed_reqs;
unsigned long completed_reqs, flags;
spin_lock(&hwq->cq_lock);
spin_lock_irqsave(&hwq->cq_lock, flags);
completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
spin_unlock(&hwq->cq_lock);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
return completed_reqs;
}

View File

@@ -1512,7 +1512,7 @@ start_window:
scaling->window_start_t = curr_t;
scaling->tot_busy_t = 0;
if (hba->outstanding_reqs) {
if (scaling->active_reqs) {
scaling->busy_start_t = curr_t;
scaling->is_busy_started = true;
} else {
@@ -2135,7 +2135,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.active_reqs--;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
@@ -4343,29 +4343,67 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
static void ufshcd_update_no_zone_write_lock(struct ufs_hba *hba,
bool set_no_zone_write_lock)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, hba->host)
blk_freeze_queue_start(sdev->request_queue);
shost_for_each_device(sdev, hba->host) {
struct request_queue *q = sdev->request_queue;
blk_mq_freeze_queue_wait(q);
if (set_no_zone_write_lock)
blk_queue_flag_set(QUEUE_FLAG_NO_ZONE_WRITE_LOCK, q);
else
blk_queue_flag_clear(QUEUE_FLAG_NO_ZONE_WRITE_LOCK, q);
blk_mq_unfreeze_queue(q);
}
}
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
unsigned long flags;
bool update = false;
bool prev_state, new_state, update = false;
if (!ufshcd_is_auto_hibern8_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
prev_state = ufshcd_is_auto_hibern8_enabled(hba);
if (hba->ahit != ahit) {
hba->ahit = ahit;
update = true;
}
new_state = ufshcd_is_auto_hibern8_enabled(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (update &&
!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
if (!update)
return;
if (!is_mcq_enabled(hba) && !prev_state && new_state) {
/*
* Auto-hibernation will be enabled. Enable write locking for
* zoned writes since auto-hibernation may cause reordering of
* zoned writes when using the legacy mode of the UFS host
* controller.
*/
ufshcd_update_no_zone_write_lock(hba, false);
}
if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba, false);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
}
if (!is_mcq_enabled(hba) && prev_state && !new_state) {
/*
* Auto-hibernation has been disabled. Disable write locking
* for zoned writes.
*/
ufshcd_update_no_zone_write_lock(hba, true);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
@@ -5145,9 +5183,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
ufshcd_hpb_configure(hba, sdev);
#if 0
blk_queue_flag_set(QUEUE_FLAG_PIPELINE_ZONED_WRITES, q);
#endif
blk_queue_flag_set(QUEUE_FLAG_NO_ZONE_WRITE_LOCK, q);
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
blk_queue_update_dma_alignment(q, 4096 - 1);
@@ -8585,7 +8621,9 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
return ret;
if (is_mcq_supported(hba) && !hba->scsi_host_added) {
ret = ufshcd_alloc_mcq(hba);
if (ret) {
if (!ret) {
ufshcd_config_mcq(hba);
} else {
/* Continue with SDB mode */
use_mcq_mode = false;
dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
@@ -8597,10 +8635,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
return ret;
}
hba->scsi_host_added = true;
}
/* MCQ may be disabled if ufshcd_alloc_mcq() fails */
if (is_mcq_supported(hba) && use_mcq_mode)
} else if (is_mcq_supported(hba)) {
/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
ufshcd_config_mcq(hba);
}
}
ufshcd_tune_unipro_params(hba);

View File

@@ -1432,8 +1432,8 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (IS_ERR(res->base)) {
dev_err(hba->dev, "Failed to map res %s, err=%d\n",
res->name, (int)PTR_ERR(res->base));
res->base = NULL;
ret = PTR_ERR(res->base);
res->base = NULL;
return ret;
}
}
@@ -1447,7 +1447,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
/* Explicitly allocate MCQ resource from ufs_mem */
res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
if (!res_mcq)
return ret;
return -ENOMEM;
res_mcq->start = res_mem->start +
MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
@@ -1459,7 +1459,7 @@ static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
ret);
goto insert_res_err;
return ret;
}
res->base = devm_ioremap_resource(hba->dev, res_mcq);
@@ -1476,8 +1476,6 @@ out:
ioremap_err:
res->base = NULL;
remove_resource(res_mcq);
insert_res_err:
devm_kfree(hba->dev, res_mcq);
return ret;
}

View File

@@ -614,6 +614,29 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
/*
* There is no need to lock status_mutex here, because status_mutex
* protects hub->status, and the phy driver only checks the port
* status without changing the status.
*/
if (!ret) {
struct usb_device *hdev = hub->hdev;
/*
* Only roothub will be notified of port state changes,
* since the USB PHY only cares about changes at the next
* level.
*/
if (is_root_hub(hdev)) {
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
if (hcd->usb_phy)
usb_phy_notify_port_status(hcd->usb_phy,
port1 - 1, *status, *change);
}
}
return ret;
}

View File

@@ -679,8 +679,11 @@ fail:
pr_err("acc_bind() could not allocate requests\n");
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
dev->rx_req[i] = NULL;
}
return -1;
}
@@ -712,6 +715,12 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
goto done;
}
if (!dev->rx_req[0]) {
pr_warn("acc_read: USB request already handled/freed");
r = -EINVAL;
goto done;
}
/*
* Calculate the data length by considering termination character.
* Then compansite the difference of rounding up to
@@ -1208,8 +1217,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
for (i = 0; i < RX_REQ_MAX; i++)
for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
dev->rx_req[i] = NULL;
}
acc_hid_unbind(dev);
}

View File

@@ -538,16 +538,20 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
struct usb_ep *ep = port->port_usb->out;
struct usb_ep *ep;
int status;
unsigned started;
if (!port->port_usb || !port->port.tty)
return -EIO;
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)

View File

@@ -815,6 +815,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
* usb_gadget_activate() is called. For example, user mode components may
* need to be activated before the system can talk to hosts.
*
* This routine may sleep; it must not be called in interrupt context
* (such as from within a gadget driver's disconnect() callback).
*
* Returns zero on success, else negative errno.
*/
int usb_gadget_deactivate(struct usb_gadget *gadget)
@@ -854,6 +857,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
* This routine activates gadget which was previously deactivated with
* usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
*
* This routine may sleep; it must not be called in interrupt context.
*
* Returns zero on success, else negative errno.
*/
int usb_gadget_activate(struct usb_gadget *gadget)
@@ -1510,7 +1515,11 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
usb_gadget_disable_async_callbacks(udc);
if (udc->gadget->irq)
synchronize_irq(udc->gadget->irq);
mutex_unlock(&connect_lock);
udc->driver->unbind(udc->gadget);
mutex_lock(&connect_lock);
usb_gadget_udc_stop_locked(udc);
mutex_unlock(&connect_lock);

View File

@@ -58,6 +58,7 @@ struct dp_altmode {
enum dp_state state;
bool hpd;
bool pending_hpd;
struct mutex lock; /* device lock */
struct work_struct work;
@@ -141,8 +142,13 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
if (!ret)
if (!ret) {
dp->state = DP_STATE_CONFIGURE;
if (dp->hpd != hpd) {
dp->hpd = hpd;
dp->pending_hpd = true;
}
}
} else {
if (dp->hpd != hpd) {
dp->hpd = hpd;
@@ -157,6 +163,15 @@ static int dp_altmode_configured(struct dp_altmode *dp)
{
sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
/*
* If the DFP_D/UFP_D sends a change in HPD when first notifying the
* DisplayPort driver that it is connected, then we wait until
* configuration is complete to signal HPD.
*/
if (dp->pending_hpd) {
sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
dp->pending_hpd = false;
}
return dp_altmode_notify(dp);
}

View File

@@ -3205,23 +3205,12 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
return ret;
}
#define min_pps_apdo_current(x, y) \
min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y))
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
unsigned int i, j, max_mw = 0, max_mv = 0;
unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
unsigned int min_snk_mv, max_snk_mv;
unsigned int max_op_mv;
u32 pdo, src, snk;
unsigned int src_pdo = 0, snk_pdo = 0;
unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
unsigned int src_pdo = 0;
u32 pdo, src;
/*
* Select the source PPS APDO providing the most power while staying
* within the board's limits. We skip the first PDO as this is always
* 5V 3A.
*/
for (i = 1; i < port->nr_source_caps; ++i) {
pdo = port->source_caps[i];
@@ -3232,54 +3221,17 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
continue;
}
min_src_mv = pdo_pps_apdo_min_voltage(pdo);
max_src_mv = pdo_pps_apdo_max_voltage(pdo);
if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
continue;
src_ma = pdo_pps_apdo_max_current(pdo);
src_mw = (src_ma * max_src_mv) / 1000;
/*
* Now search through the sink PDOs to find a matching
* PPS APDO. Again skip the first sink PDO as this will
* always be 5V 3A.
*/
for (j = 1; j < port->nr_snk_pdo; j++) {
pdo = port->snk_pdo[j];
switch (pdo_type(pdo)) {
case PDO_TYPE_APDO:
if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
tcpm_log(port,
"Not PPS APDO (sink), ignoring");
continue;
}
min_snk_mv =
pdo_pps_apdo_min_voltage(pdo);
max_snk_mv =
pdo_pps_apdo_max_voltage(pdo);
break;
default:
tcpm_log(port,
"Not APDO type (sink), ignoring");
continue;
}
if (min_src_mv <= max_snk_mv &&
max_src_mv >= min_snk_mv) {
max_op_mv = min(max_src_mv, max_snk_mv);
src_mw = (max_op_mv * src_ma) / 1000;
/* Prefer higher voltages if available */
if ((src_mw == max_mw &&
max_op_mv > max_mv) ||
src_mw > max_mw) {
src_pdo = i;
snk_pdo = j;
max_mw = src_mw;
max_mv = max_op_mv;
}
}
max_op_ma = min(src_ma, port->pps_data.req_op_curr);
op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
if (op_mw > max_temp_mw) {
src_pdo = i;
max_temp_mw = op_mw;
}
break;
default:
tcpm_log(port, "Not APDO type (source), ignoring");
@@ -3289,16 +3241,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
if (src_pdo) {
src = port->source_caps[src_pdo];
snk = port->snk_pdo[snk_pdo];
port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
pdo_pps_apdo_min_voltage(snk));
port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
pdo_pps_apdo_max_voltage(snk));
port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
max(port->pps_data.req_min_volt,
port->pps_data.req_out_volt));
port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
port->pps_data.req_op_curr);
}
@@ -3416,32 +3362,16 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
enum pd_pdo_type type;
unsigned int src_pdo_index;
u32 pdo;
src_pdo_index = tcpm_pd_select_pps_apdo(port);
if (!src_pdo_index)
return -EOPNOTSUPP;
pdo = port->source_caps[src_pdo_index];
type = pdo_type(pdo);
switch (type) {
case PDO_TYPE_APDO:
if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
tcpm_log(port, "Invalid APDO selected!");
return -EINVAL;
}
max_mv = port->pps_data.req_max_volt;
max_ma = port->pps_data.req_max_curr;
out_mv = port->pps_data.req_out_volt;
op_ma = port->pps_data.req_op_curr;
break;
default:
tcpm_log(port, "Invalid PDO selected!");
return -EINVAL;
}
max_mv = port->pps_data.req_max_volt;
max_ma = port->pps_data.req_max_curr;
out_mv = port->pps_data.req_out_volt;
op_ma = port->pps_data.req_op_curr;
flags = RDO_USB_COMM | RDO_NO_SUSPEND;
@@ -5315,6 +5245,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, vbus drop expected */
break;
case SNK_HARD_RESET_WAIT_VBUS:
/* Do nothing, its OK to receive vbus off events */
break;
default:
if (port->pwr_role == TYPEC_SINK && port->attached)
tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
@@ -5366,6 +5300,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case SNK_DEBOUNCED:
/*Do nothing, still waiting for VSAFE5V for connect */
break;
case SNK_HARD_RESET_WAIT_VBUS:
/* Do nothing, its OK to receive vbus off events */
break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
@@ -5882,12 +5819,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
goto port_unlock;
}
if (req_out_volt < port->pps_data.min_volt ||
req_out_volt > port->pps_data.max_volt) {
ret = -EINVAL;
goto port_unlock;
}
target_mw = (port->current_limit * req_out_volt) / 1000;
if (target_mw < port->operating_snk_mw) {
ret = -EINVAL;
@@ -6397,11 +6328,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
ret = tcpm_psy_set_online(port, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (val->intval < port->pps_data.min_volt * 1000 ||
val->intval > port->pps_data.max_volt * 1000)
ret = -EINVAL;
else
ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (val->intval > port->pps_data.max_curr * 1000)

View File

@@ -775,8 +775,15 @@ int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
{
int err = -EAGAIN;
if (f2fs_has_inline_dentry(dir))
if (f2fs_has_inline_dentry(dir)) {
/*
* Should get i_xattr_sem to keep the lock order:
* i_xattr_sem -> inode_page lock used by f2fs_setxattr.
*/
f2fs_down_read(&F2FS_I(dir)->i_xattr_sem);
err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
f2fs_up_read(&F2FS_I(dir)->i_xattr_sem);
}
if (err == -EAGAIN)
err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);

View File

@@ -527,10 +527,12 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
if (!ipage)
f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
&entry, &base_addr, &base_size, &is_inline);
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (!ipage)
f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;

View File

@@ -208,6 +208,7 @@ int fuse_create_open_backing(
struct file *file, unsigned int flags, umode_t mode)
{
struct fuse_inode *dir_fuse_inode = get_fuse_inode(dir);
struct fuse_dentry *fuse_entry = get_fuse_dentry(entry);
struct fuse_dentry *dir_fuse_dentry = get_fuse_dentry(entry->d_parent);
struct dentry *backing_dentry = NULL;
struct inode *inode = NULL;
@@ -239,29 +240,28 @@ int fuse_create_open_backing(
if (err)
goto out;
if (get_fuse_dentry(entry)->backing_path.dentry)
path_put(&get_fuse_dentry(entry)->backing_path);
get_fuse_dentry(entry)->backing_path = (struct path) {
if (fuse_entry->backing_path.dentry)
path_put(&fuse_entry->backing_path);
fuse_entry->backing_path = (struct path) {
.mnt = dir_fuse_dentry->backing_path.mnt,
.dentry = backing_dentry,
};
path_get(&get_fuse_dentry(entry)->backing_path);
path_get(&fuse_entry->backing_path);
if (d_inode)
target_nodeid = get_fuse_inode(d_inode)->nodeid;
inode = fuse_iget_backing(dir->i_sb, target_nodeid,
get_fuse_dentry(entry)->backing_path.dentry->d_inode);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
fuse_entry->backing_path.dentry->d_inode);
if (!inode) {
err = -EIO;
goto out;
}
if (get_fuse_inode(inode)->bpf)
bpf_prog_put(get_fuse_inode(inode)->bpf);
get_fuse_inode(inode)->bpf = dir_fuse_inode->bpf;
if (get_fuse_inode(inode)->bpf)
bpf_prog_inc(dir_fuse_inode->bpf);
get_fuse_inode(inode)->bpf = fuse_entry->bpf;
fuse_entry->bpf = NULL;
newent = d_splice_alias(inode, entry);
if (IS_ERR(newent)) {
@@ -269,10 +269,12 @@ int fuse_create_open_backing(
goto out;
}
inode = NULL;
entry = newent ? newent : entry;
err = finish_open(file, entry, fuse_open_file_backing);
out:
iput(inode);
dput(backing_dentry);
return err;
}
@@ -295,44 +297,19 @@ void *fuse_create_open_finalize(
}
int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
fput(ff->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
.fh = ff->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
};
return 0;
}
int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
struct fuse_release_in *fri,
struct inode *inode, struct file *file)
{
struct fuse_file *fuse_file = file->private_data;
/* Always put backing file whatever bpf/userspace says */
fput(fuse_file->backing_file);
*fri = (struct fuse_release_in) {
.fh = ((struct fuse_file *)(file->private_data))->fh,
};
*fa = (struct fuse_bpf_args) {
.nodeid = get_fuse_inode(inode)->nodeid,
.opcode = FUSE_RELEASEDIR,
.opcode = S_ISDIR(inode->i_mode) ? FUSE_RELEASEDIR
: FUSE_RELEASE,
.in_numargs = 1,
.in_args[0].size = sizeof(*fri),
.in_args[0].value = fri,
@@ -342,15 +319,14 @@ int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
}
int fuse_release_backing(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
return 0;
}
void *fuse_release_finalize(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file)
struct inode *inode, struct fuse_file *ff)
{
fuse_file_free(file->private_data);
return NULL;
}
@@ -1004,6 +980,20 @@ long fuse_backing_ioctl(struct file *file, unsigned int command, unsigned long a
return ret;
}
int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl)
{
struct fuse_file *ff = file->private_data;
struct file *backing_file = ff->backing_file;
int error;
fl->fl_file = backing_file;
if (backing_file->f_op->flock)
error = backing_file->f_op->flock(backing_file, cmd, fl);
else
error = locks_lock_file_wait(backing_file, fl);
return error;
}
ssize_t fuse_backing_mmap(struct file *file, struct vm_area_struct *vma)
{
int ret;
@@ -1236,14 +1226,12 @@ int fuse_handle_bpf_prog(struct fuse_entry_bpf *feb, struct inode *parent,
}
/* Cannot change existing program */
if (*bpf && new_bpf) {
bpf_prog_put(new_bpf);
if (*bpf) {
if (new_bpf)
bpf_prog_put(new_bpf);
return new_bpf == *bpf ? 0 : -EINVAL;
}
if (*bpf)
bpf_prog_put(*bpf);
*bpf = new_bpf;
return 0;
}
@@ -1251,61 +1239,62 @@ int fuse_handle_bpf_prog(struct fuse_entry_bpf *feb, struct inode *parent,
struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir,
struct dentry *entry, unsigned int flags)
{
struct fuse_dentry *fd;
struct dentry *bd;
struct inode *inode, *backing_inode;
struct inode *d_inode = entry->d_inode;
struct fuse_dentry *fuse_entry;
struct dentry *backing_entry;
struct inode *inode = NULL, *backing_inode;
struct inode *entry_inode = entry->d_inode;
struct fuse_entry_out *feo = fa->out_args[0].value;
struct fuse_entry_bpf_out *febo = fa->out_args[1].value;
struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out);
struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf,
out);
int error = -1;
u64 target_nodeid = 0;
struct dentry *ret;
struct dentry *ret = NULL;
fd = get_fuse_dentry(entry);
if (!fd) {
fuse_entry = get_fuse_dentry(entry);
if (!fuse_entry) {
ret = ERR_PTR(-EIO);
goto out;
}
bd = fd->backing_path.dentry;
if (!bd) {
backing_entry = fuse_entry->backing_path.dentry;
if (!backing_entry) {
ret = ERR_PTR(-ENOENT);
goto out;
}
backing_inode = bd->d_inode;
if (!backing_inode) {
ret = 0;
goto out;
}
if (entry_inode)
target_nodeid = get_fuse_inode(entry_inode)->nodeid;
if (d_inode)
target_nodeid = get_fuse_inode(d_inode)->nodeid;
backing_inode = backing_entry->d_inode;
if (backing_inode)
inode = fuse_iget_backing(dir->i_sb, target_nodeid,
backing_inode);
inode = fuse_iget_backing(dir->i_sb, target_nodeid, backing_inode);
if (IS_ERR(inode)) {
ret = ERR_PTR(PTR_ERR(inode));
goto out;
}
error = fuse_handle_bpf_prog(feb, dir, &get_fuse_inode(inode)->bpf);
error = inode ?
fuse_handle_bpf_prog(feb, dir, &get_fuse_inode(inode)->bpf) :
fuse_handle_bpf_prog(feb, dir, &fuse_entry->bpf);
if (error) {
ret = ERR_PTR(error);
goto out;
}
error = fuse_handle_backing(feb, &get_fuse_inode(inode)->backing_inode, &fd->backing_path);
if (error) {
ret = ERR_PTR(error);
goto out;
if (inode) {
error = fuse_handle_backing(feb,
&get_fuse_inode(inode)->backing_inode,
&fuse_entry->backing_path);
if (error) {
ret = ERR_PTR(error);
goto out;
}
get_fuse_inode(inode)->nodeid = feo->nodeid;
ret = d_splice_alias(inode, entry);
if (!IS_ERR(ret))
inode = NULL;
}
get_fuse_inode(inode)->nodeid = feo->nodeid;
ret = d_splice_alias(inode, entry);
out:
iput(inode);
if (feb->backing_file)
fput(feb->backing_file);
return ret;

View File

@@ -312,7 +312,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
spin_unlock(&fi->lock);
}
kfree(forget);
if (ret == -ENOMEM)
if (ret == -ENOMEM || ret == -EINTR)
goto out;
if (ret || fuse_invalid_attr(&outarg.attr) ||
fuse_stale_inode(inode, outarg.generation, &outarg.attr))
@@ -355,9 +355,14 @@ static void fuse_dentry_release(struct dentry *dentry)
{
struct fuse_dentry *fd = dentry->d_fsdata;
#ifdef CONFIG_FUSE_BPF
if (fd && fd->backing_path.dentry)
path_put(&fd->backing_path);
if (fd && fd->bpf)
bpf_prog_put(fd->bpf);
#endif
kfree_rcu(fd, rcu);
}
#endif
@@ -495,7 +500,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
if (name->len > FUSE_NAME_MAX)
goto out;
forget = fuse_alloc_forget();
err = -ENOMEM;
if (!forget)
@@ -514,32 +518,34 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
err = -ENOENT;
if (!entry)
goto out_queue_forget;
goto out_put_forget;
err = -EINVAL;
backing_file = bpf_arg.backing_file;
if (!backing_file)
goto out_queue_forget;
goto out_put_forget;
if (IS_ERR(backing_file)) {
err = PTR_ERR(backing_file);
goto out_queue_forget;
goto out_put_forget;
}
backing_inode = backing_file->f_inode;
*inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode);
if (!*inode)
goto out;
goto out_put_forget;
err = fuse_handle_backing(&bpf_arg,
&get_fuse_inode(*inode)->backing_inode,
&get_fuse_dentry(entry)->backing_path);
if (err)
goto out;
err = fuse_handle_bpf_prog(&bpf_arg, NULL, &get_fuse_inode(*inode)->bpf);
if (err)
goto out;
if (!err)
err = fuse_handle_bpf_prog(&bpf_arg, NULL,
&get_fuse_inode(*inode)->bpf);
if (err) {
iput(*inode);
*inode = NULL;
goto out_put_forget;
}
} else
#endif
{
@@ -559,9 +565,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
}
err = -ENOMEM;
#ifdef CONFIG_FUSE_BPF
out_queue_forget:
#endif
if (!*inode && outarg->nodeid) {
fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1);
goto out;
@@ -1688,17 +1691,6 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
static int fuse_dir_release(struct inode *inode, struct file *file)
{
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_releasedir_initialize, fuse_release_backing,
fuse_release_finalize,
inode, file);
if (fer.ret)
return PTR_ERR(fer.result);
#endif
fuse_release_common(file, true);
return 0;
}

View File

@@ -104,25 +104,39 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
kfree(ra);
}
static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
static void fuse_file_put(struct inode *inode, struct fuse_file *ff,
bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
struct fuse_args *args = &ff->release_args->args;
struct fuse_args *args = &ff->release_args->args;
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
#endif
if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
/* Do nothing when client does not implement 'open' */
fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
} else {
args->end = fuse_release_end;
if (fuse_simple_background(ff->fm, args,
GFP_KERNEL | __GFP_NOFAIL))
fuse_release_end(ff->fm, args, -ENOTCONN);
}
kfree(ff);
if (!refcount_dec_and_test(&ff->count))
return;
#ifdef CONFIG_FUSE_BPF
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, ff);
if (fer.ret) {
fuse_release_end(ff->fm, args, 0);
} else
#endif
if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
/* Do nothing when client does not implement 'open' */
fuse_release_end(ff->fm, args, 0);
} else if (sync) {
fuse_simple_request(ff->fm, args);
fuse_release_end(ff->fm, args, 0);
} else {
args->end = fuse_release_end;
if (fuse_simple_background(ff->fm, args,
GFP_KERNEL | __GFP_NOFAIL))
fuse_release_end(ff->fm, args, -ENOTCONN);
}
kfree(ff);
}
struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid,
@@ -345,7 +359,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff,
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
fuse_file_put(ff, ff->fm->fc->destroy, isdir);
fuse_file_put(ra->inode, ff, ff->fm->fc->destroy, isdir);
}
void fuse_release_common(struct file *file, bool isdir)
@@ -363,17 +377,6 @@ static int fuse_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc = get_fuse_conn(inode);
#ifdef CONFIG_FUSE_BPF
struct fuse_err_ret fer;
fer = fuse_bpf_backing(inode, struct fuse_release_in,
fuse_release_initialize, fuse_release_backing,
fuse_release_finalize,
inode, file);
if (fer.ret)
return PTR_ERR(fer.result);
#endif
/* see fuse_vma_close() for !writeback_cache case */
if (fc->writeback_cache)
write_inode_now(inode, 1);
@@ -393,7 +396,7 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff,
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here"
*/
fuse_file_put(ff, true, false);
fuse_file_put(&fi->inode, ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
@@ -967,8 +970,11 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
unlock_page(page);
put_page(page);
}
if (ia->ff)
fuse_file_put(ia->ff, false, false);
if (ia->ff) {
WARN_ON(!mapping);
fuse_file_put(mapping ? mapping->host : NULL, ia->ff,
false, false);
}
fuse_io_free(ia);
}
@@ -1707,7 +1713,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
__free_page(ap->pages[i]);
if (wpa->ia.ff)
fuse_file_put(wpa->ia.ff, false, false);
fuse_file_put(wpa->inode, wpa->ia.ff, false, false);
kfree(ap->pages);
kfree(wpa);
@@ -1963,7 +1969,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
ff = __fuse_write_file_get(fi);
err = fuse_flush_times(inode, ff);
if (ff)
fuse_file_put(ff, false, false);
fuse_file_put(inode, ff, false, false);
return err;
}
@@ -2352,7 +2358,7 @@ static int fuse_writepages(struct address_space *mapping,
fuse_writepages_send(&data);
}
if (data.ff)
fuse_file_put(data.ff, false, false);
fuse_file_put(inode, data.ff, false, false);
kfree(data.orig_pages);
out:
@@ -2668,12 +2674,18 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
int err;
#ifdef CONFIG_FUSE_BPF
/* TODO - this is simply passthrough, not a proper BPF filter */
if (ff->backing_file)
return fuse_file_flock_backing(file, cmd, fl);
#endif
if (fc->no_flock) {
err = locks_lock_file_wait(file, fl);
} else {
struct fuse_file *ff = file->private_data;
/* emulate flock with POSIX locks */
ff->flock = true;

View File

@@ -76,7 +76,13 @@ struct fuse_dentry {
u64 time;
struct rcu_head rcu;
};
#ifdef CONFIG_FUSE_BPF
struct path backing_path;
/* bpf program *only* set for negative dentries */
struct bpf_prog *bpf;
#endif
};
static inline struct fuse_dentry *get_fuse_dentry(const struct dentry *entry)
@@ -1500,14 +1506,11 @@ void *fuse_link_finalize(struct fuse_bpf_args *fa, struct dentry *entry,
struct inode *dir, struct dentry *newent);
int fuse_release_initialize(struct fuse_bpf_args *fa, struct fuse_release_in *fri,
struct inode *inode, struct file *file);
int fuse_releasedir_initialize(struct fuse_bpf_args *fa,
struct fuse_release_in *fri,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
int fuse_release_backing(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
void *fuse_release_finalize(struct fuse_bpf_args *fa,
struct inode *inode, struct file *file);
struct inode *inode, struct fuse_file *ff);
int fuse_flush_initialize(struct fuse_bpf_args *fa, struct fuse_flush_in *ffi,
struct file *file, fl_owner_t id);
@@ -1633,6 +1636,7 @@ void *fuse_file_write_iter_finalize(struct fuse_bpf_args *fa,
long fuse_backing_ioctl(struct file *file, unsigned int command, unsigned long arg, int flags);
int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl);
ssize_t fuse_backing_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_file_fallocate_initialize(struct fuse_bpf_args *fa,

View File

@@ -113,6 +113,10 @@ static void fuse_free_inode(struct inode *inode)
kfree(fi->forget);
#ifdef CONFIG_FUSE_DAX
kfree(fi->dax);
#endif
#ifdef CONFIG_FUSE_BPF
if (fi->bpf)
bpf_prog_put(fi->bpf);
#endif
kmem_cache_free(fuse_inode_cachep, fi);
}
@@ -123,13 +127,6 @@ static void fuse_evict_inode(struct inode *inode)
/* Will write inode on close/munmap and in all other dirtiers */
WARN_ON(inode->i_state & I_DIRTY_INODE);
#ifdef CONFIG_FUSE_BPF
iput(fi->backing_inode);
if (fi->bpf)
bpf_prog_put(fi->bpf);
fi->bpf = NULL;
#endif
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (inode->i_sb->s_flags & SB_ACTIVE) {
@@ -149,6 +146,15 @@ static void fuse_evict_inode(struct inode *inode)
}
}
#ifdef CONFIG_FUSE_BPF
static void fuse_destroy_inode(struct inode *inode)
{
struct fuse_inode *fi = get_fuse_inode(inode);
iput(fi->backing_inode);
}
#endif
static int fuse_reconfigure(struct fs_context *fsc)
{
struct super_block *sb = fsc->root->d_sb;
@@ -1166,6 +1172,9 @@ static const struct export_operations fuse_export_operations = {
static const struct super_operations fuse_super_operations = {
.alloc_inode = fuse_alloc_inode,
#ifdef CONFIG_FUSE_BPF
.destroy_inode = fuse_destroy_inode,
#endif
.free_inode = fuse_free_inode,
.evict_inode = fuse_evict_inode,
.write_inode = fuse_write_inode,

View File

@@ -918,10 +918,10 @@ static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
return -EINVAL;
if (args.timeouts_array_size_out > INCFS_DATA_FILE_BLOCK_SIZE)
if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
return -EINVAL;
buffer = kzalloc(args.timeouts_array_size_out, GFP_NOFS);
buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
if (!buffer)
return -ENOMEM;

View File

@@ -5,9 +5,9 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o decompressor.o
squashfs-y += namei.o super.o symlink.o decompressor.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_FILE_CACHE) += file_cache.o
squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o page_actor.o
squashfs-$(CONFIG_SQUASHFS_FILE_DIRECT) += file_direct.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_SINGLE) += decompressor_single.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI) += decompressor_multi.o
squashfs-$(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU) += decompressor_multi_percpu.o

View File

@@ -17,6 +17,7 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
@@ -34,12 +35,15 @@ static int copy_bio_to_actor(struct bio *bio,
struct squashfs_page_actor *actor,
int offset, int req_length)
{
void *actor_addr = squashfs_first_page(actor);
void *actor_addr;
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
int copied_bytes = 0;
int actor_offset = 0;
squashfs_actor_nobuff(actor);
actor_addr = squashfs_first_page(actor);
if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all)))
return 0;
@@ -49,8 +53,9 @@ static int copy_bio_to_actor(struct bio *bio,
bytes_to_copy = min_t(int, bytes_to_copy,
req_length - copied_bytes);
memcpy(actor_addr + actor_offset, bvec_virt(bvec) + offset,
bytes_to_copy);
if (!IS_ERR(actor_addr))
memcpy(actor_addr + actor_offset, bvec_virt(bvec) +
offset, bytes_to_copy);
actor_offset += bytes_to_copy;
copied_bytes += bytes_to_copy;
@@ -72,10 +77,120 @@ static int copy_bio_to_actor(struct bio *bio,
return copied_bytes;
}
static int squashfs_bio_read_cached(struct bio *fullbio,
struct address_space *cache_mapping, u64 index, int length,
u64 read_start, u64 read_end, int page_count)
{
struct page *head_to_cache = NULL, *tail_to_cache = NULL;
int start_idx = 0, end_idx = 0;
struct bvec_iter_all iter_all;
struct bio *bio = NULL;
struct bio_vec *bv;
int idx = 0;
int err = 0;
bio_for_each_segment_all(bv, fullbio, iter_all) {
struct page *page = bv->bv_page;
if (page->mapping == cache_mapping) {
idx++;
continue;
}
/*
* We only use this when the device block size is the same as
* the page size, so read_start and read_end cover full pages.
*
* Compare these to the original required index and length to
* only cache pages which were requested partially, since these
* are the ones which are likely to be needed when reading
* adjacent blocks.
*/
if (idx == 0 && index != read_start)
head_to_cache = page;
else if (idx == page_count - 1 && index + length != read_end)
tail_to_cache = page;
if (!bio || idx != end_idx) {
struct bio *new = bio_clone_fast(fullbio,
GFP_NOIO, &fs_bio_set);
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
bio_chain(bio, new);
submit_bio(bio);
}
bio = new;
start_idx = idx;
}
idx++;
end_idx = idx;
}
if (bio) {
bio_trim(bio, start_idx * PAGE_SECTORS,
(end_idx - start_idx) * PAGE_SECTORS);
err = submit_bio_wait(bio);
bio_put(bio);
}
if (err)
return err;
if (head_to_cache) {
int ret = add_to_page_cache_lru(head_to_cache, cache_mapping,
read_start >> PAGE_SHIFT,
GFP_NOIO);
if (!ret) {
SetPageUptodate(head_to_cache);
unlock_page(head_to_cache);
}
}
if (tail_to_cache) {
int ret = add_to_page_cache_lru(tail_to_cache, cache_mapping,
(read_end >> PAGE_SHIFT) - 1,
GFP_NOIO);
if (!ret) {
SetPageUptodate(tail_to_cache);
unlock_page(tail_to_cache);
}
}
return 0;
}
static struct page *squashfs_get_cache_page(struct address_space *mapping,
pgoff_t index)
{
struct page *page;
if (!mapping)
return NULL;
page = find_get_page(mapping, index);
if (!page)
return NULL;
if (!PageUptodate(page)) {
put_page(page);
return NULL;
}
return page;
}
static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
struct bio **biop, int *block_offset)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
struct address_space *cache_mapping = msblk->cache_mapping;
const u64 read_start = round_down(index, msblk->devblksize);
const sector_t block = read_start >> msblk->devblksize_log2;
const u64 read_end = round_up(index + length, msblk->devblksize);
@@ -101,21 +216,33 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
for (i = 0; i < page_count; ++i) {
unsigned int len =
min_t(unsigned int, PAGE_SIZE - offset, total_len);
struct page *page = alloc_page(GFP_NOIO);
pgoff_t index = (read_start >> PAGE_SHIFT) + i;
struct page *page;
page = squashfs_get_cache_page(cache_mapping, index);
if (!page)
page = alloc_page(GFP_NOIO);
if (!page) {
error = -ENOMEM;
goto out_free_bio;
}
if (!bio_add_page(bio, page, len, offset)) {
error = -EIO;
goto out_free_bio;
}
/*
* Use the __ version to avoid merging since we need each page
* to be separate when we check for and avoid cached pages.
*/
__bio_add_page(bio, page, len, offset);
offset = 0;
total_len -= len;
}
error = submit_bio_wait(bio);
if (cache_mapping)
error = squashfs_bio_read_cached(bio, cache_mapping, index,
length, read_start, read_end,
page_count);
else
error = submit_bio_wait(bio);
if (error)
goto out_free_bio;

View File

@@ -20,6 +20,7 @@ struct squashfs_decompressor {
struct bio *, int, int, struct squashfs_page_actor *);
int id;
char *name;
int alloc_buffer;
int supported;
};

View File

@@ -39,6 +39,7 @@
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"
/*
* Locate cache slot in range [offset, index] for specified inode. If
@@ -494,7 +495,142 @@ out:
return 0;
}
static int squashfs_readahead_fragment(struct page **page,
unsigned int pages, unsigned int expected)
{
struct inode *inode = page[0]->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
squashfs_i(inode)->fragment_block,
squashfs_i(inode)->fragment_size);
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int error = buffer->error;
if (error)
goto out;
expected += squashfs_i(inode)->fragment_offset;
for (n = 0; n < pages; n++) {
unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
unsigned int offset = base + squashfs_i(inode)->fragment_offset;
if (expected > offset) {
unsigned int avail = min_t(unsigned int, expected -
offset, PAGE_SIZE);
squashfs_fill_page(page[n], buffer, offset, avail);
}
unlock_page(page[n]);
put_page(page[n]);
}
out:
squashfs_cache_put(buffer);
return error;
}
static void squashfs_readahead(struct readahead_control *ractl)
{
struct inode *inode = ractl->mapping->host;
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
size_t mask = (1UL << msblk->block_log) - 1;
unsigned short shift = msblk->block_log - PAGE_SHIFT;
loff_t start = readahead_pos(ractl) & ~mask;
size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
struct squashfs_page_actor *actor;
unsigned int nr_pages = 0;
struct page **pages;
int i, file_end = i_size_read(inode) >> msblk->block_log;
unsigned int max_pages = 1UL << shift;
readahead_expand(ractl, start, (len | mask) + 1);
pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
if (!pages)
return;
for (;;) {
pgoff_t index;
int res, bsize;
u64 block = 0;
unsigned int expected;
struct page *last_page;
expected = start >> msblk->block_log == file_end ?
(i_size_read(inode) & (msblk->block_size - 1)) :
msblk->block_size;
max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;
nr_pages = __readahead_batch(ractl, pages, max_pages);
if (!nr_pages)
break;
if (readahead_pos(ractl) >= i_size_read(inode))
goto skip_pages;
index = pages[0]->index >> shift;
if ((pages[nr_pages - 1]->index >> shift) != index)
goto skip_pages;
if (index == file_end && squashfs_i(inode)->fragment_block !=
SQUASHFS_INVALID_BLK) {
res = squashfs_readahead_fragment(pages, nr_pages,
expected);
if (res)
goto skip_pages;
continue;
}
bsize = read_blocklist(inode, index, &block);
if (bsize == 0)
goto skip_pages;
actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
expected);
if (!actor)
goto skip_pages;
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
last_page = squashfs_page_actor_free(actor);
if (res == expected) {
int bytes;
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (index == file_end && bytes && last_page)
memzero_page(last_page, bytes,
PAGE_SIZE - bytes);
for (i = 0; i < nr_pages; i++) {
flush_dcache_page(pages[i]);
SetPageUptodate(pages[i]);
}
}
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
}
kfree(pages);
return;
skip_pages:
for (i = 0; i < nr_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
}
kfree(pages);
}
const struct address_space_operations squashfs_aops = {
.readpage = squashfs_readpage
.readpage = squashfs_readpage,
.readahead = squashfs_readahead
};

View File

@@ -18,9 +18,6 @@
#include "squashfs.h"
#include "page_actor.h"
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
int pages, struct page **page, int bytes);
/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int expected)
@@ -33,7 +30,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
int start_index = target_page->index & ~mask;
int end_index = start_index | mask;
int i, n, pages, missing_pages, bytes, res = -ENOMEM;
int i, n, pages, bytes, res = -ENOMEM;
struct page **page;
struct squashfs_page_actor *actor;
void *pageaddr;
@@ -47,50 +44,38 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
if (page == NULL)
return res;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(page, pages, 0);
if (actor == NULL)
goto out;
/* Try to grab all the pages covered by the Squashfs block */
for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
for (i = 0, n = start_index; n <= end_index; n++) {
page[i] = (n == target_page->index) ? target_page :
grab_cache_page_nowait(target_page->mapping, n);
if (page[i] == NULL) {
missing_pages++;
if (page[i] == NULL)
continue;
}
if (PageUptodate(page[i])) {
unlock_page(page[i]);
put_page(page[i]);
page[i] = NULL;
missing_pages++;
continue;
}
i++;
}
if (missing_pages) {
/*
* Couldn't get one or more pages, this page has either
* been VM reclaimed, but others are still in the page cache
* and uptodate, or we're racing with another thread in
* squashfs_readpage also trying to grab them. Fall back to
* using an intermediate buffer.
*/
res = squashfs_read_cache(target_page, block, bsize, pages,
page, expected);
if (res < 0)
goto mark_errored;
pages = i;
/*
* Create a "page actor" which will kmap and kunmap the
* page cache pages appropriately within the decompressor
*/
actor = squashfs_page_actor_init_special(msblk, page, pages, expected);
if (actor == NULL)
goto out;
}
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
squashfs_page_actor_free(actor);
if (res < 0)
goto mark_errored;
@@ -99,12 +84,12 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
goto mark_errored;
}
/* Last page may have trailing bytes not filled */
/* Last page (if present) may have trailing bytes not filled */
bytes = res % PAGE_SIZE;
if (bytes) {
pageaddr = kmap_atomic(page[pages - 1]);
if (page[pages - 1]->index == end_index && bytes) {
pageaddr = kmap_local_page(page[pages - 1]);
memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
kunmap_atomic(pageaddr);
kunmap_local(pageaddr);
}
/* Mark pages as uptodate, unlock and release */
@@ -116,7 +101,6 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
put_page(page[i]);
}
kfree(actor);
kfree(page);
return 0;
@@ -135,40 +119,6 @@ mark_errored:
}
out:
kfree(actor);
kfree(page);
return res;
}
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
int pages, struct page **page, int bytes)
{
struct inode *i = target_page->mapping->host;
struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
block, bsize);
int res = buffer->error, n, offset = 0;
if (res) {
ERROR("Unable to read page, block %llx, size %x\n", block,
bsize);
goto out;
}
for (n = 0; n < pages && bytes > 0; n++,
bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
int avail = min_t(int, bytes, PAGE_SIZE);
if (page[n] == NULL)
continue;
squashfs_fill_page(page[n], buffer, offset, avail);
unlock_page(page[n]);
if (page[n] != target_page)
put_page(page[n]);
}
out:
squashfs_cache_put(buffer);
return res;
}

View File

@@ -119,10 +119,12 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
}
memcpy(data, buff, PAGE_SIZE);
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -139,5 +141,6 @@ const struct squashfs_decompressor squashfs_lz4_comp_ops = {
.decompress = lz4_uncompress,
.id = LZ4_COMPRESSION,
.name = "lz4",
.alloc_buffer = 0,
.supported = 1
};

View File

@@ -93,10 +93,12 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
buff = stream->output;
while (data) {
if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
if (!IS_ERR(data))
memcpy(data, buff, bytes);
break;
} else {
memcpy(data, buff, PAGE_SIZE);
if (!IS_ERR(data))
memcpy(data, buff, PAGE_SIZE);
buff += PAGE_SIZE;
bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
@@ -116,5 +118,6 @@ const struct squashfs_decompressor squashfs_lzo_comp_ops = {
.decompress = lzo_uncompress,
.id = LZO_COMPRESSION,
.name = "lzo",
.alloc_buffer = 0,
.supported = 1
};

View File

@@ -7,6 +7,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "page_actor.h"
/*
@@ -50,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
actor->tmp_buffer = NULL;
actor->squashfs_first_page = cache_first_page;
actor->squashfs_next_page = cache_next_page;
actor->squashfs_finish_page = cache_finish_page;
@@ -57,40 +60,75 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
}
/* Implementation of page_actor for decompressing directly into page cache. */
static void *handle_next_page(struct squashfs_page_actor *actor)
{
int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (actor->returned_pages == max_pages)
return NULL;
if ((actor->next_page == actor->pages) ||
(actor->next_index != actor->page[actor->next_page]->index)) {
actor->next_index++;
actor->returned_pages++;
actor->last_page = NULL;
return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
}
actor->next_index++;
actor->returned_pages++;
actor->last_page = actor->page[actor->next_page];
return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]);
}
static void *direct_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
return actor->pageaddr = kmap_atomic(actor->page[0]);
return handle_next_page(actor);
}
static void *direct_next_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_atomic(actor->pageaddr);
if (actor->pageaddr) {
kunmap_local(actor->pageaddr);
actor->pageaddr = NULL;
}
return actor->pageaddr = actor->next_page == actor->pages ? NULL :
kmap_atomic(actor->page[actor->next_page++]);
return handle_next_page(actor);
}
static void direct_finish_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_atomic(actor->pageaddr);
kunmap_local(actor->pageaddr);
}
struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page,
int pages, int length)
struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
struct page **page, int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
if (msblk->decompressor->alloc_buffer) {
actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (actor->tmp_buffer == NULL) {
kfree(actor);
return NULL;
}
} else
actor->tmp_buffer = NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
actor->pageaddr = NULL;
actor->last_page = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
actor->squashfs_first_page = direct_first_page;
actor->squashfs_next_page = direct_next_page;
actor->squashfs_finish_page = direct_finish_page;

View File

@@ -6,63 +6,38 @@
* Phillip Lougher <phillip@squashfs.org.uk>
*/
#ifndef CONFIG_SQUASHFS_FILE_DIRECT
struct squashfs_page_actor {
void **page;
int pages;
int length;
int next_page;
};
static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page,
int pages, int length)
{
struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL);
if (actor == NULL)
return NULL;
actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
return actor;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
actor->next_page = 1;
return actor->page[0];
}
static inline void *squashfs_next_page(struct squashfs_page_actor *actor)
{
return actor->next_page == actor->pages ? NULL :
actor->page[actor->next_page++];
}
static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
/* empty */
}
#else
struct squashfs_page_actor {
union {
void **buffer;
struct page **page;
};
void *pageaddr;
void *tmp_buffer;
void *(*squashfs_first_page)(struct squashfs_page_actor *);
void *(*squashfs_next_page)(struct squashfs_page_actor *);
void (*squashfs_finish_page)(struct squashfs_page_actor *);
struct page *last_page;
int pages;
int length;
int next_page;
int alloc_buffer;
int returned_pages;
pgoff_t next_index;
};
extern struct squashfs_page_actor *squashfs_page_actor_init(void **, int, int);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(struct page
**, int, int);
extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
int pages, int length);
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk,
struct page **page, int pages, int length);
static inline struct page *squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
struct page *last_page = actor->last_page;
kfree(actor->tmp_buffer);
kfree(actor);
return last_page;
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
return actor->squashfs_first_page(actor);
@@ -75,5 +50,8 @@ static inline void squashfs_finish_page(struct squashfs_page_actor *actor)
{
actor->squashfs_finish_page(actor);
}
#endif
static inline void squashfs_actor_nobuff(struct squashfs_page_actor *actor)
{
actor->alloc_buffer = 0;
}
#endif

View File

@@ -47,6 +47,7 @@ struct squashfs_sb_info {
struct squashfs_cache *block_cache;
struct squashfs_cache *fragment_cache;
struct squashfs_cache *read_page;
struct address_space *cache_mapping;
int next_meta_index;
__le64 *id_table;
__le64 *fragment_index;

View File

@@ -257,6 +257,19 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto failed_mount;
}
if (msblk->devblksize == PAGE_SIZE) {
struct inode *cache = new_inode(sb);
if (cache == NULL)
goto failed_mount;
set_nlink(cache, 1);
cache->i_size = OFFSET_MAX;
mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);
msblk->cache_mapping = cache->i_mapping;
}
msblk->stream = squashfs_decompressor_setup(sb, flags);
if (IS_ERR(msblk->stream)) {
err = PTR_ERR(msblk->stream);
@@ -383,6 +396,8 @@ failed_mount:
squashfs_cache_delete(msblk->fragment_cache);
squashfs_cache_delete(msblk->read_page);
squashfs_decompressor_destroy(msblk);
if (msblk->cache_mapping)
iput(msblk->cache_mapping->host);
kfree(msblk->inode_lookup_table);
kfree(msblk->fragment_index);
kfree(msblk->id_table);
@@ -478,6 +493,8 @@ static void squashfs_put_super(struct super_block *sb)
squashfs_cache_delete(sbi->fragment_cache);
squashfs_cache_delete(sbi->read_page);
squashfs_decompressor_destroy(sbi);
if (sbi->cache_mapping)
iput(sbi->cache_mapping->host);
kfree(sbi->id_table);
kfree(sbi->fragment_index);
kfree(sbi->meta_index);

View File

@@ -131,6 +131,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_SIZE;
stream->buf.out = squashfs_first_page(output);
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
goto finish;
}
for (;;) {
enum xz_ret xz_err;
@@ -156,7 +160,10 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->buf.out_pos == stream->buf.out_size) {
stream->buf.out = squashfs_next_page(output);
if (stream->buf.out != NULL) {
if (IS_ERR(stream->buf.out)) {
error = PTR_ERR(stream->buf.out);
break;
} else if (stream->buf.out != NULL) {
stream->buf.out_pos = 0;
total += PAGE_SIZE;
}
@@ -171,6 +178,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
return error ? error : total + stream->buf.out_pos;
@@ -183,5 +191,6 @@ const struct squashfs_decompressor squashfs_xz_comp_ops = {
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
.name = "xz",
.alloc_buffer = 1,
.supported = 1
};

View File

@@ -62,6 +62,11 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
stream->next_out = squashfs_first_page(output);
stream->avail_in = 0;
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
goto finish;
}
for (;;) {
int zlib_err;
@@ -85,7 +90,10 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (stream->avail_out == 0) {
stream->next_out = squashfs_next_page(output);
if (stream->next_out != NULL)
if (IS_ERR(stream->next_out)) {
error = PTR_ERR(stream->next_out);
break;
} else if (stream->next_out != NULL)
stream->avail_out = PAGE_SIZE;
}
@@ -107,6 +115,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
if (!error)
@@ -122,6 +131,7 @@ const struct squashfs_decompressor squashfs_zlib_comp_ops = {
.decompress = zlib_uncompress,
.id = ZLIB_COMPRESSION,
.name = "zlib",
.alloc_buffer = 1,
.supported = 1
};

View File

@@ -80,6 +80,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
out_buf.size = PAGE_SIZE;
out_buf.dst = squashfs_first_page(output);
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
goto finish;
}
for (;;) {
size_t zstd_err;
@@ -104,7 +108,10 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
if (out_buf.pos == out_buf.size) {
out_buf.dst = squashfs_next_page(output);
if (out_buf.dst == NULL) {
if (IS_ERR(out_buf.dst)) {
error = PTR_ERR(out_buf.dst);
break;
} else if (out_buf.dst == NULL) {
/* Shouldn't run out of pages
* before stream is done.
*/
@@ -129,6 +136,8 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
}
finish:
squashfs_finish_page(output);
return error ? error : total_out;
@@ -140,5 +149,6 @@ const struct squashfs_decompressor squashfs_zstd_comp_ops = {
.decompress = zstd_uncompress,
.id = ZSTD_COMPRESSION,
.name = "zstd",
.alloc_buffer = 1,
.supported = 1
};

View File

@@ -15,6 +15,7 @@
struct block_buffer {
u32 filled;
bool is_root_hash;
u8 *data;
};
@@ -26,6 +27,14 @@ static int hash_one_block(struct inode *inode,
struct block_buffer *next = cur + 1;
int err;
/*
* Safety check to prevent a buffer overflow in case of a filesystem bug
* that allows the file size to change despite deny_write_access(), or a
* bug in the Merkle tree logic itself
*/
if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
return -EINVAL;
/* Zero-pad the block if it's shorter than the block size. */
memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);
@@ -99,6 +108,7 @@ static int build_merkle_tree(struct file *filp,
}
}
buffers[num_levels].data = root_hash;
buffers[num_levels].is_root_hash = true;
BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
memcpy(level_offset, params->level_start, sizeof(level_offset));
@@ -349,6 +359,13 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
err = file_permission(filp, MAY_WRITE);
if (err)
return err;
/*
* __kernel_read() is used while building the Merkle tree. So, we can't
* allow file descriptors that were opened for ioctl access only, using
* the special nonstandard access mode 3. O_RDONLY only, please!
*/
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
if (IS_APPEND(inode))
return -EPERM;

View File

@@ -131,6 +131,9 @@ struct blk_crypto_profile {
* keyslots while ensuring that they can't be changed concurrently.
*/
struct rw_semaphore lock;
#ifdef CONFIG_LOCKDEP
struct lock_class_key lockdep_key;
#endif
/* List of idle slots, with least recently used slot at front */
wait_queue_head_t idle_slots_wait_queue;

Some files were not shown because too many files have changed in this diff Show More