Merge branch 'android14-5.15' into 'android14-5.15-lts'

This catches the -lts branch up with all of the recent changes that have
gone into the non-lts branch, INCLUDING the ABI update which we want
here to ensure that we do NOT break any newly added dependent symbols
(and to bring back in the reverts that were required before the ABI
break).

This includes the following commits:

5c138afc95 ANDROID: GKI: Include kheaders in gki_system_dlkm_modules
f025c9bd5a ANDROID: 6/16/2023 KMI update
d05f5134f2 ANDROID: GKI: provide more padding for struct usb_phy
e010dc3625 UPSTREAM: neighbour: fix unaligned access to pneigh_entry
abe4076974 UPSTREAM: net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down
00ce9cfb40 ANDROID: GKI: Update symbol list for Amlogic
45e4388a79 ANDROID: GKI enable CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
78f4244dc9 BACKPORT: cgroup/cpuset: Free DL BW in case can_attach() fails
72c8100467 BACKPORT: sched/deadline: Create DL BW alloc, free & check overflow interface
71cf567de1 FROMGIT: cgroup/cpuset: Iterate only if DEADLINE tasks are present
16a812c05e BACKPORT: sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
0dfe87dff2 BACKPORT: sched/cpuset: Bring back cpuset_mutex
2a32b2b9e8 FROMGIT: cgroup/cpuset: Rename functions dealing with DEADLINE accounting
1351520d67 ANDROID: 6/16/2023 KMI update
0270aeeb9f UPSTREAM: binder: fix UAF of alloc->vma in race with munmap()
b094b04779 UPSTREAM: binder: add lockless binder_alloc_(set|get)_vma()
acd8193fa0 UPSTREAM: Revert "android: binder: stop saving a pointer to the VMA"
45efb0a2fb UPSTREAM: Revert "binder_alloc: add missing mmap_lock calls when using the VMA"
bc4e0df357 UPSTREAM: usb: dwc3: fix gadget mode suspend interrupt handler issue
b03d86bd51 UPSTREAM: usb: gadget: Properly configure the device for remote wakeup
49cb2707a8 UPSTREAM: tcp: deny tcp_disconnect() when threads are waiting
744ad30e2d ANDROID: GKI: Add RTK STB KMI symbol list
faaabc7aa2 ANDROID: Remove all but top-level OWNERS
88bfbed31c ANDROID: Enable GKI Dr. No Enforcement

Change-Id: Id933d56267bad97aba87ffce873bedfbe1d3a2b1
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman
Date: 2023-06-18 12:26:55 +00:00
56 changed files with 4579 additions and 2138 deletions


@@ -42,6 +42,7 @@ filegroup(
"android/abi_gki_aarch64_lenovo",
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_rtkstb",
"android/abi_gki_aarch64_rtktv",
"android/abi_gki_aarch64_virtual_device",
"android/abi_gki_aarch64_xiaomi",


@@ -1 +0,0 @@
per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS


@@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS

OWNERS

@@ -1,2 +1,12 @@
# include OWNERS from the authoritative android-mainline branch
include kernel/common:android-mainline:/OWNERS
set noparent
# GKI Dr. No Enforcement is active on this branch. Approval of one of the Dr.
# No reviewers is required following a regular CodeReview+2 vote of a code
# reviewer.
#
# See the GKI release documentation (go/gki-dr-no) for further details.
#
# The expanded list of reviewers can be found at:
# https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo
include kernel/common:android-mainline:/OWNERS_DrNo


@@ -1,13 +0,0 @@
# If we ever add another OWNERS above this directory, it's likely to be
# more permissive, so don't inherit from it
set noparent
include kernel/common:android-mainline:/OWNERS_DrNo
# Downstream boards maintained directly in this manifest branch
per-file abi_gki_aarch64_cuttlefish = adelva@google.com, rammuthiah@google.com
per-file abi_gki_aarch64_goldfish = rkir@google.com
# per-file for review purposes
per-file gki_system_dlkm_modules = ramjiyani@google.com
per-file abi_gki_protected_exports_* = ramjiyani@google.com
per-file gki_*_protected_modules = ramjiyani@google.com

File diff suppressed because it is too large.


@@ -44,6 +44,7 @@
__bitmap_and
__bitmap_andnot
__bitmap_clear
bitmap_find_next_zero_area_off
bitmap_free
__bitmap_or
__bitmap_set
@@ -1039,6 +1040,7 @@
kstrtouint
kstrtouint_from_user
kstrtoull
kthread_bind
kthread_create_on_node
__kthread_init_worker
kthread_queue_work
@@ -1112,6 +1114,7 @@
__memset_io
memstart_addr
memunmap
migrate_pages
misc_deregister
misc_register
__mmap_lock_do_trace_acquire_returned
@@ -1298,6 +1301,7 @@
page_endio
__page_mapcount
page_mapping
__page_pinner_failure_detect
page_pinner_inited
__page_pinner_put_page
page_pool_alloc_pages
@@ -1459,6 +1463,7 @@
ptp_clock_index
ptp_clock_register
ptp_clock_unregister
putback_movable_pages
put_device
__put_net
__put_page
@@ -1615,6 +1620,7 @@
schedule
schedule_timeout
schedule_timeout_interruptible
schedule_timeout_killable
scnprintf
sdio_align_size
sdio_claim_host
@@ -2127,6 +2133,7 @@
vmalloc_to_pfn
vmalloc_user
vmap
vm_event_states
vmf_insert_pfn_prot
vm_insert_page
vm_unmap_aliases


@@ -0,0 +1,980 @@
[abi_symbol_list]
# commonly used symbols
alloc_chrdev_region
__alloc_pages
__alloc_skb
alloc_workqueue
__arch_copy_from_user
__arch_copy_to_user
arm64_const_caps_ready
arm64_use_ng_mappings
__arm_smccc_smc
bpf_trace_run1
bpf_trace_run2
cancel_work_sync
cdev_add
cdev_del
cdev_init
__cfi_slowpath_diag
__check_object_size
__class_create
class_destroy
clk_disable
clk_enable
clk_fixed_factor_ops
clk_get
clk_get_rate
__clk_is_enabled
clk_prepare
clk_put
clk_round_rate
clk_set_rate
clk_unprepare
cma_alloc
cma_release
compat_ptr_ioctl
complete
__const_udelay
cpu_hwcap_keys
cpu_hwcaps
cpu_number
__cpu_online_mask
debugfs_create_dir
debugfs_create_file
delayed_work_timer_fn
del_timer_sync
desc_to_gpio
destroy_workqueue
_dev_emerg
_dev_err
dev_err_probe
dev_get_regmap
device_create
device_create_file
device_destroy
device_register
device_remove_file
device_unregister
_dev_info
devm_add_action
devm_clk_get
devm_clk_get_optional
devm_clk_hw_register
devm_gpiod_get
devm_gpiod_put
devm_hwspin_lock_request_specific
devm_ioremap
devm_ioremap_resource
devm_kfree
devm_kmalloc
devm_mfd_add_devices
devm_of_clk_add_hw_provider
devm_pinctrl_get
__devm_regmap_init_i2c
devm_regulator_get
devm_regulator_register
devm_request_threaded_irq
__devm_reset_control_get
devm_reset_controller_register
dev_set_name
_dev_warn
dma_alloc_attrs
dma_buf_attach
dma_buf_detach
dma_buf_export
dma_buf_fd
dma_buf_map_attachment
dma_buf_put
dma_buf_set_name
dma_buf_unmap_attachment
dma_free_attrs
dma_get_sgtable_attrs
dma_heap_add
dma_heap_get_drvdata
dma_heap_get_name
dma_map_page_attrs
dma_map_sg_attrs
dma_map_sgtable
dma_mmap_attrs
dma_sync_sg_for_cpu
dma_sync_sg_for_device
dma_sync_single_for_cpu
dma_sync_single_for_device
dma_unmap_page_attrs
dma_unmap_sg_attrs
down
event_triggers_call
fasync_helper
_find_next_bit
find_pid_ns
finish_wait
free_irq
__free_pages
free_pages
generic_handle_irq
gen_pool_add_owner
gen_pool_alloc_algo_owner
gen_pool_best_fit
gen_pool_create
gen_pool_free_owner
gen_pool_set_algo
get_zeroed_page
gic_nonsecure_priorities
gpiod_direction_input
gpiod_direction_output
gpiod_get_value
gpiod_put
gpiod_set_debounce
gpiod_to_irq
__hwspin_unlock
i2c_del_driver
i2c_get_adapter
i2c_register_driver
i2c_transfer
idr_alloc
idr_destroy
idr_find
idr_remove
init_pid_ns
__init_swait_queue_head
init_timer_key
init_wait_entry
__init_waitqueue_head
iounmap
__irq_domain_add
irq_get_irq_data
irq_of_parse_and_map
__irq_resolve_mapping
irq_set_chained_handler_and_data
irq_set_chip_and_handler_name
irq_set_chip_data
irq_set_irq_type
is_vmalloc_addr
jiffies
jiffies_to_msecs
kasan_flag_enabled
kasprintf
kernel_kobj
kfree
kfree_skb_reason
kill_fasync
kimage_voffset
__kmalloc
kmalloc_caches
kmem_cache_alloc_trace
kobject_create_and_add
kobject_put
kobject_uevent
kobject_uevent_env
kstrtoll
kstrtouint
kstrtoull
ktime_get
__list_add_valid
__list_del_entry_valid
memcpy
__memcpy_fromio
memset
__memset_io
memstart_addr
misc_deregister
misc_register
mmc_of_parse
mmc_send_tuning
mmc_wait_for_cmd
mod_timer
module_layout
__msecs_to_jiffies
msleep
__mutex_init
mutex_lock
mutex_unlock
nonseekable_open
nvmem_cell_get
nvmem_cell_put
nvmem_cell_read
of_address_to_resource
of_clk_add_provider
of_clk_del_provider
of_clk_get
of_clk_src_simple_get
of_device_get_match_data
of_device_is_available
of_find_compatible_node
of_find_device_by_node
of_find_node_opts_by_path
of_find_property
of_get_child_by_name
of_get_next_child
of_get_property
of_get_regulator_init_data
of_hwspin_lock_get_id
of_iomap
of_match_device
of_match_node
of_nvmem_cell_get
of_parse_phandle
of_platform_populate
of_property_count_elems_of_size
of_property_read_string
of_property_read_string_helper
of_property_read_u32_index
of_property_read_variable_u32_array
param_ops_int
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pid_task
pinctrl_lookup_state
pinctrl_select_state
platform_device_register_full
platform_device_unregister
__platform_driver_register
platform_driver_unregister
platform_get_irq
platform_get_resource
__pm_runtime_disable
pm_runtime_enable
pm_runtime_force_resume
pm_runtime_force_suspend
__pm_runtime_resume
__pm_runtime_set_status
power_supply_register
power_supply_unregister
preempt_schedule_notrace
prepare_to_wait_event
print_hex_dump
_printk
put_device
__put_task_struct
queue_delayed_work_on
queue_work_on
raw_notifier_call_chain
raw_notifier_chain_register
_raw_spin_lock
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_irqrestore
rdev_get_drvdata
refcount_warn_saturate
register_reboot_notifier
regmap_read
regmap_update_bits_base
regmap_write
regulator_count_voltages
regulator_get_voltage
regulator_list_voltage
regulator_set_voltage
remap_pfn_range
request_threaded_irq
reset_control_assert
reset_control_deassert
__reset_control_get
reset_control_put
reset_control_reset
reset_control_status
rpmsg_send
rtc_time64_to_tm
rtc_tm_to_time64
schedule_timeout
scnprintf
sdhci_add_host
sdhci_pltfm_free
sdhci_pltfm_init
sdhci_remove_host
sdhci_reset
sdhci_resume_host
sdhci_set_bus_width
sdhci_set_uhs_signaling
sdhci_suspend_host
seq_lseek
seq_printf
seq_puts
seq_read
sg_free_table
sg_next
single_open
single_release
skb_dequeue
skb_put
skb_queue_tail
snprintf
soc_device_match
sprintf
__stack_chk_fail
strcmp
strlen
strncmp
strncpy
strscpy
syscon_node_to_regmap
syscon_regmap_lookup_by_phandle
sysfs_create_group
sysfs_remove_group
system_state
system_wq
tasklet_init
__tasklet_schedule
trace_event_buffer_commit
trace_event_buffer_reserve
trace_event_ignore_this_pid
trace_event_printf
trace_event_raw_init
trace_event_reg
trace_handle_return
trace_raw_output_prep
__ubsan_handle_cfi_check_fail_abort
unregister_chrdev_region
unregister_reboot_notifier
up
usleep_range_state
vabits_actual
vfree
vmalloc
vmalloc_to_pfn
vmap
vunmap
wait_for_completion
wait_for_completion_timeout
__wake_up
__warn_printk
# required by apw8889-regulator.ko
regcache_cache_bypass
# required by apw888x-regulator-core.ko
devm_regmap_field_alloc
rdev_get_dev
regmap_field_read
regmap_field_update_bits_base
regulator_disable_regmap
regulator_enable_regmap
regulator_get_voltage_sel_regmap
regulator_is_enabled_regmap
regulator_list_voltage_linear
regulator_map_voltage_iterate
regulator_map_voltage_linear
regulator_set_voltage_sel_regmap
strcasecmp
# required by clk-det.ko
__clk_get_hw
devm_clk_hw_unregister
devm_of_clk_del_provider
of_clk_hw_simple_get
# required by clk-rtd1619b-cc.ko
of_device_is_compatible
# required by clk-rtk.ko
clk_hw_get_name
clk_hw_get_num_parents
clk_hw_get_parent
__clk_mux_determine_rate
__devm_regmap_init_mmio_clk
of_clk_hw_onecell_get
# required by clk-tee.ko
__clk_get_name
clk_register_clkdev
devm_clk_register
driver_find_device
driver_register
driver_unregister
# required by cma_heap.ko
cma_get_name
dma_contiguous_default_area
sg_alloc_table_from_pages_segment
# required by gpio-rtd.ko
gpiochip_add_data_with_key
handle_simple_irq
irq_create_mapping_affinity
irq_domain_simple_ops
pinctrl_gpio_free
pinctrl_gpio_request
pinctrl_gpio_set_config
# required by hdcp.ko
cancel_delayed_work
# required by hdmitx.ko
bpf_trace_run4
cec_notifier_conn_register
cec_notifier_set_phys_addr
cec_notifier_set_phys_addr_from_edid
disable_irq
driver_for_each_device
enable_irq
gpiod_set_value
krealloc
reset_control_release
schedule
trace_print_symbols_seq
# required by i2c-rtk.ko
__device_reset
devm_gpiod_get_optional
i2c_add_numbered_adapter
i2c_del_adapter
i2c_generic_scl_recovery
i2c_parse_fw_timings
i2c_recover_bus
of_alias_get_id
# required by irq-realtek-mux.ko
handle_level_irq
irq_domain_xlate_onecell
irq_modify_status
platform_irqchip_probe
# required by nvmem_rtk-efuse.ko
devm_nvmem_register
__hwspin_lock_timeout
# required by optee.ko
alloc_pages_exact
__arm_smccc_hvc
bus_for_each_dev
completion_done
device_property_read_string
down_read
__find_vma
free_pages_exact
i2c_put_adapter
idr_get_next
ktime_get_real_ts64
memremap
memunmap
__mmap_lock_do_trace_acquire_returned
__mmap_lock_do_trace_released
__mmap_lock_do_trace_start_locking
msleep_interruptible
nr_cpu_ids
pfn_is_map_memory
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
up_read
wait_for_completion_interruptible
# required by pcie-rtd.ko
bitmap_find_free_region
bitmap_release_region
devm_kmemdup
devm_of_phy_get
devm_request_pci_bus_resources
handle_edge_irq
irq_chip_ack_parent
irq_chip_mask_parent
irq_chip_unmask_parent
irq_domain_get_irq_data
irq_domain_remove
irq_domain_set_info
irq_domain_update_bus_token
__irq_set_handler
irq_set_handler_data
of_irq_get
of_irq_parse_and_map_pci
of_pci_parse_bus_range
of_pci_range_parser_init
of_pci_range_parser_one
of_pci_range_to_resource
pci_add_resource
pci_add_resource_offset
pci_alloc_host_bridge
pci_bus_add_devices
pci_bus_assign_resources
pci_bus_size_bridges
pci_common_swizzle
pcie_bus_configure_settings
pcie_capability_clear_and_set_word
pcie_capability_read_dword
pcie_capability_read_word
pci_free_resource_list
pci_msi_create_irq_domain
pci_msi_mask_irq
pci_msi_unmask_irq
pci_scan_root_bus_bridge
phy_calibrate
phy_init
phy_power_off
phy_power_on
# required by phy-rtk-pcie.ko
__devm_of_phy_provider_register
devm_phy_create
of_phy_simple_xlate
# required by pinctrl-rtd.ko
pinconf_generic_dt_node_to_map
pinctrl_dev_get_drvdata
pinctrl_register
pinctrl_utils_free_map
# required by pwm-rtk.ko
kstrtoint
pwmchip_add
pwmchip_remove
sscanf
# required by r8169soc.ko
alloc_etherdev_mqs
consume_skb
crc32_le
dev_driver_string
device_set_wakeup_enable
ethtool_convert_link_mode_to_legacy_u32
ethtool_op_get_link
ethtool_op_get_ts_info
eth_type_trans
eth_validate_addr
free_netdev
init_net
kstrtou16
kstrtou8
mii_ethtool_get_link_ksettings
napi_complete_done
napi_disable
napi_enable
napi_gro_receive
__napi_schedule
napi_schedule_prep
__netdev_alloc_skb
netdev_err
netdev_info
netdev_notice
netdev_update_features
netif_carrier_off
netif_carrier_on
netif_device_attach
netif_device_detach
netif_napi_add
__netif_napi_del
netif_tx_wake_queue
net_ratelimit
of_get_mac_address
of_irq_parse_one
pm_schedule_suspend
proc_create_data
proc_get_parent_data
proc_mkdir_data
register_netdev
remove_proc_entry
rtnl_is_locked
seq_putc
skb_tstamp_tx
strsep
synchronize_net
synchronize_rcu
unregister_netdev
# required by rpmsg_rtk.ko
__memcpy_toio
of_get_next_available_child
rpmsg_register_device
strstr
# required by rtc-rtk.ko
device_init_wakeup
devm_rtc_device_register
mktime64
rtc_update_irq
rtc_valid_tm
# required by rtd-rng.ko
hwrng_register
hwrng_unregister
# required by rtk-gpu_wrap.ko
devm_of_platform_populate
devm_platform_ioremap_resource_byname
# required by rtk-ir.ko
devm_rc_allocate_device
devm_rc_register_device
devm_reset_control_array_get
rc_keydown
rc_repeat
register_pm_notifier
# required by rtk-jpu.ko
panic
# required by rtk-reboot.ko
register_restart_handler
# required by rtk-sdmmc.ko
del_timer
down_trylock
flush_workqueue
kmalloc_order_trace
mmc_add_host
mmc_alloc_host
mmc_detect_change
mmc_free_host
mmc_remove_host
mmc_request_done
sg_copy_from_buffer
sg_copy_to_buffer
sg_init_one
sg_init_table
# required by rtk_bootstatus.ko
syscon_regmap_lookup_by_phandle_args
sysfs_create_link
# required by rtk_cec.ko
cec_allocate_adapter
cec_delete_adapter
cec_notifier_cec_adap_register
cec_notifier_cec_adap_unregister
cec_notifier_parse_hdmi_phandle
cec_received_msg_ts
cec_register_adapter
cec_transmit_done_ts
cec_unregister_adapter
of_clk_get_by_name
# required by rtk_chip.ko
soc_device_register
soc_device_unregister
# required by rtk_cpu_volt_sel.ko
bus_register_notifier
dev_pm_opp_put_prop_name
dev_pm_opp_set_prop_name
get_cpu_device
platform_bus_type
# required by rtk_drm.ko
cancel_delayed_work_sync
component_add
component_bind_all
component_del
component_master_del
component_unbind_all
devm_gpio_request_one
dma_buf_begin_cpu_access
dma_buf_end_cpu_access
dma_buf_vmap
dma_buf_vunmap
down_timeout
drm_add_edid_modes
drm_atomic_helper_check
drm_atomic_helper_cleanup_planes
drm_atomic_helper_commit
drm_atomic_helper_commit_hw_done
drm_atomic_helper_commit_modeset_disables
drm_atomic_helper_commit_modeset_enables
drm_atomic_helper_commit_planes
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_crtc_destroy_state
__drm_atomic_helper_crtc_duplicate_state
drm_atomic_helper_crtc_reset
drm_atomic_helper_disable_plane
drm_atomic_helper_page_flip
__drm_atomic_helper_plane_destroy_state
__drm_atomic_helper_plane_duplicate_state
drm_atomic_helper_plane_reset
drm_atomic_helper_set_config
drm_atomic_helper_update_plane
drm_compat_ioctl
drm_connector_attach_content_protection_property
drm_connector_attach_encoder
drm_connector_attach_max_bpc_property
drm_connector_cleanup
drm_connector_init
drm_connector_update_edid_property
drm_crtc_cleanup
drm_crtc_handle_vblank
drm_crtc_init_with_planes
drm_crtc_send_vblank_event
drm_crtc_vblank_get
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_put
drm_crtc_wait_one_vblank
__drm_dbg
drm_debugfs_create_files
drm_detect_hdmi_monitor
drm_detect_monitor_audio
drm_dev_alloc
drm_dev_put
drm_dev_register
drm_dev_unregister
drm_dp_aux_register
drm_dp_dpcd_read
drm_dp_dpcd_write
drm_edid_is_valid
drm_edid_to_sad
drm_encoder_cleanup
drm_encoder_init
__drm_err
drm_format_info
drm_framebuffer_cleanup
drm_framebuffer_init
drm_gem_cma_vm_ops
drm_gem_create_mmap_offset
drm_gem_handle_create
drm_gem_mmap
drm_gem_mmap_obj
drm_gem_object_free
drm_gem_object_init
drm_gem_object_lookup
drm_gem_object_release
drm_gem_prime_fd_to_handle
drm_gem_prime_handle_to_fd
drm_gem_prime_import
drm_gem_vm_close
drm_gem_vm_open
drm_get_edid
drm_get_format_info
drm_hdcp_check_ksvs_revoked
drm_hdcp_update_content_protection
drm_hdmi_avi_infoframe_from_display_mode
drm_helper_hpd_irq_event
drm_helper_mode_fill_fb_struct
drm_helper_probe_single_connector_modes
drm_ioctl
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
drm_match_cea_mode
drmm_mode_config_init
drm_mode_config_reset
drm_mode_object_find
drm_modeset_lock
drm_modeset_unlock
drm_object_attach_property
drm_of_component_probe
drm_of_find_possible_crtcs
drm_open
drm_plane_cleanup
drm_plane_create_zpos_immutable_property
drm_poll
drm_prime_gem_destroy
drm_property_create_enum
drm_property_create_range
drm_read
drm_release
drm_scdc_set_high_tmds_clock_ratio
drm_scdc_set_scrambling
drm_universal_plane_init
drm_vblank_init
gpiod_get_raw_value
gpio_to_desc
hdmi_avi_infoframe_pack
kmemdup
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get_raw
of_get_named_gpio_flags
__platform_register_drivers
platform_unregister_drivers
_raw_spin_lock_irq
_raw_spin_unlock_irq
schedule_timeout_uninterruptible
__sw_hweight8
sysfs_create_files
wake_up_process
# required by rtk_fwdbg.ko
__register_chrdev
# required by rtk_gpc.ko
atomic_notifier_chain_register
clk_bulk_disable
clk_bulk_enable
clk_bulk_prepare
clk_bulk_unprepare
devm_clk_bulk_get_all
of_genpd_add_provider_simple
panic_notifier_list
platform_get_resource_byname
pm_genpd_init
pm_genpd_remove
# required by rtk_gpio_manager.ko
gpiod_get_array
gpiod_put_array
# required by rtk_hwspinlock.ko
devm_hwspin_lock_register
devm_platform_get_and_ioremap_resource
# required by rtk_krpc_agent.ko
get_device
rpmsg_create_ept
rpmsg_destroy_ept
# required by rtk_lsadc0.ko
devm_iio_device_alloc
iio_device_free
__iio_device_register
iio_device_unregister
iio_get_time_ns
iio_push_event
# required by rtk_mcp.ko
__get_free_pages
__hwspin_trylock
# required by rtk_media_heaps.ko
__bitmap_clear
__bitmap_complement
bitmap_find_next_zero_area_off
bitmap_free
__bitmap_or
__bitmap_set
__bitmap_weight
bitmap_zalloc
class_create_file_ns
debugfs_attr_read
debugfs_attr_write
debugfs_create_symlink
devres_add
__devres_alloc_node
devres_find
devres_release
dma_buf_get
dma_heap_get_dev
dma_set_coherent_mask
dma_set_mask
_find_first_bit
_find_last_bit
generic_file_llseek
gen_pool_avail
gen_pool_size
hex_dump_to_buffer
list_sort
no_llseek
of_reserved_mem_lookup
__rcu_read_lock
__rcu_read_unlock
sg_alloc_table
simple_attr_open
simple_attr_read
simple_attr_release
simple_attr_write
simple_strtoul
strcpy
strlcpy
__sw_hweight64
__traceiter_android_vh_dmabuf_heap_flags_validation
__tracepoint_android_vh_dmabuf_heap_flags_validation
tracepoint_probe_register
vmalloc_to_page
# required by rtk_pm_alarm.ko
rtc_class_open
rtc_read_time
rtc_set_time
rtc_tm_to_ktime
# required by rtk_pm_hifi.ko
regulator_suspend_disable
regulator_suspend_enable
# required by rtk_pm_suspend.ko
__ioremap
memchr
# required by rtk_rpc_mem.ko
of_platform_depopulate
_raw_spin_lock_bh
_raw_spin_unlock_bh
__register_rpmsg_driver
unregister_rpmsg_driver
# required by rtk_sb2.ko
regmap_bulk_write
# required by rtk_tee_mem_api.ko
platform_find_device_by_driver
raw_notifier_chain_unregister
# required by rtk_tp.ko
of_n_addr_cells
__pm_runtime_idle
# required by rtk_urpc_service.ko
cdev_alloc
# required by rtk_wdt.ko
platform_get_irq_optional
watchdog_init_timeout
watchdog_register_device
watchdog_set_restart_priority
watchdog_unregister_device
# required by sdhci-of-rtkstb.ko
mmc_gpio_get_cd
sdhci_set_clock
sdhci_set_ios
# required by sdhci-rtk.ko
sdhci_adma_write_desc
sdhci_calc_clk
sdhci_enable_clk
# required by snd-realtek-notify.ko
gpiod_count
gpiod_get_index
of_pm_clk_add_clks
of_property_match_string
pm_clk_create
pm_clk_destroy
# required by snd-soc-realtek.ko
hrtimer_cancel
hrtimer_forward
__hrtimer_get_remaining
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
ktime_get_ts64
__ndelay
param_array_ops
param_ops_bool
snd_card_free
snd_card_new
snd_card_register
snd_ctl_add
snd_ctl_new1
snd_pcm_add_chmap_ctls
snd_pcm_alt_chmaps
snd_pcm_lib_ioctl
snd_pcm_new
snd_pcm_period_elapsed
snd_pcm_set_ops
snd_pcm_suspend_all
# required by tee.ko
add_uevent_var
anon_inode_getfd
bus_register
bus_unregister
cdev_device_add
cdev_device_del
class_find_device
crypto_alloc_shash
crypto_destroy_tfm
crypto_shash_final
crypto_shash_update
device_initialize
gen_pool_destroy
gen_pool_virt_to_phys
get_kernel_pages
in_egroup_p
page_pinner_inited
__page_pinner_put_page
pin_user_pages_fast
__put_page
unpin_user_pages
uuid_null


@@ -27,6 +27,7 @@ drivers/net/usb/usbnet.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko
kernel/kheaders.ko
lib/crypto/libarc4.ko
mm/zsmalloc.ko
net/6lowpan/6lowpan.ko


@@ -1 +0,0 @@
include ../arm64/OWNERS


@@ -1,4 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file {include,kernel,kvm,lib}/**=mzyngier@google.com,willdeacon@google.com
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS


@@ -194,6 +194,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y


@@ -1,3 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS


@@ -180,6 +180,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y


@@ -1,2 +0,0 @@
bvanassche@google.com
jaegeuk@google.com


@@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
KMI_GENERATION=9
KMI_GENERATION=11
LLVM=1
DEPMOD=depmod


@@ -1 +0,0 @@
ardb@google.com


@@ -1,6 +0,0 @@
per-file base/**=gregkh@google.com,saravanak@google.com
per-file block/**=akailash@google.com
per-file md/**=akailash@google.com,paullawrence@google.com
per-file net/**=file:/net/OWNERS
per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
per-file {tty,usb}/**=gregkh@google.com


@@ -213,8 +213,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
mmap_read_lock(mm);
vma = vma_lookup(mm, alloc->vma_addr);
mmap_write_lock(mm);
vma = alloc->vma;
}
if (!vma && need_mm) {
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return 0;
@@ -304,35 +304,24 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
mmap_read_unlock(mm);
mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
}
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
struct vm_area_struct *vma)
{
unsigned long vm_start = 0;
if (vma) {
vm_start = vma->vm_start;
mmap_assert_write_locked(alloc->vma_vm_mm);
}
alloc->vma_addr = vm_start;
/* pairs with smp_load_acquire in binder_alloc_get_vma() */
smp_store_release(&alloc->vma, vma);
}
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
struct vm_area_struct *vma = NULL;
if (alloc->vma_addr)
vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
return vma;
/* pairs with smp_store_release in binder_alloc_set_vma() */
return smp_load_acquire(&alloc->vma);
}
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -395,15 +384,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
mmap_read_lock(alloc->vma_vm_mm);
/* Check binder_alloc is fully initialized */
if (!binder_alloc_get_vma(alloc)) {
mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
mmap_read_unlock(alloc->vma_vm_mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -794,6 +781,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
/* Signal binder_alloc is fully initialized */
binder_alloc_set_vma(alloc, vma);
return 0;
@@ -824,8 +813,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma_addr &&
vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -932,25 +920,17 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
mmap_read_lock(alloc->vma_vm_mm);
if (binder_alloc_get_vma(alloc) == NULL) {
mmap_read_unlock(alloc->vma_vm_mm);
goto uninitialized;
if (binder_alloc_get_vma(alloc) != NULL) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}
}
mmap_read_unlock(alloc->vma_vm_mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}
uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
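A note on the lockless pattern above, since it is the heart of the UAF fix: the writer publishes alloc->vma with smp_store_release() while holding the mmap write lock, and readers use smp_load_acquire() with no lock at all, so any reader that observes a non-NULL pointer is also guaranteed to observe the fully initialized allocator state. A minimal userspace C11 model of that release/acquire pairing (names are illustrative, not binder's):

#include <stdatomic.h>

struct area { unsigned long start; };     /* stands in for the vma */

static _Atomic(struct area *) published;  /* stands in for alloc->vma */

/* Writer (binder_alloc_set_vma() analogue): the caller fully
 * initializes *a, then publishes the pointer with release semantics. */
void publish_area(struct area *a)
{
	atomic_store_explicit(&published, a, memory_order_release);
}

/* Reader (binder_alloc_get_vma() analogue): the acquire load pairs with
 * the release store, so a non-NULL result implies the initialization is
 * visible, without taking any lock. */
struct area *lookup_area(void)
{
	return atomic_load_explicit(&published, memory_order_acquire);
}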


@@ -100,7 +100,7 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
unsigned long vma_addr;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void __user *buffer;
struct list_head buffers;


@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
if (!binder_selftest_run)
return;
mutex_lock(&binder_selftest_lock);
if (!binder_selftest_run || !alloc->vma_addr)
if (!binder_selftest_run || !alloc->vma)
goto done;
pr_info("STARTED\n");
binder_selftest_alloc_offset(alloc, end_offset, 0);


@@ -1105,6 +1105,7 @@ struct dwc3_scratchpad_array {
* 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
* @suspended: set to track suspend event due to U3/L2.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
* @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1318,6 +1319,7 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
unsigned suspended:1;
u16 imod_interval;


@@ -3708,6 +3708,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
{
int reg;
dwc->suspended = false;
dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -3728,6 +3730,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
dwc->suspended = false;
/*
* Ideally, dwc3_reset_gadget() would trigger the function
* drivers to stop any active transfers through ep disable.
@@ -3954,6 +3958,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
{
dwc->suspended = false;
/*
* TODO take core out of low power mode when that's
* implemented.
@@ -4069,8 +4075,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
{
enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
dwc->suspended = true;
dwc3_suspend_gadget(dwc);
}
dwc->link_state = next;
}
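The new @suspended bit is a simple one-shot latch: it is set on the first U3/L2 suspend event and cleared by any event that implies the link is active again (disconnect, reset, wakeup), so dwc3_suspend_gadget() runs once per suspend episode instead of on every repeated U3 link-state notification. A stripped-down sketch of the latch (illustrative names, not the driver's API):

#include <stdbool.h>

struct link_state { bool suspended; };

static void on_u3_event(struct link_state *l)
{
	if (!l->suspended) {
		l->suspended = true;
		/* notify the gadget driver exactly once per episode */
	}
}

static void on_active_event(struct link_state *l) /* reset/disconnect/wakeup */
{
	l->suspended = false;                     /* re-arm the latch */
}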


@@ -511,6 +511,19 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
return min(val, 900U) / 8;
}
void check_remote_wakeup_config(struct usb_gadget *g,
struct usb_configuration *c)
{
if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes) {
/* Reset the rw bit if gadget is not capable of it */
if (!g->wakeup_capable && g->ops->set_remote_wakeup) {
WARN(c->cdev, "Clearing wakeup bit for config c.%d\n",
c->bConfigurationValue);
c->bmAttributes &= ~USB_CONFIG_ATT_WAKEUP;
}
}
}
static int config_buf(struct usb_configuration *config,
enum usb_device_speed speed, void *buf, u8 type)
{
@@ -958,6 +971,11 @@ static int set_config(struct usb_composite_dev *cdev,
power = min(power, 500U);
else
power = min(power, 900U);
if (USB_CONFIG_ATT_WAKEUP & c->bmAttributes)
usb_gadget_set_remote_wakeup(gadget, 1);
else
usb_gadget_set_remote_wakeup(gadget, 0);
done:
if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
usb_gadget_set_selfpowered(gadget);


@@ -1416,6 +1416,9 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
if (gadget_is_otg(gadget))
c->descriptors = otg_desc;
/* Properly configure the bmAttributes wakeup bit */
check_remote_wakeup_config(gadget, c);
cfg = container_of(c, struct config_usb_cfg, c);
if (!list_empty(&cfg->string_list)) {
i = 0;


@@ -515,6 +515,33 @@ out:
}
EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
/**
* usb_gadget_set_remote_wakeup - configures the device remote wakeup feature.
* @gadget:the device being configured for remote wakeup
* @set:value to be configured.
*
* set to one to enable remote wakeup feature and zero to disable it.
*
* returns zero on success, else negative errno.
*/
int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
{
int ret = 0;
if (!gadget->ops->set_remote_wakeup) {
ret = -EOPNOTSUPP;
goto out;
}
ret = gadget->ops->set_remote_wakeup(gadget, set);
out:
trace_usb_gadget_set_remote_wakeup(gadget, ret);
return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_set_remote_wakeup);
/**
* usb_gadget_set_selfpowered - sets the device selfpowered feature.
* @gadget:the device being declared as self-powered


@@ -91,6 +91,11 @@ DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
TP_ARGS(g, ret)
);
DEFINE_EVENT(udc_log_gadget, usb_gadget_set_remote_wakeup,
TP_PROTO(struct usb_gadget *g, int ret),
TP_ARGS(g, ret)
);
DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
TP_PROTO(struct usb_gadget *g, int ret),
TP_ARGS(g, ret)
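For a controller to benefit from this plumbing it must advertise the capability and supply the new op; check_remote_wakeup_config() above only leaves the bmAttributes wakeup bit set when both are present. A hypothetical (not in-tree) UDC wiring might look like:

#include <linux/usb/gadget.h>

/* Hypothetical controller callback: program the hardware so resume
 * signalling is permitted (set != 0) or blocked (set == 0). */
static int my_udc_set_remote_wakeup(struct usb_gadget *g, int set)
{
	/* write controller-specific wakeup-enable registers here */
	return 0;
}

static const struct usb_gadget_ops my_udc_ops = {
	/* ... get_frame, wakeup, pullup, ... */
	.set_remote_wakeup = my_udc_set_remote_wakeup,
};

/* At probe time such a driver would also set gadget->wakeup_capable = 1
 * so composite keeps USB_CONFIG_ATT_WAKEUP in the config descriptor. */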


@@ -1 +0,0 @@
per-file {crypto,verity}/**=ebiggers@google.com


@@ -1 +0,0 @@
jaegeuk@google.com


@@ -1 +0,0 @@
balsini@google.com


@@ -1,2 +0,0 @@
akailash@google.com
paullawrence@google.com


@@ -1 +0,0 @@
per-file net/**=file:/net/OWNERS


@@ -1,4 +0,0 @@
per-file bio.h=file:/block/OWNERS
per-file blk*.h=file:/block/OWNERS
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file net**=file:/net/OWNERS


@@ -56,6 +56,10 @@ extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -177,6 +181,11 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{


@@ -1829,7 +1829,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
extern bool cpupri_check_rt(void);
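The alloc/free pair declared here is consumed by the cpuset attach path shown later in this merge: bandwidth for all migrating DEADLINE tasks is reserved up front against one destination CPU's root domain, and released again if the attach is cancelled. A kernel-style sketch of the calling convention (not a new interface, just the protocol):

#include <linux/sched.h>

static int reserve_dl_bw_for_attach(int dest_cpu, u64 sum_dl_bw)
{
	int ret;

	if (!sum_dl_bw)                 /* no DEADLINE tasks to migrate */
		return 0;

	ret = dl_bw_alloc(dest_cpu, sum_dl_bw); /* overflow-checked reserve */
	if (ret)
		return ret;             /* would overcommit the root domain */

	/* ... migrate the tasks; if the attach is cancelled instead, the
	 * caller must undo the reservation: dl_bw_free(dest_cpu, sum_dl_bw); */
	return 0;
}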


@@ -426,6 +426,8 @@ extern int composite_dev_prepare(struct usb_composite_driver *composite,
extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
struct usb_ep *ep0);
void composite_dev_cleanup(struct usb_composite_dev *cdev);
void check_remote_wakeup_config(struct usb_gadget *g,
struct usb_configuration *c);
static inline struct usb_composite_driver *to_cdriver(
struct usb_gadget_driver *gdrv)


@@ -317,6 +317,7 @@ struct usb_udc;
struct usb_gadget_ops {
int (*get_frame)(struct usb_gadget *);
int (*wakeup)(struct usb_gadget *);
int (*set_remote_wakeup)(struct usb_gadget *, int set);
int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
int (*vbus_session) (struct usb_gadget *, int is_active);
int (*vbus_draw) (struct usb_gadget *, unsigned mA);
@@ -396,6 +397,8 @@ struct usb_gadget_ops {
* @connected: True if gadget is connected.
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
* @wakeup_capable: True if gadget is capable of sending remote wakeup.
* @wakeup_armed: True if gadget is armed by the host for remote wakeup.
* @irq: the interrupt number for device controller.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
@@ -456,6 +459,8 @@ struct usb_gadget {
unsigned deactivated:1;
unsigned connected:1;
unsigned lpm_capable:1;
unsigned wakeup_capable:1;
unsigned wakeup_armed:1;
int irq;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -611,6 +616,7 @@ static inline int gadget_is_otg(struct usb_gadget *g)
#if IS_ENABLED(CONFIG_USB_GADGET)
int usb_gadget_frame_number(struct usb_gadget *gadget);
int usb_gadget_wakeup(struct usb_gadget *gadget);
int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set);
int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
int usb_gadget_vbus_connect(struct usb_gadget *gadget);
@@ -626,6 +632,8 @@ static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set)
{ return 0; }
static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
{ return 0; }
static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)


@@ -159,7 +159,18 @@ struct usb_phy {
enum usb_charger_type (*charger_detect)(struct usb_phy *x);
ANDROID_VENDOR_DATA(1);
/*
* Slot 0 here is reserved for a notify_port_status callback addition that narrowly
* missed the ABI freeze deadline due to upstream review discussions. See
* https://lore.kernel.org/linux-usb/20230607062500.24669-1-stanley_chang@realtek.com/
* for details. All other slots are for "normal" future ABI breaks in LTS updates.
*/
ANDROID_KABI_RESERVE(0);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
/* for board-specific init logic */
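For readers unfamiliar with the padding mechanism: ANDROID_KABI_RESERVE(n) plants a u64 placeholder while the KMI is frozen, and a later LTS update consumes the slot in place with ANDROID_KABI_USE(n, ...), leaving sizeof() and every member offset untouched. A sketch (the callback name and signature are hypothetical, standing in for the pending notify_port_status addition):

#include <linux/android_kabi.h>

struct example_phy {
	int mode;
	ANDROID_KABI_RESERVE(0);        /* u64 of padding while frozen */
};

/* After the out-of-tree change lands, the slot is consumed in place: */
struct example_phy_v2 {
	int mode;
	ANDROID_KABI_USE(0, void (*notify_port_status)(void *phy));
};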


@@ -180,7 +180,7 @@ struct pneigh_entry {
struct net_device *dev;
u8 flags;
u8 protocol;
u8 key[];
u32 key[];
};
/*
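Why the element type matters: a flexible array inherits its alignment from its element type, so u8 key[] may begin at any byte offset while u32 key[] is padded up to a 4-byte boundary, making the word-sized accesses the neighbour code performs naturally aligned. A reduced standalone illustration (not the full pneigh_entry layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct p_u8  { void *dev; uint8_t flags, protocol; uint8_t  key[]; };
struct p_u32 { void *dev; uint8_t flags, protocol; uint32_t key[]; };

int main(void)
{
	/* On a typical LP64 ABI this prints 10 and 12. */
	printf("u8 key at %zu, u32 key at %zu\n",
	       offsetof(struct p_u8, key), offsetof(struct p_u32, key));
	return 0;
}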


@@ -54,7 +54,7 @@ struct netns_sysctl_ipv6 {
int seg6_flowlabel;
u32 ioam6_id;
u64 ioam6_id_wide;
bool skip_notify_on_dev_down;
int skip_notify_on_dev_down;
u8 fib_notify_on_flag_change;
ANDROID_KABI_RESERVE(1);
};
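The underlying mismatch: the sysctl for this knob is registered with an int-sized handler (.maxlen = sizeof(int)), so writing through a pointer to a one-byte bool lets the handler store past the member. A reduced userspace model of the clobbering (not the kernel's actual table):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ns_like {
	bool skip_notify_on_dev_down;   /* 1 byte of real storage */
	char neighbour[3];              /* bytes an int-sized store tramples */
};

int main(void)
{
	struct ns_like s = { .neighbour = "hi" };
	int one = 1;

	/* models proc_dointvec() writing sizeof(int) through the pointer */
	memcpy(&s.skip_notify_on_dev_down, &one, sizeof(one));
	printf("neighbour is now \"%s\"\n", s.neighbour);  /* clobbered */
	return 0;
}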


@@ -336,6 +336,7 @@ struct bpf_local_storage;
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
* @sk_wait_pending: number of threads blocked on this socket
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate there is buffer sending space available
@@ -420,6 +421,7 @@ struct sock {
unsigned int sk_napi_id;
#endif
int sk_rcvbuf;
int sk_wait_pending;
struct sk_filter __rcu *sk_filter;
union {
@@ -1136,6 +1138,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
({ int __rc; \
__sk->sk_wait_pending++; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1145,6 +1148,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
__sk->sk_wait_pending--; \
__rc = __condition; \
__rc; \
})
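The counter's consumer is not in this hunk: per the upstream fix, tcp_disconnect() refuses to run while sk_wait_pending is non-zero, since tearing down and re-initializing socket state under threads parked in sk_wait_event() is what caused the races reported upstream. Schematically (the real check lives in tcp_disconnect()):

#include <net/sock.h>

static int disconnect_allowed(const struct sock *sk)
{
	if (sk->sk_wait_pending)        /* threads inside sk_wait_event() */
		return -EBUSY;
	return 0;
}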


@@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS


@@ -1,3 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file fuse**=file:/fs/fuse/OWNERS
per-file net**=file:/net/OWNERS


@@ -56,6 +56,7 @@
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/sched/deadline.h>
#include <linux/psi.h>
#include <net/sock.h>
@@ -6468,6 +6469,9 @@ void cgroup_exit(struct task_struct *tsk)
list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
if (dl_task(tsk))
dec_dl_tasks_cs(tsk);
WARN_ON_ONCE(cgroup_task_frozen(tsk));
if (unlikely(!(tsk->flags & PF_KTHREAD) &&
test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))


@@ -166,6 +166,14 @@ struct cpuset {
int use_parent_ecpus;
int child_ecpus_count;
/*
* number of SCHED_DEADLINE tasks attached to this cpuset, so that we
* know when to rebuild associated root domain bandwidth information.
*/
int nr_deadline_tasks;
int nr_migrate_dl_tasks;
u64 sum_migrate_dl_bw;
/* Handle for cpuset.cpus.partition */
struct cgroup_file partition_file;
};
@@ -213,6 +221,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
return css_cs(cs->css.parent);
}
void inc_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks++;
}
void dec_dl_tasks_cs(struct task_struct *p)
{
struct cpuset *cs = task_cs(p);
cs->nr_deadline_tasks--;
}
/* bits in struct cpuset flags field */
typedef enum {
CS_ONLINE,
@@ -316,22 +338,23 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
* There are two global locks guarding cpuset structures - cpuset_rwsem and
* There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
* comment. The cpuset code uses only cpuset_rwsem write lock. Other
* kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
* prevent change to cpuset structures.
* comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
* can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
* structures. Note that cpuset_mutex needs to be a mutex as it is used in
* paths that rely on priority inheritance (e.g. scheduler - on RT) for
* correctness.
*
* A task must hold both locks to modify cpusets. If a task holds
* cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
* is the only task able to also acquire callback_lock and be able to
* modify cpusets. It can perform various checks on the cpuset structure
* first, knowing nothing will change. It can also allocate memory while
* just holding cpuset_rwsem. While it is performing these checks, various
* callback routines can briefly acquire callback_lock to query cpusets.
* Once it is ready to make the changes, it takes callback_lock, blocking
* everyone else.
* cpuset_mutex, it blocks others, ensuring that it is the only task able to
* also acquire callback_lock and be able to modify cpusets. It can perform
* various checks on the cpuset structure first, knowing nothing will change.
* It can also allocate memory while just holding cpuset_mutex. While it is
* performing these checks, various callback routines can briefly acquire
* callback_lock to query cpusets. Once it is ready to make the changes, it
* takes callback_lock, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_lock, as that would risk double tripping on callback_lock
@@ -353,7 +376,18 @@ static struct cpuset top_cpuset = {
* guidelines for accessing subsystem state in kernel/cgroup.c
*/
DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
static DEFINE_MUTEX(cpuset_mutex);
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
}
void cpuset_unlock(void)
{
mutex_unlock(&cpuset_mutex);
}
static DEFINE_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -389,7 +423,7 @@ static inline bool is_in_v2_mode(void)
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
@@ -431,7 +465,7 @@ out_unlock:
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -443,7 +477,8 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
* Call with callback_lock or cpuset_rwsem held.
* Call with callback_lock or cpuset_mutex held. The check can be skipped
* if on default hierarchy.
*/
static void cpuset_update_task_spread_flag(struct cpuset *cs,
struct task_struct *tsk)
@@ -464,7 +499,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
* are only set if the other's are set. Call holding cpuset_rwsem.
* are only set if the other's are set. Call holding cpuset_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -580,7 +615,7 @@ static inline void free_cpuset(struct cpuset *cs)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
* cpuset_rwsem held.
* cpuset_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -703,7 +738,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}
/* Must be called with cpuset_rwsem held. */
/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
@@ -729,7 +764,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
* Must be called with cpuset_rwsem held.
* Must be called with cpuset_mutex held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -940,11 +975,14 @@ done:
return ndoms;
}
static void update_tasks_root_domain(struct cpuset *cs)
static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
if (cs->nr_deadline_tasks == 0)
return;
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
@@ -953,12 +991,12 @@ static void update_tasks_root_domain(struct cpuset *cs)
css_task_iter_end(&it);
}
static void rebuild_root_domains(void)
static void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
@@ -981,7 +1019,7 @@ static void rebuild_root_domains(void)
rcu_read_unlock();
update_tasks_root_domain(cs);
dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
@@ -995,7 +1033,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
rebuild_root_domains();
dl_rebuild_rd_accounting();
mutex_unlock(&sched_domains_mutex);
}
@@ -1008,7 +1046,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
* Call with cpuset_rwsem held. Takes cpus_read_lock().
* Call with cpuset_mutex held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{
@@ -1019,7 +1057,7 @@ static void rebuild_sched_domains_locked(void)
int ndoms;
lockdep_assert_cpus_held();
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* If we have raced with CPU hotplug, return early to avoid
@@ -1070,9 +1108,9 @@ static void rebuild_sched_domains_locked(void)
void rebuild_sched_domains(void)
{
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(rebuild_sched_domains);
@@ -1094,7 +1132,7 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its cpus_allowed to the
* effective cpuset's. As this function is called with cpuset_rwsem held,
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_cpumask(struct cpuset *cs)
@@ -1201,7 +1239,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
int old_prs, new_prs;
bool part_error = false; /* Partition error? */
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* The parent must be a partition root.
@@ -1371,7 +1409,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
* Called with cpuset_rwsem held
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
@@ -1534,7 +1572,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
percpu_rwsem_assert_held(&cpuset_rwsem);
lockdep_assert_held(&cpuset_mutex);
/*
* Check all its siblings and call update_cpumasks_hier()
@@ -1739,12 +1777,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
* effective cpuset's. As this function is called with cpuset_rwsem held,
* effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
static nodemask_t newmems; /* protected by cpuset_rwsem */
static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
struct task_struct *task;
@@ -1757,7 +1795,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
* the global cpuset_rwsem, we know that no other rebind effort
* the global cpuset_mutex, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1803,7 +1841,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
* Called with cpuset_rwsem held
* Called with cpuset_mutex held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
@@ -1856,7 +1894,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
* Call with cpuset_rwsem held. May take callback_lock during call.
* Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1946,7 +1984,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
* function is called with cpuset_rwsem held, cpuset membership stays
* function is called with cpuset_mutex held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
@@ -1966,7 +2004,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
* Call with cpuset_rwsem held.
* Call with cpuset_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -2015,7 +2053,7 @@ out:
* cs: the cpuset to update
* new_prs: new partition root state
*
* Call with cpuset_rwsem held.
* Call with cpuset_mutex held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
@@ -2197,19 +2235,26 @@ static int fmeter_getrate(struct fmeter *fmp)
static struct cpuset *cpuset_attach_old_cs;
/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
static void reset_migrate_dl_data(struct cpuset *cs)
{
cs->nr_migrate_dl_tasks = 0;
cs->sum_migrate_dl_bw = 0;
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
struct cpuset *cs;
struct cpuset *cs, *oldcs;
struct task_struct *task;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/* allow moving tasks into an empty cpuset if on default hierarchy */
ret = -ENOSPC;
@@ -2218,14 +2263,39 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task, cs->effective_cpus);
ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
if (dl_task(task)) {
cs->nr_migrate_dl_tasks++;
cs->sum_migrate_dl_bw += task->dl.dl_bw;
}
}
if (!cs->nr_migrate_dl_tasks)
goto out_success;
if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
if (unlikely(cpu >= nr_cpu_ids)) {
reset_migrate_dl_data(cs);
ret = -EINVAL;
goto out_unlock;
}
ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
if (ret) {
reset_migrate_dl_data(cs);
goto out_unlock;
}
}
out_success:
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
@@ -2233,7 +2303,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
cs->attach_in_progress++;
ret = 0;
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
return ret;
}
@@ -2245,15 +2315,23 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
if (cs->nr_migrate_dl_tasks) {
int cpu = cpumask_any(cs->effective_cpus);
dl_bw_free(cpu, cs->sum_migrate_dl_bw);
reset_migrate_dl_data(cs);
}
mutex_unlock(&cpuset_mutex);
}
/*
* Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach()
* Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
@@ -2261,7 +2339,7 @@ static cpumask_var_t cpus_attach;
static void cpuset_attach(struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_rwsem */
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct task_struct *task;
struct task_struct *leader;
@@ -2273,7 +2351,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2321,11 +2399,17 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cs->old_mems_allowed = cpuset_attach_nodemask_to;
if (cs->nr_migrate_dl_tasks) {
cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
reset_migrate_dl_data(cs);
}
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
/* The various types of files and directories in a cpuset file system */
@@ -2357,7 +2441,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = 0;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
@@ -2393,7 +2477,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2406,7 +2490,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = -ENODEV;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2419,7 +2503,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2452,7 +2536,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
* grabbing cpuset_rwsem anyway. This only happens on the legacy
* grabbing cpuset_mutex anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
@@ -2460,7 +2544,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
flush_work(&cpuset_hotplug_work);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2484,7 +2568,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
free_cpuset(trialcs);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
@@ -2617,13 +2701,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
css_get(&cs->css);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
retval = update_prstate(cs, val);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
css_put(&cs->css);
return retval ?: nbytes;
@@ -2836,7 +2920,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
return 0;
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
set_bit(CS_ONLINE, &cs->flags);
if (is_spread_page(parent))
@@ -2888,7 +2972,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return 0;
}
@@ -2909,7 +2993,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
struct cpuset *cs = css_cs(css);
cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
if (is_partition_root(cs))
update_prstate(cs, 0);
@@ -2928,7 +3012,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
@@ -2941,7 +3025,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
@@ -2954,7 +3038,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
}
spin_unlock_irq(&callback_lock);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
/*
@@ -2999,8 +3083,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
int __init cpuset_init(void)
{
BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
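The deleted BUG_ON() is a small side benefit of the conversion: a percpu rwsem must be initialized at runtime, whereas a mutex can be fully initialized at its definition, leaving cpuset_init() nothing to set up. The definition, from the part of the patch not shown in this hunk, is presumably the standard:

static DEFINE_MUTEX(cpuset_mutex);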
@@ -3074,7 +3156,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
is_empty = cpumask_empty(cs->cpus_allowed) ||
nodes_empty(cs->mems_allowed);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
/*
* Move tasks to the nearest ancestor with execution resources,
@@ -3084,7 +3166,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
if (is_empty)
remove_tasks_in_empty_cpuset(cs);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
}
static void
@@ -3134,14 +3216,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/*
* We have raced with task attaching. We wait until attaching
* is finished, so we won't attach a task to an empty cpuset.
*/
if (cs->attach_in_progress) {
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
goto retry;
}
@@ -3219,7 +3301,7 @@ update_tasks:
hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
cpus_updated, mems_updated);
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
}
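The hotplug worker keeps its wait-then-recheck handshake against in-flight attaches, now under the mutex. The sleep has to happen outside the lock, because cpuset_attach() needs cpuset_mutex to complete and issue the wake_up(); after waking, the worker retakes the lock and re-tests, looping if another attach started in the window. Consolidated from the lines above:

retry:
	/* Sleep unlocked: the attacher needs cpuset_mutex to finish. */
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);
	if (cs->attach_in_progress) {
		/* A new attach slipped in between wake-up and lock. */
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}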
/**
@@ -3249,7 +3331,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
if (on_dfl && !alloc_cpumasks(NULL, &tmp))
ptmp = &tmp;
percpu_down_write(&cpuset_rwsem);
mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
cpumask_copy(&new_cpus, cpu_active_mask);
@@ -3306,7 +3388,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
update_tasks_nodemask(&top_cpuset);
}
percpu_up_write(&cpuset_rwsem);
mutex_unlock(&cpuset_mutex);
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
@@ -3716,7 +3798,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
* and we take cpuset_rwsem, keeping cpuset_attach() from changing it
* and we take cpuset_mutex, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,


@@ -1,4 +0,0 @@
connoro@google.com
elavila@google.com
qperret@google.com
tkjos@google.com


@@ -7463,6 +7463,7 @@ static int __sched_setscheduler(struct task_struct *p,
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
struct rq *rq;
bool cpuset_locked = false;
/* The pi code expects interrupts enabled */
BUG_ON(pi && in_interrupt());
@@ -7563,6 +7564,15 @@ recheck:
return retval;
}
/*
* SCHED_DEADLINE bandwidth accounting relies on stable cpusets
* information.
*/
if (dl_policy(policy) || dl_policy(p->policy)) {
cpuset_locked = true;
cpuset_lock();
}
/*
* Make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
@@ -7637,6 +7647,8 @@ change:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
goto recheck;
}
@@ -7703,8 +7715,11 @@ change:
head = splice_balance_callbacks(rq);
task_rq_unlock(rq, p, &rf);
if (pi)
if (pi) {
if (cpuset_locked)
cpuset_unlock();
rt_mutex_adjust_pi(p);
}
/* Run balance callbacks after we've adjusted the PI chain: */
balance_callbacks(rq, head);
@@ -7714,6 +7729,8 @@ change:
unlock:
task_rq_unlock(rq, p, &rf);
if (cpuset_locked)
cpuset_unlock();
return retval;
}
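__sched_setscheduler() now pins cpuset topology whenever a task enters or leaves SCHED_DEADLINE, because DL bandwidth admission depends on which root domain the task ends up in. The cpuset_locked flag keeps the exit paths symmetric: the goto recheck loop, the PI-adjustment path and the error unlock path each drop the lock before continuing. Reduced to a skeleton (a sketch of the control flow, not verbatim kernel code):

	bool cpuset_locked = false;

	if (dl_policy(policy) || dl_policy(p->policy)) {
		cpuset_locked = true;
		cpuset_lock();		/* stabilize cpusets for DL admission */
	}

	/* ... validation, rq-locked admission control, possible recheck ... */

	if (cpuset_locked)
		cpuset_unlock();	/* mirrored on each return path */
	return retval;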
@@ -8939,8 +8956,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
int task_can_attach(struct task_struct *p,
const struct cpumask *cs_effective_cpus)
int task_can_attach(struct task_struct *p)
{
int ret = 0;
@@ -8953,21 +8969,9 @@ int task_can_attach(struct task_struct *p,
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_mask may be changed.
*/
if (p->flags & PF_NO_SETAFFINITY) {
if (p->flags & PF_NO_SETAFFINITY)
ret = -EINVAL;
goto out;
}
if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
cs_effective_cpus)) {
int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
if (unlikely(cpu >= nr_cpu_ids))
return -EINVAL;
ret = dl_cpu_busy(cpu, p);
}
out:
return ret;
}
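With the bandwidth reservation moved into cpuset_can_attach(), task_can_attach() no longer needs the destination cpumask and collapses to a single affinity check. Reassembled from the removals and additions above (the comment is descriptive, not from the source), the surviving function is:

int task_can_attach(struct task_struct *p)
{
	int ret = 0;

	/* Affinity-pinned kthreads must not be migrated between cpusets. */
	if (p->flags & PF_NO_SETAFFINITY)
		ret = -EINVAL;

	return ret;
}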
@@ -9267,7 +9271,7 @@ static void cpuset_cpu_active(void)
static int cpuset_cpu_inactive(unsigned int cpu)
{
if (!cpuhp_tasks_frozen) {
int ret = dl_cpu_busy(cpu, NULL);
int ret = dl_bw_check_overflow(cpu);
if (ret)
return ret;


@@ -18,6 +18,7 @@
#include "sched.h"
#include "pelt.h"
#include <trace/hooks/sched.h>
#include <linux/cpuset.h>
struct dl_bandwidth def_dl_bandwidth;
@@ -2453,6 +2454,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
/*
* In case a task is setscheduled out from SCHED_DEADLINE we need to
* keep track of that on its cpuset (for correct bandwidth tracking).
*/
dec_dl_tasks_cs(p);
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2493,6 +2500,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
/*
* In case a task is setscheduled to SCHED_DEADLINE we need to keep
* track of that on its cpuset (for correct bandwidth tracking).
*/
inc_dl_tasks_cs(p);
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
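switched_from_dl() and switched_to_dl() are the class-transition hooks, so counting here catches every way a task can enter or leave SCHED_DEADLINE, not just cgroup migration. The helpers come from the "sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets" patch in this merge; the real definitions live in kernel/cgroup/cpuset.c, and their assumed shape is:

void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	lockdep_assert_held(&cpuset_mutex);	/* via cpuset_lock() in core.c */

	cs->nr_deadline_tasks++;
}

void dec_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	lockdep_assert_held(&cpuset_mutex);

	cs->nr_deadline_tasks--;
}

This dependency is also why __sched_setscheduler() above brackets DL transitions with cpuset_lock().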
@@ -2892,26 +2905,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
int dl_cpu_busy(int cpu, struct task_struct *p)
enum dl_bw_request {
dl_bw_req_check_overflow = 0,
dl_bw_req_alloc,
dl_bw_req_free
};
static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
unsigned long flags, cap;
unsigned long flags;
struct dl_bw *dl_b;
bool overflow;
bool overflow = 0;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cap = dl_bw_capacity(cpu);
overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
if (!overflow && p) {
/*
* We reserve space for this task in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
if (req == dl_bw_req_free) {
__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
} else {
unsigned long cap = dl_bw_capacity(cpu);
overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
if (req == dl_bw_req_alloc && !overflow) {
/*
* We reserve space in the destination
* root_domain, as we can't fail after this point.
* We will free resources in the source root_domain
* later on (see set_cpus_allowed_dl()).
*/
__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
}
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -2919,6 +2944,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p)
return overflow ? -EBUSY : 0;
}
int dl_bw_check_overflow(int cpu)
{
return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
}
int dl_bw_alloc(int cpu, u64 dl_bw)
{
return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
}
void dl_bw_free(int cpu, u64 dl_bw)
{
dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
}
#endif
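dl_bw_manage() folds the old dl_cpu_busy() into a three-way request interface: dl_bw_req_check_overflow is the former p == NULL case, dl_bw_req_alloc generalizes the "reserve for task p" case to an arbitrary bandwidth value, and dl_bw_req_free is new, letting cpuset return a reservation it will not use. The admission math itself (__dl_overflow() against the root-domain capacity) is unchanged. A quick userspace probe of the admission path — needs root, and struct sched_attr is declared by hand since glibc does not ship it:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define SCHED_DEADLINE	6

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;	/* 10 ms of runtime ... */
	attr.sched_deadline = 30 * 1000 * 1000;	/* ... by a 30 ms deadline ... */
	attr.sched_period   = 30 * 1000 * 1000;	/* ... every 30 ms period */

	/* Admission control runs __dl_overflow() on this request. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	puts("admitted as SCHED_DEADLINE");
	return 0;
}

On a kernel carrying this series, migrating such a task between cpusets then exercises the new reservation logic in cpuset_can_attach() shown earlier.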
#ifdef CONFIG_SCHED_DEBUG


@@ -351,7 +351,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int dl_cpu_busy(int cpu, struct task_struct *p);
extern int dl_bw_check_overflow(int cpu);
#ifdef CONFIG_CGROUP_SCHED


@@ -1,4 +0,0 @@
hridya@google.com
kaleshsingh@google.com
surenb@google.com
minchan@google.com


@@ -1,2 +0,0 @@
lorenzo@google.com
maze@google.com


@@ -591,6 +591,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -606,6 +607,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
sk->sk_wait_pending--;
return timeo;
}


@@ -963,6 +963,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;


@@ -2977,6 +2977,12 @@ int tcp_disconnect(struct sock *sk, int flags)
int old_state = sk->sk_state;
u32 seq;
/* Deny disconnect if other threads are blocked in sk_wait_event()
* or inet_wait_for_connect().
*/
if (sk->sk_wait_pending)
return -EBUSY;
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
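The three networking hunks implement one mechanism: inet_wait_for_connect() (net/ipv4/af_inet.c) advertises a sleeping thread via the new sk_wait_pending counter, inet_csk_clone_lock() (net/ipv4/inet_connection_sock.c) zeroes the field because a clone starts from a byte-copy of the parent socket, and tcp_disconnect() (net/ipv4/tcp.c) refuses to run while the counter is nonzero, closing the race with the sleeping waiter. The denial is visible from userspace: an AF_UNSPEC connect() (which maps to tcp_disconnect()) issued while another thread sits in a blocking connect() should now fail with EBUSY. A rough demo, assuming 192.0.2.1 (a TEST-NET address) blackholes the SYN so the first connect() blocks; build with -pthread:

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int fd;

static void *connector(void *unused)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port = htons(9),
	};

	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
	/* Blocks in inet_wait_for_connect(), raising sk_wait_pending. */
	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
	return NULL;
}

int main(void)
{
	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
	pthread_t t;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	pthread_create(&t, NULL, connector, NULL);
	sleep(1);	/* let the connect() reach its wait loop */

	/* With this fix, disconnect is denied instead of racing the waiter. */
	if (connect(fd, &unspec, sizeof(unspec)) < 0 && errno == EBUSY)
		puts("tcp_disconnect() denied with EBUSY, as expected");

	pthread_join(t, NULL);	/* may take minutes: waits out SYN retries */
	close(fd);
	return 0;
}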


@@ -1,2 +0,0 @@
# include OWNERS from the authoritative android-mainline branch
include kernel/common:android-mainline:/tools/testing/selftests/filesystems/incfs/OWNERS


@@ -1 +0,0 @@
file:/fs/incfs/OWNERS