Merge "Merge branch 'android14-5.15' into branch 'android14-5.15-lts'" into android14-5.15-lts

This commit is contained in:
Greg Kroah-Hartman
2024-02-15 13:25:56 +00:00
44 changed files with 3109 additions and 173 deletions

View File

@@ -101,6 +101,7 @@ filegroup(
"android/abi_gki_aarch64_mtktv",
"android/abi_gki_aarch64_oplus",
"android/abi_gki_aarch64_pixel",
"android/abi_gki_aarch64_pixel_watch",
"android/abi_gki_aarch64_qcom",
"android/abi_gki_aarch64_rtkstb",
"android/abi_gki_aarch64_rtktv",
@@ -776,7 +777,6 @@ ddk_headers(
"drivers/usb/host/xhci-mvebu.h",
"drivers/usb/host/xhci-plat.h",
"drivers/usb/host/xhci-rcar.h",
"drivers/usb/typec/tcpm/tcpci.h",
],
# The list of include directories where source files can #include headers
# from. In other words, these are the `-I` option to the C compiler.

View File

@@ -2083,6 +2083,14 @@ accept_ra_min_hop_limit - INTEGER
Default: 1
accept_ra_min_lft - INTEGER
Minimum acceptable lifetime value in Router Advertisement.
RA sections with a lifetime less than this value shall be
ignored. Zero lifetimes stay unaffected.
Default: 0
accept_ra_pinfo - BOOLEAN
Learn Prefix Information in Router Advertisement.

View File

@@ -35447,6 +35447,10 @@ member {
id: 0x2e137f54
type_id: 0x37638d42
}
member {
id: 0x2e3fb8e1
type_id: 0x37d09395
}
member {
id: 0x2e407ff3
type_id: 0x362f8fdc
@@ -35911,6 +35915,11 @@ member {
type_id: 0x5d08323b
offset: 160
}
member {
id: 0x3494a119
type_id: 0x5d7cfe67
offset: 1984
}
member {
id: 0x349c23b4
type_id: 0x5d5ed2a3
@@ -38619,6 +38628,11 @@ member {
type_id: 0x0faae5b1
offset: 704
}
member {
id: 0xbe977230
name: "accept_ra_min_lft"
type_id: 0x0faae5b1
}
member {
id: 0x2c2f941d
name: "accept_ra_mtu"
@@ -89265,6 +89279,12 @@ member {
type_id: 0x1d19a9d5
offset: 832
}
member {
id: 0x16cb0979
name: "group_generation"
type_id: 0x4585663f
offset: 1120
}
member {
id: 0x0f81c321
name: "group_index"
@@ -131368,6 +131388,12 @@ member {
type_id: 0xc8c766a0
offset: 384
}
member {
id: 0x642f0821
name: "padding"
type_id: 0xc9082b19
offset: 32
}
member {
id: 0x64367333
name: "padding"
@@ -193099,6 +193125,15 @@ struct_union {
member_id: 0xb9227601
}
}
struct_union {
id: 0x37d09395
kind: STRUCT
definition {
bytesize: 8
member_id: 0xbe977230
member_id: 0x642f0821
}
}
struct_union {
id: 0x37f08244
kind: STRUCT
@@ -194859,6 +194894,16 @@ struct_union {
member_id: 0x8c9fd173
}
}
struct_union {
id: 0x5d7cfe67
kind: UNION
definition {
bytesize: 8
member_id: 0x2e3fb8e1
member_id: 0x27000c61
member_id: 0x36752b74
}
}
struct_union {
id: 0x5d84d1ff
kind: UNION
@@ -216223,7 +216268,7 @@ struct_union {
member_id: 0x6180037a
member_id: 0xef330163
member_id: 0x0460772e
member_id: 0x2d0817b6
member_id: 0x3494a119
member_id: 0x63760e20
member_id: 0xac894714
member_id: 0xe0f631d4
@@ -224330,6 +224375,7 @@ struct_union {
member_id: 0xb826ad15
member_id: 0xf0897d29
member_id: 0x26e58ea7
member_id: 0x16cb0979
member_id: 0xc397b990
member_id: 0x933349d1
member_id: 0xbadfff7b
@@ -254729,6 +254775,20 @@ enumeration {
}
}
}
enumeration {
id: 0xf01a263d
name: "lockdep_ok"
definition {
underlying_type_id: 0x4585663f
enumerator {
name: "LOCKDEP_STILL_OK"
}
enumerator {
name: "LOCKDEP_NOW_UNRELIABLE"
value: 1
}
}
}
enumeration {
id: 0x0b05beb9
name: "macsec_offload"
@@ -261306,6 +261366,12 @@ function {
return_type_id: 0x48b5725f
parameter_id: 0x4585663f
}
function {
id: 0x02396084
return_type_id: 0x48b5725f
parameter_id: 0x4585663f
parameter_id: 0xf01a263d
}
function {
id: 0x023bf562
return_type_id: 0x37edd07b
@@ -301249,6 +301315,11 @@ function {
parameter_id: 0x1d19a9d5
parameter_id: 0x6720d32f
}
function {
id: 0xf886bca4
return_type_id: 0x6d7f5ff6
parameter_id: 0x188b9e81
}
function {
id: 0xf8a43ec4
return_type_id: 0x6d7f5ff6
@@ -312089,6 +312160,15 @@ elf_symbol {
type_id: 0x8624d3e6
full_name: "add_memory_subsection"
}
elf_symbol {
id: 0xa5dcf915
name: "add_taint"
is_defined: true
symbol_type: FUNCTION
crc: 0x0eb6eb87
type_id: 0x02396084
full_name: "add_taint"
}
elf_symbol {
id: 0xf2768ed7
name: "add_timer"
@@ -331603,6 +331683,15 @@ elf_symbol {
type_id: 0x6dc76989
full_name: "iio_alloc_pollfunc"
}
elf_symbol {
id: 0xf9fd308c
name: "iio_buffer_enabled"
is_defined: true
symbol_type: FUNCTION
crc: 0xaabcd3a0
type_id: 0xf886bca4
full_name: "iio_buffer_enabled"
}
elf_symbol {
id: 0x5956f21f
name: "iio_buffer_init"
@@ -352392,6 +352481,15 @@ elf_symbol {
type_id: 0x9db78784
full_name: "spi_bus_lock"
}
elf_symbol {
id: 0xf4d76908
name: "spi_bus_type"
is_defined: true
symbol_type: OBJECT
crc: 0x6e839e01
type_id: 0x257935aa
full_name: "spi_bus_type"
}
elf_symbol {
id: 0xc99e9aa2
name: "spi_bus_unlock"
@@ -363094,6 +363192,7 @@ interface {
symbol_id: 0xf09e1ca4
symbol_id: 0xcef4ad72
symbol_id: 0x9d3925bd
symbol_id: 0xa5dcf915
symbol_id: 0xf2768ed7
symbol_id: 0xfbeae533
symbol_id: 0x76ad851f
@@ -365260,6 +365359,7 @@ interface {
symbol_id: 0x4292b79d
symbol_id: 0xb779176d
symbol_id: 0x98a39db9
symbol_id: 0xf9fd308c
symbol_id: 0x5956f21f
symbol_id: 0x76041d7e
symbol_id: 0xe4653a75
@@ -367567,6 +367667,7 @@ interface {
symbol_id: 0x13b529c0
symbol_id: 0x55b4474a
symbol_id: 0xe2eab328
symbol_id: 0xf4d76908
symbol_id: 0xc99e9aa2
symbol_id: 0xa14d65f0
symbol_id: 0xc3db62f6

View File

@@ -1022,6 +1022,7 @@
ieee802154_unregister_hw
ieee802154_wake_queue
ieee802154_xmit_complete
iio_buffer_enabled
iio_device_unregister
import_iovec
in6_pton

File diff suppressed because it is too large Load Diff

View File

@@ -80,7 +80,13 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
obj-y += vdso-wrap.o
obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o
# We need to prevent the SCS patching code from patching itself. Using
# -mbranch-protection=none here to avoid the patchable PAC opcodes from being
# generated triggers an issue with full LTO on Clang, which stops emitting PAC
# instructions altogether. So disable LTO as well for the compilation unit.
CFLAGS_patch-scs.o += -mbranch-protection=none
CFLAGS_REMOVE_patch-scs.o += $(CC_FLAGS_LTO)
# Force dependency (vdso*-wrap.S includes vdso.so through incbin)
$(obj)/vdso-wrap.o: $(obj)/vdso/vdso.so

View File

@@ -880,7 +880,7 @@ err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
alloc->buffer = 0;
alloc->buffer = NULL;
mutex_lock(&binder_alloc_mmap_lock);
alloc->buffer_size = 0;
err_already_mapped:

View File

@@ -389,3 +389,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_freepages_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_freepages_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_thrashing_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_delayacct_thrashing_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);

View File

@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}

View File

@@ -304,6 +304,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
mutex_lock(&priv->wdev.mtx);
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
mutex_unlock(&priv->wdev.mtx);
}

View File

@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
mutex_lock(&vif->wdev.mtx);
cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}

View File

@@ -4018,7 +4018,7 @@ next:
sis->highest_bit = cur_lblock - 1;
out:
if (not_aligned)
f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
not_aligned, blks_per_sec * F2FS_BLKSIZE);
return ret;
}

View File

@@ -256,7 +256,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
(!fi->i_inline_xattr_size ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MAX_INLINE_XATTR_SIZE);
return false;

View File

@@ -633,7 +633,7 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)
/* Then, try readahead for siblings of the desired node */
end = start + n;
end = min(end, NIDS_PER_BLOCK);
end = min(end, (int)NIDS_PER_BLOCK);
for (i = start; i < end; i++) {
nid = get_nid(parent, i, false);
f2fs_ra_node_page(sbi, nid);

View File

@@ -3275,6 +3275,14 @@ loff_t max_file_blocks(struct inode *inode)
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
/*
* For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
* a 4K crypto data unit, we must restrict the max filesize to what can
* fit within U32_MAX + 1 data units.
*/
result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
return result;
}
@@ -3429,7 +3437,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return -EFSCORRUPTED;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
/* Currently, support 512/1024/2048/4096/16K bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
le32_to_cpu(raw_super->log_sectorsize) <
@@ -4769,7 +4777,7 @@ static int __init init_f2fs_fs(void)
int err;
if (PAGE_SIZE != F2FS_BLKSIZE) {
printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
PAGE_SIZE, F2FS_BLKSIZE);
return -EINVAL;
}

View File

@@ -17,7 +17,6 @@
#include "kernfs-internal.h"
DECLARE_RWSEM(kernfs_rwsem);
static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
/*
* Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
@@ -34,7 +33,7 @@ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
static bool kernfs_active(struct kernfs_node *kn)
{
lockdep_assert_held(&kernfs_rwsem);
lockdep_assert_held(kernfs_rwsem(kernfs_root(kn)));
return atomic_read(&kn->active) >= 0;
}
@@ -465,14 +464,15 @@ void kernfs_put_active(struct kernfs_node *kn)
* return after draining is complete.
*/
static void kernfs_drain(struct kernfs_node *kn)
__releases(&kernfs_rwsem) __acquires(&kernfs_rwsem)
__releases(kernfs_rwsem(kernfs_root(kn)))
__acquires(kernfs_rwsem(kernfs_root(kn)))
{
struct kernfs_root *root = kernfs_root(kn);
lockdep_assert_held_write(&kernfs_rwsem);
lockdep_assert_held_write(kernfs_rwsem(root));
WARN_ON_ONCE(kernfs_active(kn));
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
if (kernfs_lockdep(kn)) {
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
@@ -491,7 +491,7 @@ static void kernfs_drain(struct kernfs_node *kn)
kernfs_drain_open_files(kn);
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
}
/**
@@ -728,11 +728,12 @@ err_unlock:
int kernfs_add_one(struct kernfs_node *kn)
{
struct kernfs_node *parent = kn->parent;
struct kernfs_root *root = kernfs_root(parent);
struct kernfs_iattrs *ps_iattr;
bool has_ns;
int ret;
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
@@ -763,7 +764,7 @@ int kernfs_add_one(struct kernfs_node *kn)
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
/*
* Activate the new node unless CREATE_DEACTIVATED is requested.
@@ -777,7 +778,7 @@ int kernfs_add_one(struct kernfs_node *kn)
return 0;
out_unlock:
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
return ret;
}
@@ -798,7 +799,7 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
bool has_ns = kernfs_ns_enabled(parent);
unsigned int hash;
lockdep_assert_held(&kernfs_rwsem);
lockdep_assert_held(kernfs_rwsem(kernfs_root(parent)));
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
@@ -830,7 +831,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
size_t len;
char *p, *name;
lockdep_assert_held_read(&kernfs_rwsem);
lockdep_assert_held_read(kernfs_rwsem(kernfs_root(parent)));
spin_lock_irq(&kernfs_pr_cont_lock);
@@ -868,11 +869,12 @@ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root = kernfs_root(parent);
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
kn = kernfs_find_ns(parent, name, ns);
kernfs_get(kn);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return kn;
}
@@ -892,11 +894,12 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
const char *path, const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root = kernfs_root(parent);
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
kn = kernfs_walk_ns(parent, path, ns);
kernfs_get(kn);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return kn;
}
@@ -913,13 +916,16 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv)
{
struct kernfs_root_ext *root_ext;
struct kernfs_root *root;
struct kernfs_node *kn;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
root_ext = kzalloc(sizeof(*root_ext), GFP_KERNEL);
if (!root_ext)
return ERR_PTR(-ENOMEM);
init_rwsem(&root_ext->kernfs_rwsem);
root = &root_ext->root;
idr_init(&root->ino_idr);
INIT_LIST_HEAD(&root->supers);
@@ -966,7 +972,13 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
*/
void kernfs_destroy_root(struct kernfs_root *root)
{
kernfs_remove(root->kn); /* will also free @root */
/*
* kernfs_remove holds kernfs_rwsem from the root so the root
* shouldn't be freed during the operation.
*/
kernfs_get(root->kn);
kernfs_remove(root->kn);
kernfs_put(root->kn); /* will also free @root */
}
/**
@@ -1044,6 +1056,7 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
struct kernfs_node *kn;
struct kernfs_root *root;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1055,18 +1068,19 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*/
down_read(&kernfs_rwsem);
spin_lock(&dentry->d_lock);
parent = kernfs_dentry_node(dentry->d_parent);
if (parent) {
spin_unlock(&dentry->d_lock);
root = kernfs_root(parent);
down_read(kernfs_rwsem(root));
if (kernfs_dir_changed(parent, dentry)) {
spin_unlock(&dentry->d_lock);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return 0;
}
}
spin_unlock(&dentry->d_lock);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
} else
spin_unlock(&dentry->d_lock);
/* The kernfs parent node hasn't changed, leave the
* dentry negative and return success.
@@ -1075,7 +1089,8 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
}
kn = kernfs_dentry_node(dentry);
down_read(&kernfs_rwsem);
root = kernfs_root(kn);
down_read(kernfs_rwsem(root));
/* The kernfs node has been deactivated */
if (!kernfs_active(kn))
@@ -1094,10 +1109,10 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return 1;
out_bad:
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return 0;
}
@@ -1111,10 +1126,12 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_node *kn;
struct kernfs_root *root;
struct inode *inode = NULL;
const void *ns = NULL;
down_read(&kernfs_rwsem);
root = kernfs_root(parent);
down_read(kernfs_rwsem(root));
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dir->i_sb)->ns;
@@ -1125,7 +1142,7 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
* create a negative.
*/
if (!kernfs_active(kn)) {
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return NULL;
}
inode = kernfs_get_inode(dir->i_sb, kn);
@@ -1140,7 +1157,7 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
*/
if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
/* instantiate and hash (possibly negative) dentry */
return d_splice_alias(inode, dentry);
@@ -1263,7 +1280,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
{
struct rb_node *rbn;
lockdep_assert_held_write(&kernfs_rwsem);
lockdep_assert_held_write(kernfs_rwsem(kernfs_root(root)));
/* if first iteration, visit leftmost descendant which may be root */
if (!pos)
@@ -1298,8 +1315,9 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
void kernfs_activate(struct kernfs_node *kn)
{
struct kernfs_node *pos;
struct kernfs_root *root = kernfs_root(kn);
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn))) {
@@ -1313,14 +1331,14 @@ void kernfs_activate(struct kernfs_node *kn)
pos->flags |= KERNFS_ACTIVATED;
}
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
}
static void __kernfs_remove(struct kernfs_node *kn)
{
struct kernfs_node *pos;
lockdep_assert_held_write(&kernfs_rwsem);
lockdep_assert_held_write(kernfs_rwsem(kernfs_root(kn)));
/*
* Short-circuit if non-root @kn has already finished removal.
@@ -1390,9 +1408,16 @@ static void __kernfs_remove(struct kernfs_node *kn)
*/
void kernfs_remove(struct kernfs_node *kn)
{
down_write(&kernfs_rwsem);
struct kernfs_root *root;
if (!kn)
return;
root = kernfs_root(kn);
down_write(kernfs_rwsem(root));
__kernfs_remove(kn);
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
}
/**
@@ -1478,8 +1503,9 @@ void kernfs_unbreak_active_protection(struct kernfs_node *kn)
bool kernfs_remove_self(struct kernfs_node *kn)
{
bool ret;
struct kernfs_root *root = kernfs_root(kn);
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
kernfs_break_active_protection(kn);
/*
@@ -1507,9 +1533,9 @@ bool kernfs_remove_self(struct kernfs_node *kn)
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
break;
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
schedule();
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
}
finish_wait(waitq, &wait);
WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
@@ -1522,7 +1548,7 @@ bool kernfs_remove_self(struct kernfs_node *kn)
*/
kernfs_unbreak_active_protection(kn);
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
return ret;
}
@@ -1539,6 +1565,7 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{
struct kernfs_node *kn;
struct kernfs_root *root;
if (!parent) {
WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
@@ -1546,7 +1573,8 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
return -ENOENT;
}
down_write(&kernfs_rwsem);
root = kernfs_root(parent);
down_write(kernfs_rwsem(root));
kn = kernfs_find_ns(parent, name, ns);
if (kn) {
@@ -1555,7 +1583,7 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
kernfs_put(kn);
}
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
if (kn)
return 0;
@@ -1574,6 +1602,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns)
{
struct kernfs_node *old_parent;
struct kernfs_root *root;
const char *old_name = NULL;
int error;
@@ -1581,7 +1610,8 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
if (!kn->parent)
return -EINVAL;
down_write(&kernfs_rwsem);
root = kernfs_root(kn);
down_write(kernfs_rwsem(root));
error = -ENOENT;
if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
@@ -1635,7 +1665,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
error = 0;
out:
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
return error;
}
@@ -1706,11 +1736,14 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
struct dentry *dentry = file->f_path.dentry;
struct kernfs_node *parent = kernfs_dentry_node(dentry);
struct kernfs_node *pos = file->private_data;
struct kernfs_root *root;
const void *ns = NULL;
if (!dir_emit_dots(file, ctx))
return 0;
down_read(&kernfs_rwsem);
root = kernfs_root(parent);
down_read(kernfs_rwsem(root));
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dentry->d_sb)->ns;
@@ -1727,12 +1760,12 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
if (!dir_emit(ctx, name, len, ino, type))
return 0;
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
}
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
file->private_data = NULL;
ctx->pos = INT_MAX;
return 0;

View File

@@ -847,6 +847,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
struct kernfs_super_info *info;
struct kernfs_root *root;
repeat:
/* pop one off the notify_list */
spin_lock_irq(&kernfs_notify_lock);
@@ -859,8 +860,9 @@ repeat:
kn->attr.notify_next = NULL;
spin_unlock_irq(&kernfs_notify_lock);
root = kernfs_root(kn);
/* kick fsnotify */
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
@@ -898,7 +900,7 @@ repeat:
iput(inode);
}
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
kernfs_put(kn);
goto repeat;
}

View File

@@ -99,10 +99,11 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
int ret;
struct kernfs_root *root = kernfs_root(kn);
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
ret = __kernfs_setattr(kn, iattr);
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
return ret;
}
@@ -111,12 +112,14 @@ int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct kernfs_node *kn = inode->i_private;
struct kernfs_root *root;
int error;
if (!kn)
return -EINVAL;
down_write(&kernfs_rwsem);
root = kernfs_root(kn);
down_write(kernfs_rwsem(root));
error = setattr_prepare(&init_user_ns, dentry, iattr);
if (error)
goto out;
@@ -129,7 +132,7 @@ int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
setattr_copy(&init_user_ns, inode, iattr);
out:
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
return error;
}
@@ -184,13 +187,14 @@ int kernfs_iop_getattr(struct user_namespace *mnt_userns,
{
struct inode *inode = d_inode(path->dentry);
struct kernfs_node *kn = inode->i_private;
struct kernfs_root *root = kernfs_root(kn);
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
spin_lock(&inode->i_lock);
kernfs_refresh_inode(kn, inode);
generic_fillattr(&init_user_ns, inode, stat);
spin_unlock(&inode->i_lock);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return 0;
}
@@ -274,19 +278,21 @@ int kernfs_iop_permission(struct user_namespace *mnt_userns,
struct inode *inode, int mask)
{
struct kernfs_node *kn;
struct kernfs_root *root;
int ret;
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
kn = inode->i_private;
root = kernfs_root(kn);
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
spin_lock(&inode->i_lock);
kernfs_refresh_inode(kn, inode);
ret = generic_permission(&init_user_ns, inode, mask);
spin_unlock(&inode->i_lock);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return ret;
}

View File

@@ -50,6 +50,14 @@ static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
return kn->dir.root;
}
static inline struct rw_semaphore *kernfs_rwsem(struct kernfs_root *root)
{
struct kernfs_root_ext *root_ext;
root_ext = container_of(root, struct kernfs_root_ext, root);
return &root_ext->kernfs_rwsem;
}
/*
* mount.c
*/
@@ -122,7 +130,6 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
/*
* dir.c
*/
extern struct rw_semaphore kernfs_rwsem;
extern const struct dentry_operations kernfs_dops;
extern const struct file_operations kernfs_dir_fops;
extern const struct inode_operations kernfs_dir_iops;

View File

@@ -236,6 +236,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc)
{
struct kernfs_super_info *info = kernfs_info(sb);
struct kernfs_root *kf_root = kfc->root;
struct inode *inode;
struct dentry *root;
@@ -255,9 +256,9 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
sb->s_shrink.seeks = 0;
/* get root inode, initialize and unlock it */
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(kf_root));
inode = kernfs_get_inode(sb, info->root->kn);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(kf_root));
if (!inode) {
pr_debug("kernfs: could not get root inode\n");
return -ENOMEM;
@@ -334,6 +335,7 @@ int kernfs_get_tree(struct fs_context *fc)
if (!sb->s_root) {
struct kernfs_super_info *info = kernfs_info(sb);
struct kernfs_root *root = kfc->root;
kfc->new_sb_created = true;
@@ -344,9 +346,9 @@ int kernfs_get_tree(struct fs_context *fc)
}
sb->s_flags |= SB_ACTIVE;
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
list_add(&info->node, &info->root->supers);
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
}
fc->root = dget(sb->s_root);
@@ -371,10 +373,11 @@ void kernfs_free_fs_context(struct fs_context *fc)
void kernfs_kill_sb(struct super_block *sb)
{
struct kernfs_super_info *info = kernfs_info(sb);
struct kernfs_root *root = info->root;
down_write(&kernfs_rwsem);
down_write(kernfs_rwsem(root));
list_del(&info->node);
up_write(&kernfs_rwsem);
up_write(kernfs_rwsem(root));
/*
* Remove the superblock from fs_supers/s_instances

View File

@@ -114,11 +114,12 @@ static int kernfs_getlink(struct inode *inode, char *path)
struct kernfs_node *kn = inode->i_private;
struct kernfs_node *parent = kn->parent;
struct kernfs_node *target = kn->symlink.target_kn;
struct kernfs_root *root = kernfs_root(parent);
int error;
down_read(&kernfs_rwsem);
down_read(kernfs_rwsem(root));
error = kernfs_get_target_path(parent, target, path);
up_read(&kernfs_rwsem);
up_read(kernfs_rwsem(root));
return error;
}

View File

@@ -33,6 +33,7 @@
#define _ANDROID_KABI_H
#include <linux/compiler.h>
#include <linux/stringify.h>
/*
* Worker macros, don't use these, use the ones without a leading '_'

View File

@@ -13,10 +13,10 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -209,14 +209,14 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[];
} __packed;
#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -242,14 +242,31 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define OFFSET_OF_END_OF_I_EXT 360
#define SIZE_OF_I_NID 20
struct node_footer {
__le32 nid; /* node id */
__le32 ino; /* inode number */
__le32 flag; /* include cold/fsync/dentry marks and offset */
__le64 cp_ver; /* checkpoint version */
__le32 next_blkaddr; /* next node page block address */
} __packed;
/* Address Pointers in an Inode */
#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
- SIZE_OF_I_NID \
- sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
/* Address Pointers in a Direct Block */
#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
/* Node IDs in an Indirect Block */
#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_PAGE(page, inode) \
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
@@ -341,14 +358,6 @@ enum {
#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
struct node_footer {
__le32 nid; /* node id */
__le32 ino; /* inode number */
__le32 flag; /* include cold/fsync/dentry marks and offset */
__le64 cp_ver; /* checkpoint version */
__le32 next_blkaddr; /* next node page block address */
} __packed;
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
union {
@@ -362,7 +371,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -377,12 +386,13 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
* Each segment is 2MB in size by default so that a bitmap for validity of
* there-in blocks should occupy 64 bytes, 512 bits.
* A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size,
* this results in a segment size of 2MB. For 16k pages, the default segment size
* is 8MB.
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -417,7 +427,7 @@ struct f2fs_sit_block {
* For segment summary
*
* One summary block contains exactly 512 summary entries, which represents
* exactly 2MB segment by default. Not allow to change the basic units.
* exactly one segment by default. Not allow to change the basic units.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -428,12 +438,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
#define ENTRIES_IN_SUM 512
#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
/* a summary entry for a 4KB-sized block in a segment */
/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -517,7 +527,7 @@ struct f2fs_journal {
};
} __packed;
/* 4KB-sized summary block structure */
/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -558,11 +568,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
/* the number of dentry in a block */
#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
@@ -575,7 +588,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
/* 4KB-sized directory entry block */
/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];

View File

@@ -84,7 +84,8 @@ struct ipv6_devconf {
struct ctl_table_header *sysctl_header;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_USE(1, struct { __s32 accept_ra_min_lft; u32 padding; });
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);

View File

@@ -16,6 +16,7 @@
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/android_kabi.h>
struct file;
@@ -209,6 +210,11 @@ struct kernfs_root {
ANDROID_KABI_RESERVE(1);
};
struct kernfs_root_ext {
struct kernfs_root root;
struct rw_semaphore kernfs_rwsem;
};
struct kernfs_open_file {
/* published fields */
struct kernfs_node *kn;

View File

@@ -663,6 +663,9 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
#ifndef __GENKSYMS__
unsigned int group_generation;
#endif
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;

View File

@@ -1330,6 +1330,9 @@ struct cfg80211_unsol_bcast_probe_resp {
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
* @mbssid_config: AP settings for multiple bssid
* @punct_bitmap: Preamble puncturing bitmap. Each bit represents
* a 20 MHz channel, lowest bit corresponding to the lowest channel.
* Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1364,6 +1367,7 @@ struct cfg80211_ap_settings {
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
struct cfg80211_mbssid_config mbssid_config;
u16 punct_bitmap;
ANDROID_KABI_RESERVE(1);
};
@@ -1383,6 +1387,9 @@ struct cfg80211_ap_settings {
* @radar_required: whether radar detection is required on the new channel
* @block_tx: whether transmissions should be blocked while changing
* @count: number of beacons until switch
* @punct_bitmap: Preamble puncturing bitmap. Each bit represents
* a 20 MHz channel, lowest bit corresponding to the lowest channel.
* Bit set to 1 indicates that the channel is punctured.
*/
struct cfg80211_csa_settings {
struct cfg80211_chan_def chandef;
@@ -1395,6 +1402,7 @@ struct cfg80211_csa_settings {
bool radar_required;
bool block_tx;
u8 count;
u16 punct_bitmap;
ANDROID_KABI_RESERVE(1);
};
@@ -8306,13 +8314,14 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @dev: the device which switched channels
* @chandef: the new channel definition
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @punct_bitmap: the new puncturing bitmap
*
* Caller must acquire wdev_lock, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id);
unsigned int link_id, u16 punct_bitmap);
/*
* cfg80211_ch_switch_started_notify - notify channel switch start
@@ -8321,6 +8330,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @count: the number of TBTTs until the channel switch happens
* @quiet: whether or not immediate quiet was requested by the AP
* @punct_bitmap: the future puncturing bitmap
*
* Inform the userspace about the channel switch that has just
* started, so that it can take appropriate actions (eg. starting
@@ -8329,7 +8339,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
bool quiet);
bool quiet, u16 punct_bitmap);
/**
* ieee80211_operating_class_to_band - convert operating class to band
@@ -8937,4 +8947,16 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
0, 0);
}
/**
* cfg80211_valid_disable_subchannel_bitmap - validate puncturing bitmap
* @bitmap: bitmap to be validated
* @chandef: channel definition
*
* Validate the puncturing bitmap.
*
* Return: %true if the bitmap is valid. %false otherwise.
*/
bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
const struct cfg80211_chan_def *chandef);
#endif /* __NET_CFG80211_H */

View File

@@ -174,6 +174,9 @@ DECLARE_HOOK(android_vh_isolate_freepages,
DECLARE_HOOK(android_vh_ptep_clear_flush_young,
TP_PROTO(bool *skip),
TP_ARGS(skip));
DECLARE_HOOK(android_vh_do_swap_page_spf,
TP_PROTO(bool *allow_swap_spf),
TP_ARGS(allow_swap_spf));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -196,6 +196,9 @@ enum {
DEVCONF_IOAM6_ENABLED,
DEVCONF_IOAM6_ID,
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_ACCEPT_UNTRACKED_NA,
DEVCONF_ACCEPT_RA_MIN_LFT,
DEVCONF_MAX
};

View File

@@ -2751,6 +2751,12 @@ enum nl80211_commands {
* the incoming frame RX timestamp.
* @NL80211_ATTR_TD_BITMAP: Transition Disable bitmap, for subsequent
* (re)associations.
*
* @NL80211_ATTR_PUNCT_BITMAP: (u32) Preamble puncturing bitmap, lowest
* bit corresponds to the lowest 20 MHz channel. Each bit set to 1
* indicates that the sub-channel is punctured. Higher 16 bits are
* reserved.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3279,6 +3285,7 @@ enum nl80211_attrs {
NL80211_ATTR_TX_HW_TIMESTAMP,
NL80211_ATTR_RX_HW_TIMESTAMP,
NL80211_ATTR_TD_BITMAP,
NL80211_ATTR_PUNCT_BITMAP,
/* add attributes here, update the policy in nl80211.c */
@@ -6294,6 +6301,11 @@ enum nl80211_feature_flags {
* might apply, e.g. no scans in progress, no offchannel operations
* in progress, and no active connections.
*
* @NL80211_EXT_FEATURE_PUNCT: Driver supports preamble puncturing in AP mode.
*
* @NL80211_EXT_FEATURE_SECURE_NAN: Device supports NAN Pairing which enables
* authentication, data encryption and message integrity.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6362,6 +6374,14 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD,
NL80211_EXT_FEATURE_RADAR_BACKGROUND,
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE,
/*
* ANDROID CRC kabi preservation hack due to commits d7c1a9a0ed18
* and 9b89495e479c.
*/
#ifndef __GENKSYMS__
NL80211_EXT_FEATURE_PUNCT,
NL80211_EXT_FEATURE_SECURE_NAN,
#endif
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,

View File

@@ -1899,28 +1899,31 @@ static inline void perf_event__state_init(struct perf_event *event)
PERF_EVENT_STATE_INACTIVE;
}
static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
static int __perf_event_read_size(u64 read_format, int nr_siblings)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
if (read_format & PERF_FORMAT_GROUP) {
nr += nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
/*
* Since perf_event_validate_size() limits this to 16k and inhibits
* adding more siblings, this will never overflow.
*/
return size + nr * entry;
}
static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@@ -1970,8 +1973,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
*/
static void perf_event__header_size(struct perf_event *event)
{
__perf_event_read_size(event,
event->group_leader->nr_siblings);
event->read_size =
__perf_event_read_size(event->attr.read_format,
event->group_leader->nr_siblings);
__perf_event_header_size(event, event->attr.sample_type);
}
@@ -2002,24 +2006,35 @@ static void perf_event__id_header_size(struct perf_event *event)
event->id_header_size = size;
}
/*
* Check that adding an event to the group does not result in anybody
* overflowing the 64k event limit imposed by the output buffer.
*
* Specifically, check that the read_size for the event does not exceed 16k,
* read_size being the one term that grows with groups size. Since read_size
* depends on per-event read_format, also (re)check the existing events.
*
* This leaves 48k for the constant size fields and things like callchains,
* branch stacks and register sets.
*/
static bool perf_event_validate_size(struct perf_event *event)
{
/*
* The values computed here will be over-written when we actually
* attach the event.
*/
__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
perf_event__id_header_size(event);
struct perf_event *sibling, *group_leader = event->group_leader;
/*
* Sum the lot; should not exceed the 64k limit we have on records.
* Conservative limit to allow for callchains and other variable fields.
*/
if (event->read_size + event->header_size +
event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
if (__perf_event_read_size(event->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
if (__perf_event_read_size(group_leader->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
for_each_sibling_event(sibling, group_leader) {
if (__perf_event_read_size(sibling->attr.read_format,
group_leader->nr_siblings + 1) > 16*1024)
return false;
}
return true;
}
@@ -2046,6 +2061,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
group_leader->group_generation++;
perf_event__header_size(group_leader);
@@ -2238,6 +2254,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
event->group_leader->group_generation++;
goto out;
}
@@ -5371,7 +5388,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
struct perf_event *sub;
struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -5381,6 +5398,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
/*
* Verify the grouping between the parent and child (inherited)
 * events is still intact.
*
* Specifically:
* - leader->ctx->lock pins leader->sibling_list
* - parent->child_mutex pins parent->child_list
* - parent->ctx->mutex pins parent->sibling_list
*
* Because parent->ctx != leader->ctx (and child_list nests inside
* ctx->mutex), group destruction is not atomic between children, also
* see perf_event_release_kernel(). Additionally, parent can grow the
* group.
*
* Therefore it is possible to have parent and child groups in a
* different configuration and summing over such a beast makes no sense
 * whatsoever.
*
* Reject this.
*/
parent = leader->parent;
if (parent &&
(parent->group_generation != leader->group_generation ||
parent->nr_siblings != leader->nr_siblings)) {
ret = -ECHILD;
goto unlock;
}
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@@ -5410,8 +5454,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = primary_event_id(sub);
}
unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
return 0;
return ret;
}
static int perf_read_group(struct perf_event *event,
@@ -5430,10 +5475,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
/*
* By locking the child_mutex of the leader we effectively
* lock the child list of all siblings.. XXX explain how.
*/
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@@ -13252,6 +13293,7 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
leader->group_generation = parent_event->group_generation;
return 0;
}

View File

@@ -5342,7 +5342,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
refcount_set(&memcg->id.ref, 1);
css_get(css);
if (unlikely(mem_cgroup_is_root(memcg)))
if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled())
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);

View File

@@ -3720,16 +3720,31 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
void *shadow = NULL;
if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
pte_unmap(vmf->pte);
count_vm_spf_event(SPF_ABORT_SWAP);
return VM_FAULT_RETRY;
bool allow_swap_spf = false;
/* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
#ifndef CONFIG_KSM
trace_android_vh_do_swap_page_spf(&allow_swap_spf);
#endif
if (!allow_swap_spf) {
pte_unmap(vmf->pte);
count_vm_spf_event(SPF_ABORT_SWAP);
return VM_FAULT_RETRY;
}
}
if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
if (vmf->flags & FAULT_FLAG_SPECULATIVE)
ret = VM_FAULT_RETRY;
goto out;
}
entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
ret = VM_FAULT_RETRY;
goto out;
}
if (is_migration_entry(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
@@ -3787,6 +3802,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
swap_readpage(page, true);
set_page_private(page, 0);
}
} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
/*
* Don't try readahead during a speculative page fault
* as the VMA's boundaries may change in our back.
* If the page is not in the swap cache and synchronous
* read is disabled, fall back to the regular page fault
* mechanism.
*/
delayacct_clear_flag(current, DELAYACCT_PF_SWAPIN);
ret = VM_FAULT_RETRY;
goto out;
} else {
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE | __GFP_CMA,

View File

@@ -226,7 +226,7 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
* If we have the only reference, swap the refcount to -1. This
* will prevent other concurrent references by get_vma() for SPFs.
*/
return atomic_cmpxchg(&vma->file_ref_count, 0, -1) == 0;
return atomic_cmpxchg_acquire(&vma->file_ref_count, 0, -1) == 0;
}
/*
@@ -234,12 +234,13 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
*/
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
int old = atomic_xchg_release(&vma->file_ref_count, 0);
/*
* This should only be called after a corresponding,
* successful trylock_vma_ref_count().
*/
VM_BUG_ON_VMA(atomic_cmpxchg(&vma->file_ref_count, -1, 0) != -1,
vma);
VM_BUG_ON_VMA(old != -1, vma);
}
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)

View File

@@ -209,6 +209,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.ra_defrtr_metric = IP6_RT_PRIO_USER,
.accept_ra_from_local = 0,
.accept_ra_min_hop_limit= 1,
.accept_ra_min_lft = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -269,6 +270,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.ra_defrtr_metric = IP6_RT_PRIO_USER,
.accept_ra_from_local = 0,
.accept_ra_min_hop_limit= 1,
.accept_ra_min_lft = 0,
.accept_ra_pinfo = 1,
#ifdef CONFIG_IPV6_ROUTER_PREF
.accept_ra_rtr_pref = 1,
@@ -2756,6 +2758,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
return;
}
if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
goto put;
/*
* Two things going on here:
* 1) Add routes for on-link prefixes
@@ -5611,6 +5616,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
array[DEVCONF_ACCEPT_RA_MIN_LFT] = cnf->accept_ra_min_lft;
}
static inline size_t inet6_ifla6_size(void)
@@ -6800,6 +6806,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "accept_ra_min_lft",
.data = &ipv6_devconf.accept_ra_min_lft,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "accept_ra_pinfo",
.data = &ipv6_devconf.accept_ra_pinfo,

View File

@@ -1270,6 +1270,14 @@ static void ndisc_router_discovery(struct sk_buff *skb)
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) {
ND_PRINTK(2, info,
"RA: router lifetime (%ds) is too short: %s\n",
lifetime, skb->dev->name);
goto skip_defrtr;
}
/* Do not accept RA with source-addr found on local machine unless
* accept_ra_from_local is set to true.
*/
@@ -1282,8 +1290,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
goto skip_defrtr;
}
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
#ifdef CONFIG_IPV6_ROUTER_PREF
pref = ra_msg->icmph.icmp6_router_pref;
/* 10b is handled as if it were 00b (medium) */
@@ -1454,6 +1460,9 @@ skip_linkparms:
if (ri->prefix_len == 0 &&
!in6_dev->cnf.accept_ra_defrtr)
continue;
if (ri->lifetime != 0 &&
ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft)
continue;
if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen)
continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)

View File

@@ -3313,7 +3313,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
if (err)
return err;
cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef, 0);
cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef, 0, 0);
return 0;
}
@@ -3583,7 +3583,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
cfg80211_ch_switch_started_notify(sdata->dev,
&sdata->csa_chandef, 0,
params->count, params->block_tx);
params->count, params->block_tx, 0);
if (changed) {
ieee80211_bss_info_change_notify(sdata, changed);

View File

@@ -1240,7 +1240,7 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
return;
}
cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef, 0);
cfg80211_ch_switch_notify(sdata->dev, &sdata->reserved_chandef, 0, 0);
}
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
@@ -1442,7 +1442,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->mtx);
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0,
csa_ie.count, csa_ie.mode);
csa_ie.count, csa_ie.mode, 0);
if (local->ops->channel_switch) {
/* use driver's channel switch callback */

View File

@@ -39,7 +39,7 @@ static int ___cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
wdev->u.ap.ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
if (notify)
nl80211_send_ap_stopped(wdev);
nl80211_send_ap_stopped(wdev, link_id);
/* Should we apply the grace period during beaconing interface
* shutdown also?

View File

@@ -1460,3 +1460,72 @@ struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev,
}
}
EXPORT_SYMBOL(wdev_chandef);
/* Set of valid preamble-puncturing bitmap values for one channel width. */
struct cfg80211_per_bw_puncturing_values {
u8 len; /* number of entries in @valid_values */
const u16 *valid_values; /* allowed bitmap patterns for this width */
};
/* 80 MHz: exactly one of the four 20 MHz subchannels may be punctured. */
static const u16 puncturing_values_80mhz[] = {
0x8, 0x4, 0x2, 0x1
};
/* 160 MHz: one 20 MHz subchannel, or one adjacent 40 MHz pair (0xc0..0x3). */
static const u16 puncturing_values_160mhz[] = {
0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1, 0xc0, 0x30, 0xc, 0x3
};
/* 320 MHz: the wider set of multi-subchannel patterns allowed at this width. */
static const u16 puncturing_values_320mhz[] = {
0xc000, 0x3000, 0xc00, 0x300, 0xc0, 0x30, 0xc, 0x3, 0xf000, 0xf00,
0xf0, 0xf, 0xfc00, 0xf300, 0xf0c0, 0xf030, 0xf00c, 0xf003, 0xc00f,
0x300f, 0xc0f, 0x30f, 0xcf, 0x3f
};
/* Build one table entry from the matching puncturing_values_<bw>mhz array. */
#define CFG80211_PER_BW_VALID_PUNCTURING_VALUES(_bw) \
{ \
.len = ARRAY_SIZE(puncturing_values_ ## _bw ## mhz), \
.valid_values = puncturing_values_ ## _bw ## mhz \
}
/* Indexed by idx = 0/1/2 for 80/160/320 MHz in the switch below. */
static const struct cfg80211_per_bw_puncturing_values per_bw_puncturing[] = {
CFG80211_PER_BW_VALID_PUNCTURING_VALUES(80),
CFG80211_PER_BW_VALID_PUNCTURING_VALUES(160),
CFG80211_PER_BW_VALID_PUNCTURING_VALUES(320)
};
/*
 * cfg80211_valid_disable_subchannel_bitmap - validate a puncturing bitmap
 * @bitmap: in/out; for channel widths without puncturing support it is
 *	cleared to 0 and the function then reports it as valid
 * @chandef: channel definition the bitmap applies to
 *
 * Return: true when the bitmap is 0, or when it matches one of the
 * per-width patterns above and does not puncture the primary channel.
 */
bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
const struct cfg80211_chan_def *chandef)
{
u32 idx, i, start_freq;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_80:
idx = 0;
start_freq = chandef->center_freq1 - 40;
break;
case NL80211_CHAN_WIDTH_160:
idx = 1;
start_freq = chandef->center_freq1 - 80;
break;
case NL80211_CHAN_WIDTH_320:
idx = 2;
start_freq = chandef->center_freq1 - 160;
break;
default:
/*
 * Narrower widths cannot be punctured: clear the bitmap so the
 * !*bitmap check below returns true; idx/start_freq are then
 * intentionally left unused.
 */
*bitmap = 0;
break;
}
if (!*bitmap)
return true;
/* check if primary channel is punctured */
if (*bitmap & (u16)BIT((chandef->chan->center_freq - start_freq) / 20))
return false;
/* accept only exact matches against the per-width table */
for (i = 0; i < per_bw_puncturing[idx].len; i++)
if (per_bw_puncturing[idx].valid_values[i] == *bitmap)
return true;
return false;
}
EXPORT_SYMBOL(cfg80211_valid_disable_subchannel_bitmap);

View File

@@ -736,7 +736,10 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_AP_VLAN:
if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)))
if (!ether_addr_equal(mgmt->bssid, wdev_address(wdev)) &&
(params->link_id < 0 ||
!ether_addr_equal(mgmt->bssid,
wdev->links[params->link_id].addr)))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:

View File

@@ -806,6 +806,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
[NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff),
};
/* policy for the key attributes */
@@ -1549,10 +1550,14 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
if (wdev->connected)
return 0;
return -ENOLINK;
case NL80211_IFTYPE_NAN:
if (wiphy_ext_feature_isset(wdev->wiphy,
NL80211_EXT_FEATURE_SECURE_NAN))
return 0;
return -EINVAL;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_NAN:
case NL80211_IFTYPE_P2P_DEVICE:
case NL80211_IFTYPE_WDS:
case NUM_NL80211_IFTYPES:
@@ -3174,6 +3179,21 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
/*
 * nl80211_parse_punct_bitmap - parse NL80211_ATTR_PUNCT_BITMAP from @info
 * @rdev: registered device whose feature flags gate the attribute
 * @info: netlink request; caller has already checked that
 *	info->attrs[NL80211_ATTR_PUNCT_BITMAP] is present
 * @chandef: channel definition the bitmap must be valid for
 * @punct_bitmap: output; receives the low 16 bits of the u32 attribute
 *
 * Return: 0 on success, -EINVAL when the driver does not advertise
 * NL80211_EXT_FEATURE_PUNCT or the bitmap is invalid for @chandef.
 */
static int nl80211_parse_punct_bitmap(struct cfg80211_registered_device *rdev,
struct genl_info *info,
const struct cfg80211_chan_def *chandef,
u16 *punct_bitmap)
{
/* Reject the attribute if the driver did not opt in to puncturing. */
if (!wiphy_ext_feature_isset(&rdev->wiphy, NL80211_EXT_FEATURE_PUNCT))
return -EINVAL;
/* Attribute is u32 on the wire; value is narrowed into the u16 output. */
*punct_bitmap = nla_get_u32(info->attrs[NL80211_ATTR_PUNCT_BITMAP]);
if (!cfg80211_valid_disable_subchannel_bitmap(punct_bitmap, chandef))
return -EINVAL;
return 0;
}
int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_chan_def *chandef)
@@ -5919,6 +5939,14 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
err = nl80211_parse_punct_bitmap(rdev, info,
&params->chandef,
&params->punct_bitmap);
if (err)
goto out;
}
if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params->chandef,
wdev->iftype)) {
err = -EINVAL;
@@ -10057,6 +10085,14 @@ skip_beacons:
if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
params.block_tx = true;
if (info->attrs[NL80211_ATTR_PUNCT_BITMAP]) {
err = nl80211_parse_punct_bitmap(rdev, info,
&params.chandef,
&params.punct_bitmap);
if (err)
goto free;
}
wdev_lock(wdev);
err = rdev_channel_switch(rdev, dev, &params);
wdev_unlock(wdev);
@@ -12253,6 +12289,10 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_NAN:
if (!wiphy_ext_feature_isset(wdev->wiphy,
NL80211_EXT_FEATURE_SECURE_NAN))
return -EOPNOTSUPP;
break;
default:
return -EOPNOTSUPP;
}
@@ -12310,6 +12350,10 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
case NL80211_IFTYPE_P2P_GO:
break;
case NL80211_IFTYPE_NAN:
if (!wiphy_ext_feature_isset(wdev->wiphy,
NL80211_EXT_FEATURE_SECURE_NAN))
return -EOPNOTSUPP;
break;
default:
return -EOPNOTSUPP;
}
@@ -12447,6 +12491,10 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
case NL80211_IFTYPE_P2P_DEVICE:
break;
case NL80211_IFTYPE_NAN:
if (!wiphy_ext_feature_isset(wdev->wiphy,
NL80211_EXT_FEATURE_SECURE_NAN))
return -EOPNOTSUPP;
break;
default:
return -EOPNOTSUPP;
}
@@ -17230,7 +17278,7 @@ static struct genl_family nl80211_fam __ro_after_init = {
.name = NL80211_GENL_NAME, /* have users key off the name instead */
.hdrsize = 0, /* no private header */
.version = 1, /* no particular meaning now */
.maxattr = NL80211_ATTR_MAX,
.maxattr = NL80211_ATTR_PUNCT_BITMAP,
.policy = nl80211_policy,
.netnsok = true,
.pre_doit = nl80211_pre_doit,
@@ -18945,7 +18993,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef,
gfp_t gfp,
enum nl80211_commands notif,
u8 count, bool quiet)
u8 count, bool quiet, u16 punct_bitmap)
{
struct wireless_dev *wdev = netdev->ieee80211_ptr;
struct sk_buff *msg;
@@ -18979,6 +19027,9 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_ATTR_PUNCT_BITMAP, punct_bitmap))
goto nla_put_failure;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
@@ -18991,7 +19042,7 @@ static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
void cfg80211_ch_switch_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id)
unsigned int link_id, u16 punct_bitmap)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19000,7 +19051,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
WARN_INVALID_LINK_ID(wdev, link_id);
trace_cfg80211_ch_switch_notify(dev, chandef, link_id);
trace_cfg80211_ch_switch_notify(dev, chandef, link_id, punct_bitmap);
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -19028,14 +19079,15 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
cfg80211_sched_dfs_chan_update(rdev);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_NOTIFY, 0, false);
NL80211_CMD_CH_SWITCH_NOTIFY, 0, false,
punct_bitmap);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
void cfg80211_ch_switch_started_notify(struct net_device *dev,
struct cfg80211_chan_def *chandef,
unsigned int link_id, u8 count,
bool quiet)
bool quiet, u16 punct_bitmap)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -19044,11 +19096,13 @@ void cfg80211_ch_switch_started_notify(struct net_device *dev,
ASSERT_WDEV_LOCK(wdev);
WARN_INVALID_LINK_ID(wdev, link_id);
trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id);
trace_cfg80211_ch_switch_started_notify(dev, chandef, link_id,
punct_bitmap);
nl80211_ch_switch_notify(rdev, dev, link_id, chandef, GFP_KERNEL,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY,
count, quiet);
count, quiet, punct_bitmap);
}
EXPORT_SYMBOL(cfg80211_ch_switch_started_notify);
@@ -19652,7 +19706,7 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
}
EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
void nl80211_send_ap_stopped(struct wireless_dev *wdev)
void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -19670,7 +19724,9 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev)
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD))
NL80211_ATTR_PAD) ||
(wdev->valid_links &&
nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)))
goto out;
genlmsg_end(msg, hdr);

View File

@@ -114,7 +114,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
void nl80211_send_ap_stopped(struct wireless_dev *wdev);
void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id);
void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);

View File

@@ -3245,39 +3245,47 @@ TRACE_EVENT(cfg80211_chandef_dfs_required,
TRACE_EVENT(cfg80211_ch_switch_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
unsigned int link_id),
TP_ARGS(netdev, chandef, link_id),
unsigned int link_id,
u16 punct_bitmap),
TP_ARGS(netdev, chandef, link_id, punct_bitmap),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
__field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
__entry->punct_bitmap = punct_bitmap;
),
TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
__entry->punct_bitmap)
);
TRACE_EVENT(cfg80211_ch_switch_started_notify,
TP_PROTO(struct net_device *netdev,
struct cfg80211_chan_def *chandef,
unsigned int link_id),
TP_ARGS(netdev, chandef, link_id),
unsigned int link_id,
u16 punct_bitmap),
TP_ARGS(netdev, chandef, link_id, punct_bitmap),
TP_STRUCT__entry(
NETDEV_ENTRY
CHAN_DEF_ENTRY
__field(unsigned int, link_id)
__field(u16, punct_bitmap)
),
TP_fast_assign(
NETDEV_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
__entry->link_id = link_id;
__entry->punct_bitmap = punct_bitmap;
),
TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d",
NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id)
TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d, punct_bitmap:%u",
NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id,
__entry->punct_bitmap)
);
TRACE_EVENT(cfg80211_radar_event,