Merge 5.15.135 into android14-5.15-lts

Changes in 5.15.135
	spi: zynqmp-gqspi: Convert to platform remove callback returning void
	spi: zynqmp-gqspi: fix clock imbalance on probe failure
	ASoC: soc-utils: Export snd_soc_dai_is_dummy() symbol
	ASoC: tegra: Fix redundant PLLA and PLLA_OUT0 updates
	NFS: Cleanup unused rpc_clnt variable
	NFS: rename nfs_client_kset to nfs_kset
	NFSv4: Fix a state manager thread deadlock regression
	ring-buffer: remove obsolete comment for free_buffer_page()
	ring-buffer: Fix bytes info in per_cpu buffer stats
	arm64: Avoid repeated AA64MMFR1_EL1 register read on pagefault path
	iommu/arm-smmu-v3: Set TTL invalidation hint better
	iommu/arm-smmu-v3: Avoid constructing invalid range commands
	rbd: move rbd_dev_refresh() definition
	rbd: decouple header read-in from updating rbd_dev->header
	rbd: decouple parent info read-in from updating rbd_dev
	rbd: take header_rwsem in rbd_dev_refresh() only when updating
	block: fix use-after-free of q->q_usage_counter
	Revert "clk: imx: pll14xx: dynamically configure PLL for 393216000/361267200Hz"
	scsi: zfcp: Fix a double put in zfcp_port_enqueue()
	vringh: don't use vringh_kiov_advance() in vringh_iov_xfer()
	qed/red_ll2: Fix undefined behavior bug in struct qed_ll2_info
	wifi: mwifiex: Fix tlv_buf_left calculation
	net: replace calls to sock->ops->connect() with kernel_connect()
	net: prevent rewrite of msg_name in sock_sendmsg()
	drm/amd: Fix detection of _PR3 on the PCIe root port
	arm64: Add Cortex-A520 CPU part definition
	HID: sony: Fix a potential memory leak in sony_probe()
	ubi: Refuse attaching if mtd's erasesize is 0
	wifi: iwlwifi: dbg_ini: fix structure packing
	iwlwifi: avoid void pointer arithmetic
	wifi: iwlwifi: mvm: Fix a memory corruption issue
	wifi: mwifiex: Fix oob check condition in mwifiex_process_rx_packet
	bpf: Fix tr dereferencing
	drivers/net: process the result of hdlc_open() and add call of hdlc_close() in uhdlc_close()
	wifi: mt76: mt76x02: fix MT76x0 external LNA gain handling
	regmap: rbtree: Fix wrong register marked as in-cache when creating new node
	ima: Finish deprecation of IMA_TRUSTED_KEYRING Kconfig
	scsi: target: core: Fix deadlock due to recursive locking
	ima: rework CONFIG_IMA dependency block
	NFSv4: Fix a nfs4_state_manager() race
	bpf, sockmap: Reject sk_msg egress redirects to non-TCP sockets
	modpost: add missing else to the "of" check
	net: fix possible store tearing in neigh_periodic_work()
	ipv4, ipv6: Fix handling of transhdrlen in __ip{,6}_append_data()
	ptp: ocp: Fix error handling in ptp_ocp_device_init
	net: dsa: mv88e6xxx: Avoid EEPROM timeout when EEPROM is absent
	net: usb: smsc75xx: Fix uninit-value access in __smsc75xx_read_reg
	net: nfc: llcp: Add lock when modifying device list
	net: ethernet: ti: am65-cpsw: Fix error code in am65_cpsw_nuss_init_tx_chns()
	ibmveth: Remove condition to recompute TCP header checksum.
	netfilter: handle the connecting collision properly in nf_conntrack_proto_sctp
	netfilter: nf_tables: nft_set_rbtree: fix spurious insertion failure
	ipv4: Set offload_failed flag in fibmatch results
	net: stmmac: dwmac-stm32: fix resume on STM32 MCU
	tipc: fix a potential deadlock on &tx->lock
	tcp: fix quick-ack counting to count actual ACKs of new data
	tcp: fix delayed ACKs for MSS boundary condition
	sctp: update transport state when processing a dupcook packet
	sctp: update hb timer immediately after users change hb_interval
	HID: sony: remove duplicate NULL check before calling usb_free_urb()
	HID: intel-ish-hid: ipc: Disable and reenable ACPI GPE bit
	dm zoned: free dmz->ddev array in dmz_put_zoned_devices
	RDMA/core: Require admin capabilities to set system parameters
	of: dynamic: Fix potential memory leak in of_changeset_action()
	IB/mlx4: Fix the size of a buffer in add_port_entries()
	gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
	gpio: pxa: disable pinctrl calls for MMP_GPIO
	RDMA/cma: Initialize ib_sa_multicast structure to 0 when join
	RDMA/cma: Fix truncation compilation warning in make_cma_ports
	RDMA/uverbs: Fix typo of sizeof argument
	RDMA/siw: Fix connection failure handling
	RDMA/mlx5: Fix NULL string error
	ksmbd: fix uaf in smb20_oplock_break_ack
	parisc: Restore __ldcw_align for PA-RISC 2.0 processors
	xen/events: replace evtchn_rwlock with RCU
	Linux 5.15.135

Change-Id: I2b01cef028c0b40b20701592f09552261e8c2d73
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
88 changed files with 692 additions and 515 deletions

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 134
+SUBLEVEL = 135
EXTRAVERSION =
NAME = Trick or Treat
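
For context, these kbuild variables compose the release string ("5.15.135"). A standalone C sketch (illustration only, mirroring the well-known KERNEL_VERSION macro form, not a kbuild file):

/* Illustration: VERSION/PATCHLEVEL/SUBLEVEL -> release string and code. */
#include <stdio.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	int version = 5, patchlevel = 15, sublevel = 135;

	printf("%d.%d.%d -> 0x%06x\n", version, patchlevel, sublevel,
	       KERNEL_VERSION(version, patchlevel, sublevel));
	return 0;
}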

View File

@@ -864,7 +864,11 @@ static inline bool cpu_has_hw_af(void)
if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
return false;
-mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+/*
+ * Use cached version to avoid emulated msr operation on KVM
+ * guests.
+ */
+mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
return cpuid_feature_extract_unsigned_field(mmfr1,
ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
}

View File

@@ -79,6 +79,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
@@ -134,6 +135,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
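
The new part ID is consumed by matching the live MIDR value; a hedged sketch of the usual pattern (illustrative helper, not taken from this merge):

/* Sketch: is the boot CPU a Cortex-A520, using the new constants above? */
#include <asm/cputype.h>

static inline bool boot_cpu_is_cortex_a520(void)
{
	return (read_cpuid_id() & MIDR_CPU_MODEL_MASK) == MIDR_CORTEX_A520;
}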

View File

@@ -2,14 +2,28 @@
#ifndef __PARISC_LDCW_H
#define __PARISC_LDCW_H
-#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
and GCC only guarantees 8-byte alignment for stack locals, we can't
be assured of 16-byte alignment for atomic lock data even if we
specify "__attribute ((aligned(16)))" in the type declaration. So,
we use a struct containing an array of four ints for the atomic lock
type and dynamically select the 16-byte aligned int from the array
-for the semaphore.  */
+for the semaphore. */
+
+/* From: "Jim Hull" <jim.hull of hp.com>
+I've attached a summary of the change, but basically, for PA 2.0, as
+long as the ",CO" (coherent operation) completer is implemented, then the
+16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+they only require "natural" alignment (4-byte for ldcw, 8-byte for
+ldcd).
+
+Although the cache control hint is accepted by all PA 2.0 processors,
+it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
+require 16-byte alignment. If the address is unaligned, the operation
+of the instruction is undefined. The ldcw instruction does not generate
+unaligned data reference traps so misaligned accesses are not detected.
+This hid the problem for years. So, restore the 16-byte alignment dropped
+by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
#define __PA_LDCW_ALIGNMENT 16
#define __PA_LDCW_ALIGN_ORDER 4
@@ -19,22 +33,12 @@
& ~(__PA_LDCW_ALIGNMENT - 1); \
(volatile unsigned int *) __ret; \
})
-#define __LDCW "ldcw"
-#else /*CONFIG_PA20*/
-/* From: "Jim Hull" <jim.hull of hp.com>
-I've attached a summary of the change, but basically, for PA 2.0, as
-long as the ",CO" (coherent operation) completer is specified, then the
-16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
-they only require "natural" alignment (4-byte for ldcw, 8-byte for
-ldcd). */
-#define __PA_LDCW_ALIGNMENT 4
-#define __PA_LDCW_ALIGN_ORDER 2
-#define __ldcw_align(a) (&(a)->slock)
+#ifdef CONFIG_PA20
#define __LDCW "ldcw,co"
-#endif /*!CONFIG_PA20*/
+#else
+#define __LDCW "ldcw"
+#endif
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
We don't explicitly expose that "*a" may be written as reload
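
A standalone sketch of what __ldcw_align computes (user-space illustration, not kernel code): any four-int lock array contains exactly one 16-byte aligned word, and rounding the array address up finds it.

#include <stdint.h>
#include <stdio.h>

#define LDCW_ALIGNMENT 16

struct lock { volatile unsigned int lock[4]; };

static volatile unsigned int *ldcw_align(struct lock *a)
{
	uintptr_t ret = ((uintptr_t)&a->lock[0] + LDCW_ALIGNMENT - 1)
			& ~(uintptr_t)(LDCW_ALIGNMENT - 1);

	return (volatile unsigned int *)ret;
}

int main(void)
{
	struct lock l;

	/* The 16-byte array always straddles one 16-byte boundary, so the
	 * rounded-up address stays within lock[0..3]. */
	printf("%p -> %p\n", (void *)&l.lock[0], (void *)ldcw_align(&l));
	return 0;
}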

View File

@@ -3,13 +3,8 @@
#define __ASM_SPINLOCK_TYPES_H
typedef struct {
-#ifdef CONFIG_PA20
-volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
-#else
volatile unsigned int lock[4];
# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
-#endif
} arch_spinlock_t;

View File

@@ -407,8 +407,6 @@ void blk_cleanup_queue(struct request_queue *q)
blk_mq_sched_free_requests(q);
mutex_unlock(&q->sysfs_lock);
-percpu_ref_exit(&q->q_usage_counter);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}

View File

@@ -748,6 +748,8 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
struct request_queue *q = container_of(rcu_head, struct request_queue,
rcu_head);
+percpu_ref_exit(&q->q_usage_counter);
kmem_cache_free(blk_requestq_cachep, q);
}
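
The two block hunks above move percpu_ref_exit() from queue cleanup into the RCU free callback, so q_usage_counter stays valid for late blk_put_queue() callers. A minimal sketch of that lifetime pattern (assumed names, not the block-layer code):

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct percpu_ref usage;
	struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	percpu_ref_exit(&o->usage);	/* only now is no user left */
	kfree(o);
}

static void obj_release(struct percpu_ref *ref)	/* refcount hit zero */
{
	struct obj *o = container_of(ref, struct obj, usage);

	call_rcu(&o->rcu, obj_free_rcu);
}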

View File

@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
-reg - rbnode->base_reg, value);
+(reg - rbnode->base_reg) / map->reg_stride,
+value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
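
Worked example of the index fix (hypothetical register map with reg_stride 4): a write to register 0x10 in a node based at 0x08 must mark cache slot 2 present, not slot 8.

#include <assert.h>

int main(void)
{
	unsigned int reg = 0x10, base_reg = 0x08, reg_stride = 4;

	assert(reg - base_reg == 8);			/* old: byte offset */
	assert((reg - base_reg) / reg_stride == 2);	/* fixed: slot index */
	return 0;
}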

View File

@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -996,15 +995,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
static void rbd_image_header_cleanup(struct rbd_image_header *header)
{
kfree(header->object_prefix);
ceph_put_snap_context(header->snapc);
kfree(header->snap_sizes);
kfree(header->snap_names);
memset(header, 0, sizeof(*header));
}
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
-struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+struct rbd_image_header_ondisk *ondisk,
+bool first_time)
{
-struct rbd_image_header *header = &rbd_dev->header;
-bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@@ -1071,11 +1079,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
-rbd_init_layout(rbd_dev);
-} else {
-ceph_put_snap_context(header->snapc);
-kfree(header->snap_names);
-kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@@ -4861,7 +4864,9 @@ out_req:
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+struct rbd_image_header *header,
+bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -4909,7 +4914,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
-ret = rbd_header_from_disk(rbd_dev, ondisk);
+ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@@ -4933,39 +4938,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
-u64 mapping_size;
-int ret;
-down_write(&rbd_dev->header_rwsem);
-mapping_size = rbd_dev->mapping.size;
-ret = rbd_dev_header_info(rbd_dev);
-if (ret)
-goto out;
-/*
- * If there is a parent, see if it has disappeared due to the
- * mapped image getting flattened.
- */
-if (rbd_dev->parent) {
-ret = rbd_dev_v2_parent_info(rbd_dev);
-if (ret)
-goto out;
-}
-rbd_assert(!rbd_is_snap(rbd_dev));
-rbd_dev->mapping.size = rbd_dev->header.image_size;
-out:
-up_write(&rbd_dev->header_rwsem);
-if (!ret && mapping_size != rbd_dev->mapping.size)
-rbd_dev_update_size(rbd_dev);
-return ret;
-}
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
@@ -5508,17 +5480,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
-return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
-&rbd_dev->header.obj_order,
-&rbd_dev->header.image_size);
-}
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+char **pobject_prefix)
{
size_t size;
void *reply_buf;
char *object_prefix;
int ret;
void *p;
@@ -5536,16 +5503,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
-rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
-p + ret, NULL, GFP_NOIO);
+object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+GFP_NOIO);
+if (IS_ERR(object_prefix)) {
+ret = PTR_ERR(object_prefix);
+goto out;
+}
ret = 0;
-if (IS_ERR(rbd_dev->header.object_prefix)) {
-ret = PTR_ERR(rbd_dev->header.object_prefix);
-rbd_dev->header.object_prefix = NULL;
-} else {
-dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
-}
+*pobject_prefix = object_prefix;
+dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@@ -5596,13 +5563,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
-return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
-rbd_is_ro(rbd_dev),
-&rbd_dev->header.features);
-}
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@@ -5639,6 +5599,14 @@ struct parent_image_info {
u64 overlap;
};
static void rbd_parent_info_cleanup(struct parent_image_info *pii)
{
kfree(pii->pool_ns);
kfree(pii->image_id);
memset(pii, 0, sizeof(*pii));
}
/*
* The caller is responsible for @pii.
*/
@@ -5708,6 +5676,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
@@ -5746,14 +5717,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
-static int get_parent_info(struct rbd_device *rbd_dev,
-struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@@ -5781,7 +5755,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@@ -5791,37 +5765,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
-ret = get_parent_info(rbd_dev, &pii);
+ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
__func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
pii.has_overlap, pii.overlap);
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
}
if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
}
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5833,58 +5782,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
- * The parent won't change (except when the clone is
- * flattened, already handled that). So we only need to
- * record the parent spec we have not already done so.
+ * The parent won't change except when the clone is flattened,
+ * so we only need to record the parent image spec once.
 */
-if (!rbd_dev->parent_spec) {
-parent_spec->pool_id = pii.pool_id;
-if (pii.pool_ns && *pii.pool_ns) {
-parent_spec->pool_ns = pii.pool_ns;
-pii.pool_ns = NULL;
-}
-parent_spec->image_id = pii.image_id;
-pii.image_id = NULL;
-parent_spec->snap_id = pii.snap_id;
-rbd_dev->parent_spec = parent_spec;
-parent_spec = NULL; /* rbd_dev now owns this */
-}
+parent_spec->pool_id = pii.pool_id;
+if (pii.pool_ns && *pii.pool_ns) {
+parent_spec->pool_ns = pii.pool_ns;
+pii.pool_ns = NULL;
+}
+parent_spec->image_id = pii.image_id;
+pii.image_id = NULL;
+parent_spec->snap_id = pii.snap_id;
+rbd_assert(!rbd_dev->parent_spec);
+rbd_dev->parent_spec = parent_spec;
+parent_spec = NULL; /* rbd_dev now owns this */
/*
- * We always update the parent overlap. If it's zero we issue
- * a warning, as we will proceed as if there was no parent.
+ * Record the parent overlap. If it's zero, issue a warning as
+ * we will proceed as if there is no parent.
 */
-if (!pii.overlap) {
-if (parent_spec) {
-/* refresh, careful to warn just once */
-if (rbd_dev->parent_overlap)
-rbd_warn(rbd_dev,
-"clone now standalone (overlap became 0)");
-} else {
-/* initial probe */
-rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
-}
-}
+if (!pii.overlap)
+rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
-kfree(pii.pool_ns);
-kfree(pii.image_id);
+rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
-void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@@ -5896,27 +5833,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
-p = &striping_info_buf;
-rbd_dev->header.stripe_unit = ceph_decode_64(&p);
-rbd_dev->header.stripe_count = ceph_decode_64(&p);
+*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+*stripe_count);
return 0;
}
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
-__le64 data_pool_id;
+__le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
-NULL, 0, &data_pool_id, sizeof(data_pool_id));
+NULL, 0, &data_pool_buf,
+sizeof(data_pool_buf));
+dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
-if (ret < sizeof(data_pool_id))
+if (ret < sizeof(data_pool_buf))
return -EBADMSG;
-rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
-WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+*data_pool_id = le64_to_cpu(data_pool_buf);
+dout(" data_pool_id = %lld\n", *data_pool_id);
+WARN_ON(*data_pool_id == CEPH_NOPOOL);
return 0;
}
@@ -6108,7 +6051,8 @@ out_err:
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@@ -6169,9 +6113,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
-ceph_put_snap_context(rbd_dev->header.snapc);
-rbd_dev->header.snapc = snapc;
+*psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@@ -6220,38 +6162,42 @@ out:
return snap_name;
}
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+struct rbd_image_header *header,
+bool first_time)
{
-bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
-ret = rbd_dev_v2_image_size(rbd_dev);
+ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+first_time ? &header->obj_order : NULL,
+&header->image_size);
if (ret)
return ret;
if (first_time) {
-ret = rbd_dev_v2_header_onetime(rbd_dev);
+ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
-ret = rbd_dev_v2_snap_context(rbd_dev);
-if (ret && first_time) {
-kfree(rbd_dev->header.object_prefix);
-rbd_dev->header.object_prefix = NULL;
-}
+ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+if (ret)
+return ret;
-return ret;
+return 0;
}
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+struct rbd_image_header *header,
+bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
-return rbd_dev_v1_header_info(rbd_dev);
+return rbd_dev_v1_header_info(rbd_dev, header, first_time);
-return rbd_dev_v2_header_info(rbd_dev);
+return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@@ -6738,60 +6684,49 @@ out:
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
-struct rbd_image_header *header;
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
-header = &rbd_dev->header;
-ceph_put_snap_context(header->snapc);
-kfree(header->snap_sizes);
-kfree(header->snap_names);
-kfree(header->object_prefix);
-memset(header, 0, sizeof (*header));
+rbd_image_header_cleanup(&rbd_dev->header);
}
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+struct rbd_image_header *header)
{
int ret;
-ret = rbd_dev_v2_object_prefix(rbd_dev);
+ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
-goto out_err;
+return ret;
/*
 * Get the and check features for the image. Currently the
 * features are assumed to never change.
 */
-ret = rbd_dev_v2_features(rbd_dev);
+ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+rbd_is_ro(rbd_dev), &header->features);
if (ret)
-goto out_err;
+return ret;
/* If the image supports fancy striping, get its parameters */
-if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
-ret = rbd_dev_v2_striping_info(rbd_dev);
-if (ret < 0)
-goto out_err;
-}
-if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
-ret = rbd_dev_v2_data_pool(rbd_dev);
+if (header->features & RBD_FEATURE_STRIPINGV2) {
+ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+&header->stripe_count);
if (ret)
-goto out_err;
+return ret;
}
-rbd_init_layout(rbd_dev);
-return 0;
+if (header->features & RBD_FEATURE_DATA_POOL) {
+ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
+if (ret)
+return ret;
+}
-out_err:
-rbd_dev->header.features = 0;
-kfree(rbd_dev->header.object_prefix);
-rbd_dev->header.object_prefix = NULL;
-return ret;
+return 0;
}
/*
@@ -6986,13 +6921,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
-ret = rbd_dev_header_info(rbd_dev);
+ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
+rbd_init_layout(rbd_dev);
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@@ -7021,7 +6958,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
-ret = rbd_dev_v2_parent_info(rbd_dev);
+ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@@ -7047,6 +6984,107 @@ err_out_format:
return ret;
}
static void rbd_dev_update_header(struct rbd_device *rbd_dev,
struct rbd_image_header *header)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
if (rbd_dev->header.image_size != header->image_size) {
rbd_dev->header.image_size = header->image_size;
if (!rbd_is_snap(rbd_dev)) {
rbd_dev->mapping.size = header->image_size;
rbd_dev_update_size(rbd_dev);
}
}
ceph_put_snap_context(rbd_dev->header.snapc);
rbd_dev->header.snapc = header->snapc;
header->snapc = NULL;
if (rbd_dev->image_format == 1) {
kfree(rbd_dev->header.snap_names);
rbd_dev->header.snap_names = header->snap_names;
header->snap_names = NULL;
kfree(rbd_dev->header.snap_sizes);
rbd_dev->header.snap_sizes = header->snap_sizes;
header->snap_sizes = NULL;
}
}
static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
struct parent_image_info *pii)
{
if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
/*
* Either the parent never existed, or we have
* record of it but the image got flattened so it no
* longer has a parent. When the parent of a
* layered image disappears we immediately set the
* overlap to 0. The effect of this is that all new
* requests will be treated as if the image had no
* parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable. It's there to avoid duplication in each
* snapshot record.
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone has been flattened\n",
rbd_dev->disk->disk_name);
}
} else {
rbd_assert(rbd_dev->parent_spec);
/*
* Update the parent overlap. If it became zero, issue
* a warning as we will proceed as if there is no parent.
*/
if (!pii->overlap && rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone has become standalone (overlap 0)");
rbd_dev->parent_overlap = pii->overlap;
}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
struct rbd_image_header header = { 0 };
struct parent_image_info pii = { 0 };
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
ret = rbd_dev_header_info(rbd_dev, &header, false);
if (ret)
goto out;
/*
* If there is a parent, see if it has disappeared due to the
* mapped image getting flattened.
*/
if (rbd_dev->parent) {
ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out;
}
down_write(&rbd_dev->header_rwsem);
rbd_dev_update_header(rbd_dev, &header);
if (rbd_dev->parent)
rbd_dev_update_parent(rbd_dev, &pii);
up_write(&rbd_dev->header_rwsem);
out:
rbd_parent_info_cleanup(&pii);
rbd_image_header_cleanup(&header);
return ret;
}
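
The rewritten rbd_dev_refresh() above reads the new header and parent info with no locks held and only takes header_rwsem for the cheap publish step. A simplified user-space sketch of that shape (illustrative names, not the rbd code):

#include <pthread.h>
#include <string.h>

struct header { long image_size; };

static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static struct header live_header;

static int read_new_header(struct header *h)
{
	h->image_size = 42;	/* stands in for slow cluster I/O */
	return 0;
}

static int refresh(void)
{
	struct header h = { 0 };
	int ret = read_new_header(&h);	/* slow part: lock not held */

	if (ret)
		return ret;

	pthread_rwlock_wrlock(&header_rwsem);
	memcpy(&live_header, &h, sizeof(h));	/* cheap publish */
	pthread_rwlock_unlock(&header_rwsem);
	return 0;
}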
static ssize_t do_rbd_add(struct bus_type *bus,
const char *buf,
size_t count)

View File

@@ -60,6 +60,8 @@ static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = {
PLL_1443X_RATE(650000000U, 325, 3, 2, 0),
PLL_1443X_RATE(594000000U, 198, 2, 2, 0),
PLL_1443X_RATE(519750000U, 173, 2, 2, 16384),
-PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
-PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
};
struct imx_pll14xx_clk imx_1443x_pll = {

View File

@@ -963,7 +963,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
else if (param == PIN_CONFIG_BIAS_DISABLE ||
param == PIN_CONFIG_BIAS_PULL_DOWN ||
param == PIN_CONFIG_DRIVE_STRENGTH)
-return pinctrl_gpio_set_config(offset, config);
+return pinctrl_gpio_set_config(chip->base + offset, config);
else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */

View File

@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
case MMP_GPIO:
return false;
default:

View File

@@ -2225,7 +2225,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->flags |= AMD_IS_PX;
if (!(adev->flags & AMD_IS_APU)) {
-parent = pci_upstream_bridge(adev->pdev);
+parent = pcie_find_root_port(adev->pdev);
adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
}

View File

@@ -3074,6 +3074,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
err:
usb_free_urb(sc->ghl_urb);
hid_hw_stop(hdev);
return ret;
}

View File

@@ -129,6 +129,14 @@ static int enable_gpe(struct device *dev)
}
wakeup = &adev->wakeup;
/*
* Call acpi_disable_gpe(), so that reference count
* gpe_event_info->runtime_count doesn't overflow.
* When gpe_event_info->runtime_count = 0, the call
* to acpi_disable_gpe() simply return.
*/
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
if (ACPI_FAILURE(acpi_sts)) {
dev_err(dev, "enable ose_gpe failed\n");

View File

@@ -4751,7 +4751,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
-struct ib_sa_multicast ib;
+struct ib_sa_multicast ib = {};
enum ib_gid_type gid_type;
bool send_only;

View File

@@ -218,7 +218,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
return -ENOMEM;
for (i = 0; i < ports_num; i++) {
-char port_str[10];
+char port_str[11];
ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1);
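
Worked sizing behind the [10] -> [11] change: the largest u32 prints as "4294967295", ten characters, so the terminating NUL needs an eleventh byte. (The mlx4 hunk below sizes its buffer at 12 for a signed int, whose minimum "-2147483648" is eleven characters.)

#include <assert.h>
#include <stdio.h>

int main(void)
{
	char port_str[11];
	int n = snprintf(port_str, sizeof(port_str), "%u", 4294967295u);

	assert(n == 10);	/* 10 digits + NUL exactly fills 11 bytes */
	return 0;
}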

View File

@@ -2316,6 +2316,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
},
[RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
},
[RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit,

View File

@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count)
return -EINVAL;
-if (count < method_elm->req_size + sizeof(hdr)) {
+if (count < method_elm->req_size + sizeof(*hdr)) {
/*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't

View File

@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{
int i;
-char buff[11];
+char buff[12];
struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ;
struct ib_port_attr attr;

View File

@@ -2095,7 +2095,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory";
default:
-return NULL;
+return "Unknown";
}
}

View File

@@ -973,6 +973,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep);
goto error;
}
@@ -1097,9 +1098,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/*
* Socket close before MPA request received.
*/
siw_dbg_cep(cep, "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
if (cep->listen_cep) {
siw_dbg_cep(cep,
"no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
}
}
release_cep = 1;
@@ -1222,7 +1226,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep)
goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state);
siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE:

View File

@@ -1881,13 +1881,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
/* Get the leaf page size */
tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+num_pages = size >> tg;
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
cmd->tlbi.tg = (tg - 10) / 2;
-/* Determine what level the granule is at */
-cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
-num_pages = size >> tg;
+/*
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
+ */
+if (cmd->tlbi.leaf)
+cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+num_pages++;
}
cmds.num = 0;

View File

@@ -750,17 +750,16 @@ err:
/*
* Cleanup zoned device information.
*/
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
int i;
-for (i = 0; i < dmz->nr_ddevs; i++) {
-if (dmz->ddev[i]) {
+for (i = 0; i < dmz->nr_ddevs; i++)
+if (dmz->ddev[i])
dm_put_device(ti, dmz->ddev[i]);
-dmz->ddev[i] = NULL;
-}
-}
+
+kfree(dmz->ddev);
}
static int dmz_fixup_devices(struct dm_target *ti)
@@ -959,7 +958,7 @@ err_bio:
err_meta:
dmz_dtr_metadata(dmz->metadata);
err_dev:
-dmz_put_zoned_device(ti);
+dmz_put_zoned_devices(ti);
err:
kfree(dmz->dev);
kfree(dmz);
@@ -990,7 +989,7 @@ static void dmz_dtr(struct dm_target *ti)
bioset_exit(&dmz->bio_set);
-dmz_put_zoned_device(ti);
+dmz_put_zoned_devices(ti);
mutex_destroy(&dmz->chunk_lock);

View File

@@ -888,6 +888,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
/* UBI cannot work on flashes with zero erasesize. */
if (!mtd->erasesize) {
pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
mtd->index);
return -EINVAL;
}
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)

View File

@@ -2594,14 +2594,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
-mv88e6xxx_g1_wait_eeprom_done(chip);
+if (chip->info->ops->get_eeprom)
+mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
-mv88e6xxx_g1_wait_eeprom_done(chip);
+if (chip->info->ops->get_eeprom)
+mv88e6xxx_g2_eeprom_wait(chip);
}
}

View File

@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
-const unsigned long timeout = jiffies + 1 * HZ;
-u16 val;
-int err;
-/* Wait up to 1 second for the switch to finish reading the
- * EEPROM.
- */
-while (time_before(jiffies, timeout)) {
-err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
-if (err) {
-dev_err(chip->dev, "Error reading status");
-return;
-}
-/* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
-if (val != 0xffff &&
-val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
-return;
-usleep_range(1000, 2000);
-}
-dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5

View File

@@ -280,7 +280,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);

View File

@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;

View File

@@ -357,6 +357,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;

View File

@@ -1297,24 +1297,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * So, re-compute TCP pseudo header checksum when configured for
- * trunk mode.
+ * So, re-compute TCP pseudo header checksum.
 */
if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
if (tcph->check == 0x0000) {
/* Recompute TCP pseudo header checksum */
-if (adapter->is_active_trunk) {
-tcphdrlen = skb->len - iphlen;
-if (skb_proto == ETH_P_IP)
-tcph->check =
-~csum_tcpudp_magic(iph->saddr,
-iph->daddr, tcphdrlen, iph_proto, 0);
-else if (skb_proto == ETH_P_IPV6)
-tcph->check =
-~csum_ipv6_magic(&iph6->saddr,
-&iph6->daddr, tcphdrlen, iph_proto, 0);
-}
+tcphdrlen = skb->len - iphlen;
+if (skb_proto == ETH_P_IP)
+tcph->check =
+~csum_tcpudp_magic(iph->saddr,
+iph->daddr, tcphdrlen, iph_proto, 0);
+else if (skb_proto == ETH_P_IPV6)
+tcph->check =
+~csum_ipv6_magic(&iph6->saddr,
+&iph6->daddr, tcphdrlen, iph_proto, 0);
/* Setup SKB fields for checksum offload */
skb_partial_csum_set(skb, iphlen,
offsetof(struct tcphdr, check));

View File

@@ -111,9 +111,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
+struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
-struct qed_ll2_cbs cbs;
};
extern const struct qed_ll2_ops qed_ll2_ops_pass;
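
The reorder matters because one of the queue structs ends in a flexible array, and C requires the member that carries a flexible array to sit last. A standalone illustration with toy structs (not the qed definitions):

struct tail { int n; int bds[]; };	/* ends in a flexible array */

struct bad {				/* UB/GNU extension: FAM not at end */
	struct tail t;
	int cbs;			/* may overlap t.bds[] */
};

struct good {				/* same shape as the fixed struct */
	int cbs;			/* moved before the FAM carrier */
	struct tail t;
};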

View File

@@ -105,6 +105,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
-if (!dwmac->dev->power.is_suspended) {
+if (!dwmac->ops->clk_rx_enable_in_suspend ||
+    !dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -515,7 +517,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
-.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {

View File

@@ -1619,6 +1619,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
if (tx_chn->irq <= 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
ret = tx_chn->irq ?: -ENXIO;
goto err;
}

View File

@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
-if (unlikely(ret < 0)) {
+if (unlikely(ret < 4)) {
+ret = ret < 0 ? ret : -ENODATA;
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;

View File

@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static int uhdlc_close(struct net_device *dev);
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -705,6 +707,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -728,10 +731,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
-hdlc_open(dev);
+rc = hdlc_open(dev);
+if (rc)
+uhdlc_close(dev);
}
-return 0;
+return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -821,6 +827,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
hdlc_close(dev);
return 0;
}

View File

@@ -1960,7 +1960,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_fw_ini_error_dump_header *header;
u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
u32 num_of_ranges, i, size;
-void *range;
+u8 *range;
/*
* The higher part of the ID in version 2 is irrelevant for

View File

@@ -277,9 +277,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
-__le32 internal_base_addr;
-__le64 dram_base_addr;
-__le32 page_num;
+__le32 internal_base_addr __packed;
+__le64 dram_base_addr __packed;
+__le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
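
Why the __packed annotations fix the layout: an unpacked __le64 union member forces 8-byte alignment on the whole union, padding it away from the preceding 32-bit fields and breaking the on-the-wire structure. A standalone illustration with stand-in types (not the iwlwifi struct):

#include <stdint.h>

struct loose  { uint32_t hdr; union { uint64_t addr; } u; };
struct packed { uint32_t hdr; union { uint64_t addr __attribute__((packed)); } u; };

_Static_assert(sizeof(struct loose) == 16, "4 bytes of padding inserted");
_Static_assert(sizeof(struct packed) == 12, "wire layout preserved");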

View File

@@ -1090,7 +1090,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
/* skip the sta_id at the beginning */
_kek_kck_cmd = (void *)
-((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
+((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
}
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",

View File

@@ -1950,7 +1950,7 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
goto out;
}
-ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len);
*ppos += ret;
out:

View File

@@ -686,7 +686,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
-(void *)mvm->nvm_data->channels + 1;
+(void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}

View File

@@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen = len - hdrlen;
if (fraglen) {
-int offset = (void *)hdr + hdrlen -
-rxb_addr(rxb) + rxb_offset(rxb);
+int offset = (u8 *)hdr + hdrlen -
+(u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);

View File

@@ -223,8 +223,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
fraglen = len - headlen;
if (fraglen) {
-int offset = (void *)hdr + headlen + pad_len -
-rxb_addr(rxb) + rxb_offset(rxb);
+int offset = (u8 *)hdr + headlen + pad_len -
+(u8 *)rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
fraglen, rxb->truesize);

View File

@@ -2150,7 +2150,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
struct iwl_scan_umac_chan_param *chan_param;
void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
-void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
+void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
struct iwl_scan_req_umac_tail_v2 *tail_v2 =
(struct iwl_scan_req_umac_tail_v2 *)sec_part;

View File

@@ -363,7 +363,7 @@ struct iwl_trans_pcie {
/* PCI bus related data */
struct pci_dev *pci_dev;
-void __iomem *hw_base;
+u8 __iomem *hw_base;
bool ucode_write_complete;
bool sx_complete;

View File

@@ -727,7 +727,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
goto err;
}
-rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
+rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
rxq->rb_stts_dma =
trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

View File

@@ -2798,7 +2798,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
+u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;

View File

@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
void *tfd;
u32 num_tbs;
-tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
memset(tfd, 0, trans->txqs.tfd.size);

View File

@@ -189,7 +189,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
return NULL;
/* set the chaining pointer to the previous page if there */
-*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+*(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
*page_ptr = ret;
return ret;
@@ -314,7 +314,7 @@ alloc:
return NULL;
p->pos = page_address(p->page);
/* set the chaining pointer to NULL */
-*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+*(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
*page_ptr = p->page;
get_page(p->page);
@@ -963,7 +963,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
while (next) {
struct page *tmp = next;
-next = *(void **)(page_address(next) + PAGE_SIZE -
+next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
sizeof(void *));
__free_page(tmp);
}

View File

@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
if (trans->trans_cfg->use_tfh)
idx = iwl_txq_get_cmd_index(txq, idx);
-return txq->tfds + trans->txqs.tfd.size * idx;
+return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
}
int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,

View File

@@ -977,8 +977,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
-tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
-tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
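
Worked numbers for the corrected advance (hypothetical sizes: 4-byte TLV header, 12-byte payload, 16-byte struct): the TLV length field covers only the payload, so stepping by sizeof(*tlv_rxba) + tlv_len overshoots and the remaining-bytes counter drifts.

#include <assert.h>

int main(void)
{
	unsigned int header_len = 4, struct_len = 16, tlv_len = 12;

	assert(struct_len + tlv_len == 28);	/* old step: past the TLV */
	assert(header_len + tlv_len == 16);	/* fixed: true TLV extent */
	return 0;
}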

View File

@@ -98,7 +98,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
-if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+    rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -107,12 +108,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
-if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
-sizeof(bridge_tunnel_header))) ||
-(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
-sizeof(rfc1042_header)) &&
-ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
-ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+sizeof(bridge_tunnel_header))) ||
+(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+sizeof(rfc1042_header)) &&
+ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type

View File

@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
-u16 val;
u8 lna;
-val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
-if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
-*lna_2g = 0;
-if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
-memset(lna_5g, 0, sizeof(s8) * 3);
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)

View File

@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
-u8 lna;
+bool use_lna;
+u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
-lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+if (chan->band == NL80211_BAND_2GHZ)
+use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+else
+use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+if (use_lna)
+lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);

View File

@@ -901,13 +901,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
{
struct of_changeset_entry *ce;
+if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
if (!ce)
return -ENOMEM;
-if (WARN_ON(action >= ARRAY_SIZE(action_names)))
-return -EINVAL;
/* get a reference to the node */
ce->action = action;
ce->np = of_node_get(np);
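
The leak fix above is the classic validate-before-allocate reordering; a generic sketch of the shape (hypothetical names, not the OF code):

#include <stdlib.h>

static int do_action(unsigned long action, unsigned long nr_actions)
{
	void *ce;

	if (action >= nr_actions)	/* moved up: nothing allocated yet */
		return -1;

	ce = calloc(1, 64);
	if (!ce)
		return -1;

	/* ... populate ce; a real implementation would register it and
	 * let the owner free it later ... */
	free(ce);	/* sketch cleanup only */
	return 0;
}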

View File

@@ -1292,7 +1292,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
return 0;
out:
ptp_ocp_dev_release(&bp->dev);
-put_device(&bp->dev);
return err;
}

View File

@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
-goto err_out;
+goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
-goto err_out;
+goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
-goto err_out;
+goto err_put;
}
retval = -EINVAL;
@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
-err_out:
+err_put:
zfcp_ccw_adapter_put(adapter);
+err_out:
return ERR_PTR(retval);
}

View File

@@ -1199,9 +1199,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
return 0;
clk_dis_all:
-pm_runtime_put_sync(&pdev->dev);
-pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+pm_runtime_put_noidle(&pdev->dev);
+pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
clk_disable_unprepare(xqspi->pclk);
@@ -1221,17 +1221,19 @@ remove_master:
*
* Return: 0 Always
*/
-static int zynqmp_qspi_remove(struct platform_device *pdev)
+static void zynqmp_qspi_remove(struct platform_device *pdev)
{
struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(&pdev->dev);
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+pm_runtime_disable(&pdev->dev);
+pm_runtime_put_noidle(&pdev->dev);
+pm_runtime_set_suspended(&pdev->dev);
clk_disable_unprepare(xqspi->refclk);
clk_disable_unprepare(xqspi->pclk);
-pm_runtime_set_suspended(&pdev->dev);
-pm_runtime_disable(&pdev->dev);
-return 0;
}
static const struct of_device_id zynqmp_qspi_of_match[] = {
@@ -1243,7 +1245,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);
static struct platform_driver zynqmp_qspi_driver = {
.probe = zynqmp_qspi_probe,
-.remove = zynqmp_qspi_remove,
+.remove_new = zynqmp_qspi_remove,
.driver = {
.name = "zynqmp-qspi",
.of_match_table = zynqmp_qspi_of_match,
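
For reference, the .remove -> .remove_new conversion pattern used above, as a minimal sketch of a hypothetical driver (the callback returns void because the core ignored the old int result):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/* teardown only; no status to return anymore */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,
	.driver     = { .name = "foo" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");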

View File

@@ -875,7 +875,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
-struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -885,11 +884,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+struct config_item *item;
int ret;
-config_item_put(iter->prev_item);
-iter->prev_item = NULL;
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -899,12 +896,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
-iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
-if (!iter->prev_item)
+item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -927,7 +925,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
-config_item_put(iter.prev_item);
return ret;
}

View File

@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
done += partlen;
len -= partlen;
ptr += partlen;
+iov->consumed += partlen;
+iov->iov[iov->i].iov_len -= partlen;
+iov->iov[iov->i].iov_base += partlen;
-vringh_kiov_advance(iov, partlen);
+if (!iov->iov[iov->i].iov_len) {
+/* Fix up old iov element then increment. */
+iov->iov[iov->i].iov_len = iov->consumed;
+iov->iov[iov->i].iov_base -= iov->consumed;
+iov->consumed = 0;
+iov->i++;
+}
}
return done;
}

View File

@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
@@ -95,6 +96,7 @@ enum xen_irq_type {
struct irq_info {
struct list_head list;
struct list_head eoi_list;
struct rcu_work rwork;
short refcnt;
u8 spurious_cnt;
u8 is_accounted;
@@ -144,23 +146,13 @@ const struct evtchn_ops *evtchn_ops;
*/
static DEFINE_MUTEX(irq_mapping_update_lock);
-/*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
- * evtchn_rwlock
- * IRQ-desc lock
- * percpu eoi_list_lock
- * irq_info->lock
+ * IRQ-desc lock
+ * percpu eoi_list_lock
+ * irq_info->lock
*/
static LIST_HEAD(xen_irq_list_head);
@@ -304,6 +296,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
info->is_accounted = 1;
}
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
rwork);
unsigned int irq = info->irq;
/* Remove the info pointer only now, with no potential users left. */
set_info_for_irq(irq, NULL);
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
if (irq >= nr_legacy_irqs())
irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -666,33 +674,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
-read_lock_irqsave(&evtchn_rwlock, flags);
+rcu_read_lock();
while (true) {
-spin_lock(&eoi->eoi_list_lock);
+spin_lock_irqsave(&eoi->eoi_list_lock, flags);
info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
eoi_list);
-if (info == NULL || now < info->eoi_time) {
-spin_unlock(&eoi->eoi_list_lock);
+if (info == NULL)
+break;
+if (now < info->eoi_time) {
+mod_delayed_work_on(info->eoi_cpu, system_wq,
+&eoi->delayed,
+info->eoi_time - now);
break;
}
list_del_init(&info->eoi_list);
-spin_unlock(&eoi->eoi_list_lock);
+spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
info->eoi_time = 0;
xen_irq_lateeoi_locked(info, false);
}
-if (info)
-mod_delayed_work_on(info->eoi_cpu, system_wq,
-&eoi->delayed, info->eoi_time - now);
+spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
-read_unlock_irqrestore(&evtchn_rwlock, flags);
+rcu_read_unlock();
}
static void xen_cpu_init_eoi(unsigned int cpu)
@@ -707,16 +718,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
struct irq_info *info;
-unsigned long flags;
-read_lock_irqsave(&evtchn_rwlock, flags);
+rcu_read_lock();
info = info_for_irq(irq);
if (info)
xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
-read_unlock_irqrestore(&evtchn_rwlock, flags);
+rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
@@ -730,6 +740,7 @@ static void xen_irq_init(unsigned irq)
 info->type = IRQT_UNBOUND;
 info->refcnt = -1;
+INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 set_info_for_irq(irq, info);
 /*
@@ -787,31 +798,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 static void xen_free_irq(unsigned irq)
 {
 struct irq_info *info = info_for_irq(irq);
-unsigned long flags;
 if (WARN_ON(!info))
 return;
-write_lock_irqsave(&evtchn_rwlock, flags);
 if (!list_empty(&info->eoi_list))
 lateeoi_list_del(info);
 list_del(&info->list);
-set_info_for_irq(irq, NULL);
 WARN_ON(info->refcnt > 0);
-write_unlock_irqrestore(&evtchn_rwlock, flags);
-kfree(info);
-/* Legacy IRQ descriptors are managed by the arch. */
-if (irq < nr_legacy_irqs())
-return;
-irq_free_desc(irq);
+queue_rcu_work(system_wq, &info->rwork);
 }
 static void xen_evtchn_close(evtchn_port_t port)
@@ -1715,7 +1713,14 @@ static void __xen_evtchn_do_upcall(void)
 int cpu = smp_processor_id();
 struct evtchn_loop_ctrl ctrl = { 0 };
-read_lock(&evtchn_rwlock);
+/*
+ * When closing an event channel the associated IRQ must not be freed
+ * until all cpus have left the event handling loop. This is ensured
+ * by taking the rcu_read_lock() while handling events, as freeing of
+ * the IRQ is handled via queue_rcu_work() _after_ closing the event
+ * channel.
+ */
+rcu_read_lock();
 do {
 vcpu_info->evtchn_upcall_pending = 0;
@@ -1728,7 +1733,7 @@ static void __xen_evtchn_do_upcall(void)
 } while (vcpu_info->evtchn_upcall_pending);
-read_unlock(&evtchn_rwlock);
+rcu_read_unlock();
 /*
 * Increment irq_epoch only now to defer EOIs only for
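The core of this conversion is the standard RCU deferred-free pattern: readers hold rcu_read_lock() across the event loop, and teardown frees the irq_info only after a grace period via queue_rcu_work(). A minimal sketch of the pattern under that assumption (struct obj and its fields are illustrative, not the Xen types):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct obj {
        int data;
        struct rcu_work rwork;
    };

    static void obj_free_fn(struct work_struct *work)
    {
        struct obj *o = container_of(to_rcu_work(work), struct obj, rwork);

        kfree(o);   /* runs only after all rcu_read_lock() readers left */
    }

    static void obj_release(struct obj *o)
    {
        INIT_RCU_WORK(&o->rwork, obj_free_fn);
        queue_rcu_work(system_wq, &o->rwork);
    }

Readers that only dereference the object inside rcu_read_lock()/rcu_read_unlock() can then never observe a freed pointer, which is why the rwlock could be dropped entirely.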


@@ -8058,10 +8058,10 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
 goto err_out;
 }
-opinfo_put(opinfo);
-ksmbd_fd_put(work, fp);
 opinfo->op_state = OPLOCK_STATE_NONE;
 wake_up_interruptible_all(&opinfo->oplock_q);
+opinfo_put(opinfo);
+ksmbd_fd_put(work, fp);
 rsp->StructureSize = cpu_to_le16(24);
 rsp->OplockLevel = rsp_oplevel;
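The rule this reorder enforces: every access to a refcounted object must precede the put that may free it. Sketched generically (struct thing and its helpers are illustrative, not the ksmbd types):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct thing {
        refcount_t ref;
        int state;
    };

    static void thing_put(struct thing *t)
    {
        if (refcount_dec_and_test(&t->ref))
            kfree(t);   /* t may be freed here */
    }

    static void finish(struct thing *t)
    {
        t->state = 0;   /* last use of t ... */
        thing_put(t);   /* ... only then drop the reference */
    }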


@@ -10557,7 +10557,9 @@ static void nfs4_disable_swap(struct inode *inode)
 */
 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
-nfs4_schedule_state_manager(clp);
+set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+wake_up_var(&clp->cl_state);
 }
 static const struct inode_operations nfs4_dir_inode_operations = {
static const struct inode_operations nfs4_dir_inode_operations = {


@@ -1213,17 +1213,23 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
 {
 struct task_struct *task;
 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
-struct rpc_clnt *cl = clp->cl_rpcclient;
-while (cl != cl->cl_parent)
-cl = cl->cl_parent;
+struct rpc_clnt *clnt = clp->cl_rpcclient;
+bool swapon = false;
 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
-if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
-wake_up_var(&clp->cl_state);
-return;
+if (atomic_read(&clnt->cl_swapper)) {
+swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
+&clp->cl_state);
+if (!swapon) {
+wake_up_var(&clp->cl_state);
+return;
+}
 }
-set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+return;
 __module_get(THIS_MODULE);
 refcount_inc(&clp->cl_count);
@@ -1240,8 +1246,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
 __func__, PTR_ERR(task));
 if (!nfs_client_init_is_complete(clp))
 nfs_mark_client_ready(clp, PTR_ERR(task));
+if (swapon)
+clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
 nfs4_clear_state_manager_bit(clp);
-clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
 nfs_put_client(clp);
 module_put(THIS_MODULE);
 }
@@ -2692,6 +2699,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
 nfs4_end_drain_session(clp);
 nfs4_clear_state_manager_bit(clp);
+if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
+&clp->cl_state)) {
+memflags = memalloc_nofs_save();
+continue;
+}
 if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
 nfs_client_return_marked_delegations(clp);
@@ -2730,22 +2744,25 @@ static int nfs4_run_state_manager(void *ptr)
 allow_signal(SIGKILL);
 again:
-set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
 nfs4_state_manager(clp);
-if (atomic_read(&cl->cl_swapper)) {
+if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
+!test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
 wait_var_event_interruptible(&clp->cl_state,
 test_bit(NFS4CLNT_RUN_MANAGER,
 &clp->cl_state));
-if (atomic_read(&cl->cl_swapper) &&
-test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
+if (!atomic_read(&cl->cl_swapper))
+clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
 goto again;
 /* Either no longer a swapper, or were signalled */
+clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
 }
-clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
 if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
 test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
-!test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
+!test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
 goto again;
 nfs_put_client(clp);


@@ -18,7 +18,7 @@
 #include "sysfs.h"
 struct kobject *nfs_client_kobj;
-static struct kset *nfs_client_kset;
+static struct kset *nfs_kset;
 static void nfs_netns_object_release(struct kobject *kobj)
 {
@@ -55,13 +55,13 @@ static struct kobject *nfs_netns_object_alloc(const char *name,
 int nfs_sysfs_init(void)
 {
-nfs_client_kset = kset_create_and_add("nfs", NULL, fs_kobj);
-if (!nfs_client_kset)
+nfs_kset = kset_create_and_add("nfs", NULL, fs_kobj);
+if (!nfs_kset)
 return -ENOMEM;
-nfs_client_kobj = nfs_netns_object_alloc("net", nfs_client_kset, NULL);
+nfs_client_kobj = nfs_netns_object_alloc("net", nfs_kset, NULL);
 if (!nfs_client_kobj) {
-kset_unregister(nfs_client_kset);
-nfs_client_kset = NULL;
+kset_unregister(nfs_kset);
+nfs_kset = NULL;
 return -ENOMEM;
 }
 return 0;
@@ -70,7 +70,7 @@ int nfs_sysfs_init(void)
 void nfs_sysfs_exit(void)
 {
 kobject_put(nfs_client_kobj);
-kset_unregister(nfs_client_kset);
+kset_unregister(nfs_kset);
 }
 static ssize_t nfs_netns_identifier_show(struct kobject *kobj,
@@ -158,7 +158,7 @@ static struct nfs_netns_client *nfs_netns_client_alloc(struct kobject *parent,
 p = kzalloc(sizeof(*p), GFP_KERNEL);
 if (p) {
 p->net = net;
-p->kobject.kset = nfs_client_kset;
+p->kobject.kset = nfs_kset;
 if (kobject_init_and_add(&p->kobject, &nfs_netns_client_type,
 parent, "nfs_client") == 0)
 return p;


@@ -849,7 +849,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
 static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
 struct bpf_attach_target_info *tgt_info)
 {
-return ERR_PTR(-EOPNOTSUPP);
+return NULL;
 }
 static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
 #define DEFINE_BPF_DISPATCHER(name)
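The stub previously returned ERR_PTR(-EOPNOTSUPP) while callers of bpf_trampoline_get() only NULL-check the result, so the error pointer could be dereferenced as if valid. A reduced sketch of the mismatch (get_obj_stub()/use_obj() are illustrative):

    #include <linux/err.h>
    #include <linux/errno.h>

    struct obj;

    static struct obj *get_obj_stub(void)
    {
        return NULL;    /* not ERR_PTR(-EOPNOTSUPP): callers NULL-check */
    }

    static int use_obj(void)
    {
        struct obj *o = get_obj_stub();

        if (!o)         /* an ERR_PTR would slip past this check */
            return -EOPNOTSUPP;
        /* ... would dereference o here ... */
        return 0;
    }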


@@ -9,6 +9,7 @@ struct ip_ct_sctp {
 enum sctp_conntrack state;
 __be32 vtag[IP_CT_DIR_MAX];
+u8 init[IP_CT_DIR_MAX];
 u8 last_dir;
 u8 flags;
 };


@@ -340,12 +340,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 struct pipe_inode_info *pipe, size_t len,
 unsigned int flags);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
-const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
 {
 struct inet_connection_sock *icsk = inet_csk(sk);
 if (icsk->icsk_ack.quick) {
+/* How many ACKs S/ACKing new data have we sent? */
+const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
 if (pkts >= icsk->icsk_ack.quick) {
 icsk->icsk_ack.quick = 0;
 /* Leaving quickack mode we deflate ATO. */


@@ -320,10 +320,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
 local_set(&bpage->commit, 0);
 }
-/*
- * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
- * this issue out.
- */
+static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
+{
+return local_read(&bpage->page->commit);
+}
 static void free_buffer_page(struct buffer_page *bpage)
 {
 free_page((unsigned long)bpage->page);
@@ -1985,7 +1986,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 * Increment overrun to account for the lost events.
 */
 local_add(page_entries, &cpu_buffer->overrun);
-local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
 local_inc(&cpu_buffer->pages_lost);
 }
@@ -2331,11 +2332,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
 cpu_buffer->reader_page->read);
 }
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
-{
-return local_read(&bpage->page->commit);
-}
 static struct ring_buffer_event *
 rb_iter_head_event(struct ring_buffer_iter *iter)
 {
@@ -2481,7 +2477,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 * the counters.
 */
 local_add(entries, &cpu_buffer->overrun);
-local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
 local_inc(&cpu_buffer->pages_lost);
 /*
@@ -2624,9 +2620,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 event = __rb_page_index(tail_page, tail);
-/* account for padding bytes */
-local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
 /*
 * Save the original length to the meta data.
 * This will be used by the reader to add lost event
@@ -2640,7 +2633,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 * write counter enough to allow another writer to slip
 * in on this page.
 * We put in a discarded commit instead, to make sure
-* that this space is not used again.
+* that this space is not used again, and this space will
+* not be accounted into 'entries_bytes'.
 *
 * If we are less than the minimum size, we don't need to
 * worry about it.
@@ -2665,6 +2659,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 /* time delta must be non zero */
 event->time_delta = 1;
+/* account for padding bytes */
+local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
 /* Make sure the padding is visible before the tail_page->write update */
 smp_wmb();
@@ -4189,7 +4186,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
 /**
- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
@@ -4820,6 +4817,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 length = rb_event_length(event);
 cpu_buffer->reader_page->read += length;
+cpu_buffer->read_bytes += length;
 }
 static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -5984,7 +5982,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
 } else {
 /* update the entry counter */
 cpu_buffer->read += rb_page_entries(reader);
-cpu_buffer->read_bytes += BUF_PAGE_SIZE;
+cpu_buffer->read_bytes += rb_page_commit(reader);
 /* swap the pages */
 rb_init_page(bpage);


@@ -930,7 +930,9 @@ static void neigh_periodic_work(struct work_struct *work)
 (state == NUD_FAILED ||
 !time_in_range_open(jiffies, n->used,
 n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
-*np = n->next;
+rcu_assign_pointer(*np,
+rcu_dereference_protected(n->next,
+lockdep_is_held(&tbl->lock)));
 neigh_mark_dead(n);
 write_unlock(&n->lock);
 neigh_cleanup_and_release(n);
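A plain pointer store can tear while lockless readers walk the list; the writer-side idiom above publishes the successor atomically and documents the lock that makes the read of n->next safe. In isolation (struct node and the lock are illustrative stand-ins for the neighbour table):

    #include <linux/lockdep.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct node {
        struct node __rcu *next;
    };

    /* Unlink *np -> n from an RCU-protected list; caller holds 'lock'. */
    static void unlink_node(struct node __rcu **np, struct node *n,
                            rwlock_t *lock)
    {
        rcu_assign_pointer(*np,
                           rcu_dereference_protected(n->next,
                                                     lockdep_is_held(lock)));
    }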


@@ -676,6 +676,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
 sk = __sock_map_lookup_elem(map, key);
 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 return SK_DROP;
+if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+return SK_DROP;
 msg->flags = flags;
 msg->sk_redir = sk;
@@ -1269,6 +1271,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
 sk = __sock_hash_lookup_elem(map, key);
 if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
 return SK_DROP;
+if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+return SK_DROP;
 msg->flags = flags;
 msg->sk_redir = sk;


@@ -3430,6 +3430,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 fa->fa_type == fri.type) {
 fri.offload = READ_ONCE(fa->offload);
 fri.trap = READ_ONCE(fa->trap);
+fri.offload_failed =
+READ_ONCE(fa->offload_failed);
 break;
 }
 }


@@ -244,6 +244,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 if (unlikely(len > icsk->icsk_ack.rcv_mss +
 MAX_TCP_OPTION_SPACE))
 tcp_gro_dev_warn(sk, skb, len);
+/* If the skb has a len of exactly 1*MSS and has the PSH bit
+ * set then it is likely the end of an application write. So
+ * more data may not be arriving soon, and yet the data sender
+ * may be waiting for an ACK if cwnd-bound or using TX zero
+ * copy. So we set ICSK_ACK_PUSHED here so that
+ * tcp_cleanup_rbuf() will send an ACK immediately if the app
+ * reads all of the data and is not ping-pong. If len > MSS
+ * then this logic does not matter (and does not hurt) because
+ * tcp_cleanup_rbuf() will always ACK immediately if the app
+ * reads data and there is more than an MSS of unACKed data.
+ */
+if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
+icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 } else {
 /* Otherwise, we make more careful check taking into account,
 * that SACKs block is variable.


@@ -177,8 +177,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
-u32 rcv_nxt)
+static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
 {
 struct tcp_sock *tp = tcp_sk(sk);
@@ -192,7 +191,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 if (unlikely(rcv_nxt != tp->rcv_nxt))
 return; /* Special ACK sent by DCTCP to reflect ECN */
-tcp_dec_quickack_mode(sk, pkts);
+tcp_dec_quickack_mode(sk);
 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1374,7 +1373,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 sk, skb);
 if (likely(tcb->tcp_flags & TCPHDR_ACK))
-tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
+tcp_event_ack_sent(sk, rcv_nxt);
 if (skb->len != tcp_header_size) {
 tcp_event_data_sent(tp, sk);


@@ -508,7 +508,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 */
 if (len > INT_MAX - transhdrlen)
 return -EMSGSIZE;
-ulen = len + transhdrlen;
 /* Mirror BSD error message compatibility */
 if (msg->msg_flags & MSG_OOB)
@@ -629,6 +628,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 back_from_confirm:
 lock_sock(sk);
+ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0;
 err = ip6_append_data(sk, ip_generic_getfrag, msg,
 ulen, transhdrlen, &ipc6,
 &fl6, (struct rt6_info *)dst,


@@ -1507,8 +1507,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
 }
 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
-result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
-salen, 0);
+result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
+salen, 0);
 if (result < 0) {
 pr_err("Error connecting to the multicast addr\n");
 goto error;
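Going through kernel_connect() matters because hooks reached via sock->ops->connect() (e.g. BPF cgroup connect programs) may rewrite the destination address; the wrapper keeps that rewrite away from the caller's possibly long-lived buffer. Roughly, it does the following (a simplified sketch under that assumption, not the verbatim net/socket.c code):

    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/string.h>

    static int kernel_connect_sketch(struct socket *sock, struct sockaddr *addr,
                                     int addrlen, int flags)
    {
        struct sockaddr_storage address;

        memcpy(&address, addr, addrlen);    /* private, rewritable copy */

        return sock->ops->connect(sock, (struct sockaddr *)&address,
                                  addrlen, flags);
    }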


@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
 /* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
 /* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* cookie_ack */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
 /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
 /* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
 /* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 /* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
 /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
 /* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_echo */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
 /* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
 /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
 /* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -426,6 +426,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 /* (D) vtag must be same as init_vtag as found in INIT_ACK */
 if (sh->vtag != ct->proto.sctp.vtag[dir])
 goto out_unlock;
+} else if (sch->type == SCTP_CID_COOKIE_ACK) {
+ct->proto.sctp.init[dir] = 0;
+ct->proto.sctp.init[!dir] = 0;
 } else if (sch->type == SCTP_CID_HEARTBEAT) {
 if (ct->proto.sctp.vtag[dir] == 0) {
 pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
@@ -474,16 +477,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 }
 /* If it is an INIT or an INIT ACK note down the vtag */
-if (sch->type == SCTP_CID_INIT ||
-sch->type == SCTP_CID_INIT_ACK) {
-struct sctp_inithdr _inithdr, *ih;
+if (sch->type == SCTP_CID_INIT) {
+struct sctp_inithdr _ih, *ih;
-ih = skb_header_pointer(skb, offset + sizeof(_sch),
-sizeof(_inithdr), &_inithdr);
-if (ih == NULL)
+ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+if (!ih)
 goto out_unlock;
-pr_debug("Setting vtag %x for dir %d\n",
-ih->init_tag, !dir);
+if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
+ct->proto.sctp.init[!dir] = 0;
+ct->proto.sctp.init[dir] = 1;
+pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
 ct->proto.sctp.vtag[!dir] = ih->init_tag;
 /* don't renew timeout on init retransmit so
@@ -494,6 +499,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 old_state == SCTP_CONNTRACK_CLOSED &&
 nf_ct_is_confirmed(ct))
 ignore = true;
+} else if (sch->type == SCTP_CID_INIT_ACK) {
+struct sctp_inithdr _ih, *ih;
+__be32 vtag;
+ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+if (!ih)
+goto out_unlock;
+vtag = ct->proto.sctp.vtag[!dir];
+if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
+goto out_unlock;
+/* collision */
+if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
+vtag != ih->init_tag)
+goto out_unlock;
+pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
+ct->proto.sctp.vtag[!dir] = ih->init_tag;
 }
 ct->proto.sctp.state = new_state;


@@ -233,10 +233,9 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
 rb_erase(&rbe->node, &priv->root);
 }
-static int nft_rbtree_gc_elem(const struct nft_set *__set,
-struct nft_rbtree *priv,
-struct nft_rbtree_elem *rbe,
-u8 genmask)
+static const struct nft_rbtree_elem *
+nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+struct nft_rbtree_elem *rbe, u8 genmask)
 {
 struct nft_set *set = (struct nft_set *)__set;
 struct rb_node *prev = rb_prev(&rbe->node);
@@ -246,7 +245,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
 if (!gc)
-return -ENOMEM;
+return ERR_PTR(-ENOMEM);
 /* search for end interval coming before this element.
 * end intervals don't carry a timeout extension, they
@@ -261,6 +260,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 prev = rb_prev(prev);
 }
+rbe_prev = NULL;
 if (prev) {
 rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
 nft_rbtree_gc_remove(net, set, priv, rbe_prev);
@@ -272,7 +272,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 */
 gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
 if (WARN_ON_ONCE(!gc))
-return -ENOMEM;
+return ERR_PTR(-ENOMEM);
 nft_trans_gc_elem_add(gc, rbe_prev);
 }
@@ -280,13 +280,13 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 nft_rbtree_gc_remove(net, set, priv, rbe);
 gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
 if (WARN_ON_ONCE(!gc))
-return -ENOMEM;
+return ERR_PTR(-ENOMEM);
 nft_trans_gc_elem_add(gc, rbe);
 nft_trans_gc_queue_sync_done(gc);
-return 0;
+return rbe_prev;
 }
 static bool nft_rbtree_update_first(const struct nft_set *set,
@@ -314,7 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 struct nft_rbtree *priv = nft_set_priv(set);
 u8 cur_genmask = nft_genmask_cur(net);
 u8 genmask = nft_genmask_next(net);
-int d, err;
+int d;
 /* Descend the tree to search for an existing element greater than the
 * key value to insert that is greater than the new element. This is the
@@ -363,9 +363,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 */
 if (nft_set_elem_expired(&rbe->ext) &&
 nft_set_elem_active(&rbe->ext, cur_genmask)) {
-err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
-if (err < 0)
-return err;
+const struct nft_rbtree_elem *removed_end;
+removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+if (IS_ERR(removed_end))
+return PTR_ERR(removed_end);
+if (removed_end == rbe_le || removed_end == rbe_ge)
+return -EAGAIN;
 continue;
 }
@@ -486,11 +491,18 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 struct nft_rbtree_elem *rbe = elem->priv;
 int err;
-write_lock_bh(&priv->lock);
-write_seqcount_begin(&priv->count);
-err = __nft_rbtree_insert(net, set, rbe, ext);
-write_seqcount_end(&priv->count);
-write_unlock_bh(&priv->lock);
+do {
+if (fatal_signal_pending(current))
+return -EINTR;
+cond_resched();
+write_lock_bh(&priv->lock);
+write_seqcount_begin(&priv->count);
+err = __nft_rbtree_insert(net, set, rbe, ext);
+write_seqcount_end(&priv->count);
+write_unlock_bh(&priv->lock);
+} while (err == -EAGAIN);
 return err;
 }
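The insert path now retries instead of reporting a spurious failure: a loop that yields with cond_resched() between attempts and aborts on a fatal signal. The idiom in isolation (do_insert() is an illustrative stand-in for the locked insert attempt):

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    static int insert_with_retry(int (*do_insert)(void *ctx), void *ctx)
    {
        int err;

        do {
            if (fatal_signal_pending(current))
                return -EINTR;

            cond_resched();    /* yield between attempts */

            err = do_insert(ctx);
        } while (err == -EAGAIN);

        return err;
    }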


@@ -1646,7 +1646,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
 INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+spin_lock(&llcp_devices_lock);
 list_add(&local->list, &llcp_devices);
+spin_unlock(&llcp_devices_lock);
 return 0;
 }


@@ -170,7 +170,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
 * own the socket
 */
 rds_tcp_set_callbacks(sock, cp);
-ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
+ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
 rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
 if (ret == -EINPROGRESS)


@@ -1161,8 +1161,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
 /* Add any peer addresses from the new association. */
 list_for_each_entry(trans, &new->peer.transport_addr_list,
 transports)
-if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
-!sctp_assoc_add_peer(asoc, &trans->ipaddr,
+if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
 GFP_ATOMIC, trans->state))
 return -ENOMEM;


@@ -2457,6 +2457,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 if (trans) {
 trans->hbinterval =
 msecs_to_jiffies(params->spp_hbinterval);
+sctp_transport_reset_hb_timer(trans);
 } else if (asoc) {
 asoc->hbinterval =
 msecs_to_jiffies(params->spp_hbinterval);


@@ -708,6 +708,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 return ret;
 }
+static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
+{
+int err = security_socket_sendmsg(sock, msg,
+msg_data_left(msg));
+return err ?: sock_sendmsg_nosec(sock, msg);
+}
 /**
 * sock_sendmsg - send a message through @sock
 * @sock: socket
@@ -718,10 +726,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 */
 int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 {
-int err = security_socket_sendmsg(sock, msg,
-msg_data_left(msg));
+struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
+struct sockaddr_storage address;
+int ret;
-return err ?: sock_sendmsg_nosec(sock, msg);
+if (msg->msg_name) {
+memcpy(&address, msg->msg_name, msg->msg_namelen);
+msg->msg_name = &address;
+}
+ret = __sock_sendmsg(sock, msg);
+msg->msg_name = save_addr;
+return ret;
 }
 EXPORT_SYMBOL(sock_sendmsg);
@@ -1069,7 +1086,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
 if (sock->type == SOCK_SEQPACKET)
 msg.msg_flags |= MSG_EOR;
-res = sock_sendmsg(sock, &msg);
+res = __sock_sendmsg(sock, &msg);
 *from = msg.msg_iter;
 return res;
 }
@@ -2048,7 +2065,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
 if (sock->file->f_flags & O_NONBLOCK)
 flags |= MSG_DONTWAIT;
 msg.msg_flags = flags;
-err = sock_sendmsg(sock, &msg);
+err = __sock_sendmsg(sock, &msg);
 out_put:
 fput_light(sock->file, fput_needed);
@@ -2421,7 +2438,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
 err = sock_sendmsg_nosec(sock, msg_sys);
 goto out_freectl;
 }
-err = sock_sendmsg(sock, msg_sys);
+err = __sock_sendmsg(sock, msg_sys);
 /*
 * If this is sendmmsg() and sending to current destination address was
 * successful, remember it.
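The pattern: bounce the caller-supplied address into an on-stack sockaddr_storage so any rewrite performed further down the stack hits the copy, then restore the caller's pointer. Sketched outside the kernel for clarity (msghdr_like and do_send are illustrative stand-ins, not the kernel msghdr plumbing):

    #include <string.h>
    #include <sys/socket.h>

    struct msghdr_like {
        void *msg_name;
        socklen_t msg_namelen;
    };

    static int send_protected(struct msghdr_like *msg,
                              int (*do_send)(struct msghdr_like *))
    {
        void *save_addr = msg->msg_name;    /* caller's pointer */
        struct sockaddr_storage address;
        int ret;

        if (msg->msg_name) {
            memcpy(&address, msg->msg_name, msg->msg_namelen);
            msg->msg_name = &address;       /* callee may rewrite this */
        }

        ret = do_send(msg);
        msg->msg_name = save_addr;          /* caller sees it unchanged */
        return ret;
    }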


@@ -1452,14 +1452,14 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
 struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
 struct tipc_key key;
-spin_lock(&tx->lock);
+spin_lock_bh(&tx->lock);
 key = tx->key;
 WARN_ON(!key.active || tx_key != key.active);
 /* Free the active key */
 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
-spin_unlock(&tx->lock);
+spin_unlock_bh(&tx->lock);
 pr_warn("%s: key is revoked\n", tx->name);
 return -EKEYREVOKED;
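tx->lock appears to be taken from softirq context as well, so a process-context holder using plain spin_lock() can deadlock if the softirq fires on the same CPU while the lock is held; the _bh variants close that window. In isolation (my_lock is illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);

    static void process_context_update(void)
    {
        spin_lock_bh(&my_lock);  /* blocks softirqs that also take my_lock */
        /* ... critical section shared with a timer/softirq handler ... */
        spin_unlock_bh(&my_lock);
    }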


@@ -1547,7 +1547,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
 /* First handle the "special" cases */
 if (sym_is(name, namelen, "usb"))
 do_usb_table(symval, sym->st_size, mod);
-if (sym_is(name, namelen, "of"))
+else if (sym_is(name, namelen, "of"))
 do_of_table(symval, sym->st_size, mod);
 else if (sym_is(name, namelen, "pnp"))
 do_pnp_device_entry(symval, sym->st_size, mod);


@@ -29,9 +29,11 @@
 to learn more about IMA.
 If unsure, say N.
+if IMA
+
 config IMA_KEXEC
 bool "Enable carrying the IMA measurement list across a soft boot"
-depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+depends on TCG_TPM && HAVE_IMA_KEXEC
 default n
 help
 TPM PCRs are only reset on a hard reboot. In order to validate
@@ -43,7 +45,6 @@ config IMA_KEXEC
 config IMA_MEASURE_PCR_IDX
 int
-depends on IMA
 range 8 14
 default 10
 help
@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
 config IMA_LSM_RULES
 bool
-depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
+depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
 default y
 help
 Disabling this option will disregard LSM based policy rules.
@@ -61,7 +62,6 @@ config IMA_LSM_RULES
 choice
 prompt "Default template"
 default IMA_NG_TEMPLATE
-depends on IMA
 help
 Select the default IMA measurement template.
@@ -80,14 +80,12 @@ endchoice
 config IMA_DEFAULT_TEMPLATE
 string
-depends on IMA
 default "ima-ng" if IMA_NG_TEMPLATE
 default "ima-sig" if IMA_SIG_TEMPLATE
 choice
 prompt "Default integrity hash algorithm"
 default IMA_DEFAULT_HASH_SHA1
-depends on IMA
 help
 Select the default hash algorithm used for the measurement
 list, integrity appraisal and audit log. The compiled default
@@ -117,7 +115,6 @@ endchoice
 config IMA_DEFAULT_HASH
 string
-depends on IMA
 default "sha1" if IMA_DEFAULT_HASH_SHA1
 default "sha256" if IMA_DEFAULT_HASH_SHA256
 default "sha512" if IMA_DEFAULT_HASH_SHA512
@@ -126,7 +123,6 @@ config IMA_DEFAULT_HASH
 config IMA_WRITE_POLICY
 bool "Enable multiple writes to the IMA policy"
-depends on IMA
 default n
 help
 IMA policy can now be updated multiple times. The new rules get
@@ -137,7 +133,6 @@ config IMA_WRITE_POLICY
 config IMA_READ_POLICY
 bool "Enable reading back the current IMA policy"
-depends on IMA
 default y if IMA_WRITE_POLICY
 default n if !IMA_WRITE_POLICY
 help
@@ -147,7 +142,6 @@ config IMA_READ_POLICY
 config IMA_APPRAISE
 bool "Appraise integrity measurements"
-depends on IMA
 default n
 help
 This option enables local measurement integrity appraisal.
@@ -268,7 +262,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
 config IMA_BLACKLIST_KEYRING
 bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
 depends on SYSTEM_TRUSTED_KEYRING
-depends on IMA_TRUSTED_KEYRING
+depends on INTEGRITY_TRUSTED_KEYRING
 default n
 help
 This option creates an IMA blacklist keyring, which contains all
@@ -278,7 +272,7 @@ config IMA_BLACKLIST_KEYRING
 config IMA_LOAD_X509
 bool "Load X509 certificate onto the '.ima' trusted keyring"
-depends on IMA_TRUSTED_KEYRING
+depends on INTEGRITY_TRUSTED_KEYRING
 default n
 help
 File signature verification is based on the public keys
@@ -303,7 +297,6 @@ config IMA_APPRAISE_SIGNED_INIT
 config IMA_MEASURE_ASYMMETRIC_KEYS
 bool
-depends on IMA
 depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
 default y
@@ -322,7 +315,8 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT
 config IMA_DISABLE_HTABLE
 bool "Disable htable to allow measurement of duplicate records"
-depends on IMA
 default n
 help
 This option disables htable to allow measurement of duplicate records.
+
+endif


@@ -159,6 +159,7 @@ int snd_soc_dai_is_dummy(struct snd_soc_dai *dai)
 return 1;
 return 0;
 }
+EXPORT_SYMBOL_GPL(snd_soc_dai_is_dummy);
 int snd_soc_component_is_dummy(struct snd_soc_component *component)
 {


@@ -10,6 +10,7 @@
 #include <linux/platform_device.h>
 #include <sound/graph_card.h>
 #include <sound/pcm_params.h>
+#include <sound/soc-dai.h>
 #define MAX_PLLA_OUT0_DIV 128
@@ -44,6 +45,21 @@ struct tegra_audio_cdata {
 unsigned int plla_out0_rates[NUM_RATE_TYPE];
 };
+static bool need_clk_update(struct snd_soc_dai *dai)
+{
+if (snd_soc_dai_is_dummy(dai) ||
+!dai->driver->ops ||
+!dai->driver->name)
+return false;
+if (strstr(dai->driver->name, "I2S") ||
+strstr(dai->driver->name, "DMIC") ||
+strstr(dai->driver->name, "DSPK"))
+return true;
+return false;
+}
 /* Setup PLL clock as per the given sample rate */
 static int tegra_audio_graph_update_pll(struct snd_pcm_substream *substream,
 struct snd_pcm_hw_params *params)
@@ -140,19 +156,7 @@ static int tegra_audio_graph_hw_params(struct snd_pcm_substream *substream,
 struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
 int err;
-/*
- * This gets called for each DAI link (FE or BE) when DPCM is used.
- * We may not want to update PLLA rate for each call. So PLLA update
- * must be restricted to external I/O links (I2S, DMIC or DSPK) since
- * they actually depend on it. I/O modules update their clocks in
- * hw_param() of their respective component driver and PLLA rate
- * update here helps them to derive appropriate rates.
- *
- * TODO: When more HW accelerators get added (like sample rate
- * converter, volume gain controller etc., which don't really
- * depend on PLLA) we need a better way to filter here.
- */
-if (cpu_dai->driver->ops && rtd->dai_link->no_pcm) {
+if (need_clk_update(cpu_dai)) {
 err = tegra_audio_graph_update_pll(substream, params);
 if (err)
 return err;