Merge 5.15.129 into android14-5.15-lts

Changes in 5.15.129
	objtool/x86: Fix SRSO mess
	NFSv4.2: fix error handling in nfs42_proc_getxattr
	NFSv4: fix out path in __nfs4_get_acl_uncached
	xprtrdma: Remap Receive buffers after a reconnect
	PCI: acpiphp: Reassign resources on bridge if necessary
	dlm: improve plock logging if interrupted
	dlm: replace usage of found with dedicated list iterator variable
	fs: dlm: add pid to debug log
	fs: dlm: change plock interrupted message to debug again
	fs: dlm: use dlm_plock_info for do_unlock_close
	fs: dlm: fix mismatch of plock results from userspace
	MIPS: cpu-features: Enable octeon_cache by cpu_type
	MIPS: cpu-features: Use boot_cpu_type for CPU type based features
	fbdev: Improve performance of sys_imageblit()
	fbdev: Fix sys_imageblit() for arbitrary image widths
	fbdev: fix potential OOB read in fast_imageblit()
	ALSA: pcm: Fix potential data race at PCM memory allocation helpers
	jbd2: remove t_checkpoint_io_list
	jbd2: remove journal_clean_one_cp_list()
	jbd2: fix a race when checking checkpoint buffer busy
	can: raw: fix receiver memory leak
	drm/amd/display: do not wait for mpc idle if tg is disabled
	drm/amd/display: check TG is non-null before checking if enabled
	can: raw: fix lockdep issue in raw_release()
	tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
	tracing: Fix memleak due to race between current_tracer and trace
	octeontx2-af: SDP: fix receive link config
	sock: annotate data-races around prot->memory_pressure
	dccp: annotate data-races in dccp_poll()
	ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
	net: bgmac: Fix return value check for fixed_phy_register()
	net: bcmgenet: Fix return value check for fixed_phy_register()
	net: validate veth and vxcan peer ifindexes
	ice: fix receive buffer size miscalculation
	igb: Avoid starting unnecessary workqueues
	igc: Fix the typo in the PTM Control macro
	net/sched: fix a qdisc modification with ambiguous command request
	netfilter: nf_tables: flush pending destroy work before netlink notifier
	netfilter: nf_tables: fix out of memory error handling
	rtnetlink: return ENODEV when ifname does not exist and group is given
	rtnetlink: Reject negative ifindexes in RTM_NEWLINK
	net: remove bond_slave_has_mac_rcu()
	bonding: fix macvlan over alb bond support
	net/ncsi: make one oem_gma function for all mfr id
	net/ncsi: change from ndo_set_mac_address to dev_set_mac_address
	Revert "KVM: x86: enable TDP MMU by default"
	ibmveth: Use dcbf rather than dcbfl
	NFSv4: Fix dropped lock for racing OPEN and delegation return
	clk: Fix slab-out-of-bounds error in devm_clk_release()
	ALSA: ymfpci: Fix the missing snd_card_free() call at probe error
	mm: add a call to flush_cache_vmap() in vmap_pfn()
	NFS: Fix a use after free in nfs_direct_join_group()
	nfsd: Fix race to FREE_STATEID and cl_revoked
	selinux: set next pointer before attaching to list
	batman-adv: Trigger events for auto adjusted MTU
	batman-adv: Don't increase MTU when set by user
	batman-adv: Do not get eth header before batadv_check_management_packet
	batman-adv: Fix TT global entry leak when client roamed back
	batman-adv: Fix batadv_v_ogm_aggr_send memory leak
	batman-adv: Hold rtnl lock during MTU update via netlink
	lib/clz_ctz.c: Fix __clzdi2() and __ctzdi2() for 32-bit kernels
	radix tree: remove unused variable
	of: unittest: Fix EXPECT for parse_phandle_with_args_map() test
	of: dynamic: Refactor action prints to not use "%pOF" inside devtree_lock
	media: vcodec: Fix potential array out-of-bounds in encoder queue_setup
	PCI: acpiphp: Use pci_assign_unassigned_bridge_resources() only for non-root bus
	drm/vmwgfx: Fix shader stage validation
	drm/display/dp: Fix the DP DSC Receiver cap size
	x86/fpu: Invalidate FPU state correctly on exec()
	x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
	nfs: use vfs setgid helper
	nfsd: use vfs setgid helper
	torture: Fix hang during kthread shutdown phase
	cgroup/cpuset: Rename functions dealing with DEADLINE accounting
	sched/cpuset: Bring back cpuset_mutex
	sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
	cgroup/cpuset: Iterate only if DEADLINE tasks are present
	sched/deadline: Create DL BW alloc, free & check overflow interface
	cgroup/cpuset: Free DL BW in case can_attach() fails
	drm/i915: Fix premature release of request's reusable memory
	can: raw: add missing refcount for memory leak fix
	scsi: snic: Fix double free in snic_tgt_create()
	scsi: core: raid_class: Remove raid_component_add()
	clk: Fix undefined reference to `clk_rate_exclusive_{get,put}'
	pinctrl: renesas: rza2: Add lock around pinctrl_generic{{add,remove}_group,{add,remove}_function}
	dma-buf/sw_sync: Avoid recursive lock during fence signal
	mm: memory-failure: kill soft_offline_free_page()
	mm: memory-failure: fix unexpected return value in soft_offline_page()
	mm,ima,kexec,of: use memblock_free_late from ima_free_kexec_buffer
	Linux 5.15.129

Change-Id: I196922313560df14873ab83e55b11961989f33de
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
89 files changed, 833 insertions(+), 713 deletions(-)

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 128
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Trick or Treat

@@ -124,7 +124,24 @@
 #define cpu_has_tx39_cache	__opt(MIPS_CPU_TX39_CACHE)
 #endif
 #ifndef cpu_has_octeon_cache
-#define cpu_has_octeon_cache	0
+#define cpu_has_octeon_cache					\
+({								\
+	int __res;						\
+								\
+	switch (boot_cpu_type()) {				\
+	case CPU_CAVIUM_OCTEON:					\
+	case CPU_CAVIUM_OCTEON_PLUS:				\
+	case CPU_CAVIUM_OCTEON2:				\
+	case CPU_CAVIUM_OCTEON3:				\
+		__res = 1;					\
+		break;						\
+								\
+	default:						\
+		__res = 0;					\
+	}							\
+								\
+	__res;							\
+})
 #endif
 /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
 #ifndef cpu_has_fpu
@@ -351,7 +368,7 @@
 ({ \
 	int __res; \
 \
-	switch (current_cpu_type()) { \
+	switch (boot_cpu_type()) { \
 	case CPU_M14KC: \
 	case CPU_74K: \
 	case CPU_1074K: \
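Both MIPS hunks key the feature test off boot_cpu_type() rather than current_cpu_type(), so the answer no longer depends on which CPU happens to evaluate the macro. A minimal userspace sketch of the pattern (names are illustrative stand-ins, not kernel API):

#include <stdio.h>

enum cpu_type { CPU_GENERIC, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON2 };

static enum cpu_type boot_cpu;	/* latched once at "boot" */

static int has_octeon_cache(void)
{
	switch (boot_cpu) {	/* boot CPU, not the executing CPU */
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	boot_cpu = CPU_CAVIUM_OCTEON2;
	printf("octeon cache: %d\n", has_octeon_cache());	/* -> 1 */
	return 0;
}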

@@ -416,8 +416,7 @@ DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
  * FPU state for a task MUST let the rest of the kernel know that the
  * FPU registers are no longer valid for this task.
  *
- * Either one of these invalidation functions is enough. Invalidate
- * a resource you control: CPU if using the CPU for something else
+ * Invalidate a resource you control: CPU if using the CPU for something else
  * (with preemption disabled), FPU for the current task, or a task that
  * is prevented from running by the current task.
  */

@@ -330,7 +330,7 @@ static void fpu_reset_fpstate(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	fpregs_lock();
 	fpu__drop(fpu);
+	__fpu_invalidate_fpregs_state(fpu);
 	/*
 	 * This does not change the actual hardware registers. It just
 	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a

@@ -809,6 +809,13 @@ void __init fpu__init_system_xstate(void)
 		goto out_disable;
 	}
 
+	/*
+	 * CPU capabilities initialization runs before FPU init. So
+	 * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+	 * functional, set the feature bit so depending code works.
+	 */
+	setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
 	print_xstate_offset_size();
 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
 		xfeatures_mask_all,

@@ -10,7 +10,7 @@
 #include <asm/cmpxchg.h>
 #include <trace/events/kvm.h>
 
-static bool __read_mostly tdp_mmu_enabled = true;
+static bool __read_mostly tdp_mmu_enabled = false;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
 
 /* Initializes the TDP MMU for the VM, if enabled. */

@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
 struct clk *devm_get_clk_from_child(struct device *dev,
 				    struct device_node *np, const char *con_id)
 {
-	struct clk **ptr, *clk;
+	struct devm_clk_state *state;
+	struct clk *clk;
 
-	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
+	state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+	if (!state)
 		return ERR_PTR(-ENOMEM);
 
 	clk = of_clk_get_by_name(np, con_id);
 	if (!IS_ERR(clk)) {
-		*ptr = clk;
-		devres_add(dev, ptr);
+		state->clk = clk;
+		devres_add(dev, state);
 	} else {
-		devres_free(ptr);
+		devres_free(state);
 	}
 
 	return clk;

@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
  */
 static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 {
+	LIST_HEAD(signalled);
 	struct sync_pt *pt, *next;
 
 	trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 		if (!timeline_fence_signaled(&pt->base))
 			break;
 
-		list_del_init(&pt->link);
+		dma_fence_get(&pt->base);
+
+		list_move_tail(&pt->link, &signalled);
 		rb_erase(&pt->node, &obj->pt_tree);
 
-		/*
-		 * A signal callback may release the last reference to this
-		 * fence, causing it to be freed. That operation has to be
-		 * last to avoid a use after free inside this loop, and must
-		 * be after we remove the fence from the timeline in order to
-		 * prevent deadlocking on timeline->lock inside
-		 * timeline_fence_release().
-		 */
 		dma_fence_signal_locked(&pt->base);
 	}
 
 	spin_unlock_irq(&obj->lock);
+
+	list_for_each_entry_safe(pt, next, &signalled, link) {
+		list_del_init(&pt->link);
+		dma_fence_put(&pt->base);
+	}
 }
 
 /**

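The fix collects signalled points on a local list and drops their references only after the timeline lock is released, because the final dma_fence_put() can call back into timeline_fence_release(), which takes the same lock. A standalone sketch of the same move-then-release shape (pthreads stand-in, not the kernel API; assumes nodes outlive the loop):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int refs; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *active;	/* list protected by 'lock' */

/* release() may re-take 'lock' in real life, so never call it under 'lock' */
static void release(struct node *n) { if (--n->refs == 0) free(n); }

static void signal_all(void)
{
	struct node *done = NULL, *n;

	pthread_mutex_lock(&lock);
	while ((n = active)) {		/* move to a private list under the lock */
		active = n->next;
		n->next = done;
		done = n;
	}
	pthread_mutex_unlock(&lock);

	while ((n = done)) {		/* drop references outside the lock */
		done = n->next;
		release(n);
	}
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->refs = 1;
	n->next = NULL;
	active = n;
	signal_all();
	puts("done");
	return 0;
}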
@@ -3142,7 +3142,9 @@ void dcn10_wait_for_mpcc_disconnect(
 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
 
-			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+			if (pipe_ctx->stream_res.tg &&
+			    pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
 			hubp->funcs->set_blank(hubp, true);
 		}

@@ -447,8 +447,11 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
 		}
 	} while (unlikely(is_barrier(active)));
 
-	if (!__i915_active_fence_set(active, fence))
+	fence = __i915_active_fence_set(active, fence);
+	if (!fence)
 		__i915_active_acquire(ref);
+	else
+		dma_fence_put(fence);
 
 out:
 	i915_active_release(ref);
@@ -467,13 +470,9 @@ __i915_active_set_fence(struct i915_active *ref,
 		return NULL;
 	}
 
-	rcu_read_lock();
 	prev = __i915_active_fence_set(active, fence);
-	if (prev)
-		prev = dma_fence_get_rcu(prev);
-	else
+	if (!prev)
 		__i915_active_acquire(ref);
-	rcu_read_unlock();
 
 	return prev;
 }
@@ -1040,10 +1039,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  *
  * Records the new @fence as the last active fence along its timeline in
  * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1052,7 +1052,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *prev;
 	unsigned long flags;
 
-	if (fence == rcu_access_pointer(active->fence))
+	/*
+	 * In case of fences embedded in i915_requests, their memory is
+	 * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release
+	 * by new requests. Then, there is a risk of passing back a pointer
+	 * to a new, completely unrelated fence that reuses the same memory
+	 * while tracked under a different active tracker. Combined with i915
+	 * perf open/close operations that build await dependencies between
+	 * engine kernel context requests and user requests from different
+	 * timelines, this can lead to dependency loops and infinite waits.
+	 *
+	 * As a countermeasure, we try to get a reference to the active->fence
+	 * first, so if we succeed and pass it back to our user then it is not
+	 * released and potentially reused by an unrelated request before the
+	 * user has a chance to set up an await dependency on it.
+	 */
+	prev = i915_active_fence_get(active);
+	if (fence == prev)
 		return fence;
 
 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1061,27 +1077,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 * Consider that we have two threads arriving (A and B), with
 	 * C already resident as the active->fence.
 	 *
-	 * A does the xchg first, and so it sees C or NULL depending
-	 * on the timing of the interrupt handler. If it is NULL, the
-	 * previous fence must have been signaled and we know that
-	 * we are first on the timeline. If it is still present,
-	 * we acquire the lock on that fence and serialise with the interrupt
-	 * handler, in the process removing it from any future interrupt
-	 * callback. A will then wait on C before executing (if present).
-	 *
-	 * As B is second, it sees A as the previous fence and so waits for
-	 * it to complete its transition and takes over the occupancy for
-	 * itself -- remembering that it needs to wait on A before executing.
+	 * Both A and B have got a reference to C or NULL, depending on the
+	 * timing of the interrupt handler. Let's assume that if A has got C
+	 * then it has locked C first (before B).
 	 *
 	 * Note the strong ordering of the timeline also provides consistent
 	 * nesting rules for the fence->lock; the inner lock is always the
 	 * older lock.
 	 */
 	spin_lock_irqsave(fence->lock, flags);
-	prev = xchg(__active_fence_slot(active), fence);
-	if (prev) {
-		GEM_BUG_ON(prev == fence);
+	if (prev)
 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
+	 * something else, depending on the timing of other threads and/or
+	 * interrupt handler. If not the same as before then A unlocks C if
+	 * applicable and retries, starting from an attempt to get a new
+	 * active->fence. Meanwhile, B follows the same path as A.
+	 * Once A succeeds with cmpxch, B fails again, retires, gets A from
+	 * active->fence, locks it as soon as A completes, and possibly
+	 * succeeds with cmpxchg.
+	 */
+	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+		if (prev) {
+			spin_unlock(prev->lock);
+			dma_fence_put(prev);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		prev = i915_active_fence_get(active);
+		GEM_BUG_ON(prev == fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (prev)
+			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+	}
+
+	/*
+	 * If prev is NULL then the previous fence must have been signaled
+	 * and we know that we are first on the timeline. If it is still
+	 * present then, having the lock on that fence already acquired, we
+	 * serialise with the interrupt handler, in the process of removing it
+	 * from any future interrupt callback. A will then wait on C before
+	 * executing (if present).
+	 *
+	 * As B is second, it sees A as the previous fence and so waits for
+	 * it to complete its transition and takes over the occupancy for
+	 * itself -- remembering that it needs to wait on A before executing.
+	 */
+	if (prev) {
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
 	}
@@ -1098,11 +1143,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
 	int err = 0;
 
 	/* Must maintain timeline ordering wrt previous active requests */
-	rcu_read_lock();
 	fence = __i915_active_fence_set(active, &rq->fence);
-	if (fence) /* but the previous fence may not belong to that timeline! */
-		fence = dma_fence_get_rcu(fence);
-	rcu_read_unlock();
 	if (fence) {
 		err = i915_request_await_dma_fence(rq, fence);
 		dma_fence_put(fence);

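The new loop in __i915_active_fence_set() is a take-a-reference-then-cmpxchg pattern: grab a counted reference to the current occupant of the slot, install the new fence only if the slot still holds exactly that occupant, and otherwise drop the stale reference and retry. A compact userspace sketch with C11 atomics (illustrative types, not the i915 API; assumes the objects outlive the loop, which the kernel gets from its slab semantics):

#include <stdatomic.h>
#include <stdio.h>

struct fence { atomic_int refs; };

static struct fence *fence_get(struct fence *f)
{
	if (f)
		atomic_fetch_add(&f->refs, 1);
	return f;
}

static void fence_put(struct fence *f)
{
	if (f)
		atomic_fetch_sub(&f->refs, 1);
}

static _Atomic(struct fence *) slot;

/* Install 'fence'; return a referenced pointer to the previous occupant. */
static struct fence *slot_set(struct fence *fence)
{
	for (;;) {
		struct fence *prev = fence_get(atomic_load(&slot));
		struct fence *expected = prev;

		if (atomic_compare_exchange_strong(&slot, &expected, fence))
			return prev;	/* caller waits on prev, then puts it */

		fence_put(prev);	/* slot changed under us: retry */
	}
}

int main(void)
{
	struct fence a = { 1 }, b = { 1 };
	struct fence *prev;

	atomic_store(&slot, &a);
	prev = slot_set(&b);
	printf("prev refs: %d\n", atomic_load(&prev->refs));	/* a: 2 */
	fence_put(prev);
	return 0;
}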
@@ -1596,6 +1596,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 						     &rq->dep,
 						     0);
 	}
+	if (prev)
+		i915_request_put(prev);
 
 	/*
 	 * Make sure that no request gazumped us - if it was allocated after

@@ -1685,4 +1685,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
 	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
 }
 
+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
+					   u32 shader_type)
+{
+	SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+	if (shader_model >= VMW_SM_5)
+		max_allowed = SVGA3D_SHADERTYPE_MAX;
+	else if (shader_model >= VMW_SM_4)
+		max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+	return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
 #endif

@@ -2003,7 +2003,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 	cmd = container_of(header, typeof(*cmd), header);
 
-	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
 		VMW_DEBUG_USER("Illegal shader type %u.\n",
 			       (unsigned int) cmd->body.type);
 		return -EINVAL;
@@ -2125,8 +2125,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 				      SVGA3dCmdHeader *header)
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
-	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
-		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
 	struct vmw_resource *res = NULL;
 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@@ -2143,6 +2141,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
+	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
+	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+			       (unsigned int) cmd->body.type,
+			       (unsigned int) cmd->body.slot);
+		return -EINVAL;
+	}
+
 	binding.bi.ctx = ctx_node->ctx;
 	binding.bi.res = res;
 	binding.bi.bt = vmw_ctx_binding_cb;
@@ -2151,14 +2157,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
 	binding.size = cmd->body.sizeInBytes;
 	binding.slot = cmd->body.slot;
 
-	if (binding.shader_slot >= max_shader_num ||
-	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
-		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
-			       (unsigned int) cmd->body.type,
-			       (unsigned int) binding.slot);
-		return -EINVAL;
-	}
-
 	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
 			binding.slot);
@@ -2179,15 +2177,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
 		container_of(header, typeof(*cmd), header);
-	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
 	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
 		sizeof(SVGA3dShaderResourceViewId);
 
 	if ((u64) cmd->body.startView + (u64) num_sr_view >
 	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
-	    cmd->body.type >= max_allowed) {
+	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
 		VMW_DEBUG_USER("Invalid shader binding.\n");
 		return -EINVAL;
 	}
@@ -2211,8 +2207,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 				 SVGA3dCmdHeader *header)
 {
 	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
-	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
 	struct vmw_resource *res = NULL;
 	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 	struct vmw_ctx_bindinfo_shader binding;
@@ -2223,8 +2217,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 	cmd = container_of(header, typeof(*cmd), header);
 
-	if (cmd->body.type >= max_allowed ||
-	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
+	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
 		VMW_DEBUG_USER("Illegal shader type %u.\n",
 			       (unsigned int) cmd->body.type);
 		return -EINVAL;

@@ -733,6 +733,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
 		return -EINVAL;
 
 	if (*nplanes) {
+		if (*nplanes != q_data->fmt->num_planes)
+			return -EINVAL;
 		for (i = 0; i < *nplanes; i++)
 			if (sizes[i] < q_data->sizeimage[i])
 				return -EINVAL;

@@ -657,10 +657,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 		return NULL;
 	arp = (struct arp_pkt *)skb_network_header(skb);
 
-	/* Don't modify or load balance ARPs that do not originate locally
-	 * (e.g.,arrive via a bridge).
+	/* Don't modify or load balance ARPs that do not originate
+	 * from the bond itself or a VLAN directly above the bond.
 	 */
-	if (!bond_slave_has_mac_rx(bond, arp->mac_src))
+	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
 		return NULL;
 
 	if (arp->op_code == htons(ARPOP_REPLY)) {

@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 		nla_peer = data[VXCAN_INFO_PEER];
 		ifmp = nla_data(nla_peer);
-		err = rtnl_nla_parse_ifla(peer_tb,
-					  nla_data(nla_peer) +
-					  sizeof(struct ifinfomsg),
-					  nla_len(nla_peer) -
-					  sizeof(struct ifinfomsg),
-					  NULL);
+		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
 		if (err < 0)
 			return err;

@@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
 	int err;
 
 	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
-	if (!phy_dev || IS_ERR(phy_dev)) {
+	if (IS_ERR(phy_dev)) {
 		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
 		return -ENODEV;
 	}

@@ -571,7 +571,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
 		};
 
 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
-		if (!phydev || IS_ERR(phydev)) {
+		if (IS_ERR(phydev)) {
 			dev_err(kdev, "failed to register fixed PHY device\n");
 			return -ENODEV;
 		}

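Both PHY fixes follow from fixed_phy_register() reporting failure as an ERR_PTR() encoding rather than NULL, so the `!phy_dev ||` half of the old test could never fire and only obscured the convention. A self-contained sketch of the ERR_PTR/IS_ERR idiom (simplified userspace re-implementation for illustration, not the kernel headers):

#include <stdio.h>

/* Simplified userspace rendition of the kernel's ERR_PTR machinery. */
#define MAX_ERRNO	4095
#define ENODEV_DEMO	19

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fixed_phy_register_demo(int fail)
{
	/* This style of API reports failure as ERR_PTR(-errno), never NULL. */
	return fail ? ERR_PTR(-ENODEV_DEMO) : (void *)0x1000;
}

int main(void)
{
	void *phy = fixed_phy_register_demo(1);

	/* IS_ERR() alone is the whole check; testing !phy is dead code. */
	if (IS_ERR(phy))
		printf("register failed: %ld\n", PTR_ERR(phy));
	return 0;
}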
@@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
 	unsigned long offset;
 
 	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
-		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+		asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
 }
 
 /* replenish the buffers for a pool. note that we don't need to

@@ -359,7 +359,8 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Receive Packet Data Buffer Size.
 	 * The Packet Data Buffer Size is defined in 128 byte units.
 	 */
-	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+	rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
+				     BIT_ULL(ICE_RLAN_CTX_DBUF_S));
 
 	/* use 32 byte descriptors */
 	rlan_ctx.dsize = 1;

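The shift truncates: any buffer length that is not a multiple of 128 bytes gets rounded down, understating the buffer size programmed into the hardware, while DIV_ROUND_UP rounds to the next whole 128-byte unit. A quick worked check (assuming ICE_RLAN_CTX_DBUF_S is the 128-byte shift, i.e. 7):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 3000;			/* not a multiple of 128 */

	printf("%u\n", len >> 7);			/* 23 -> 2944 bytes, too small */
	printf("%u\n", DIV_ROUND_UP(len, 128));		/* 24 -> 3072 bytes */
	return 0;
}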
@@ -1260,18 +1260,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		return;
 	}
 
-	spin_lock_init(&adapter->tmreg_lock);
-	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
-
-	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
-		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
-				  igb_ptp_overflow_check);
-
-	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
-
-	igb_ptp_reset(adapter);
-
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
@@ -1281,6 +1269,18 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
 			 adapter->netdev->name);
 		adapter->ptp_flags |= IGB_PTP_ENABLED;
+
+		spin_lock_init(&adapter->tmreg_lock);
+		INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+		if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+			INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+					  igb_ptp_overflow_check);
+
+		adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+		adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+		igb_ptp_reset(adapter);
 	}
 }

@@ -539,7 +539,7 @@
 #define IGC_PTM_CTRL_START_NOW	BIT(29) /* Start PTM Now */
 #define IGC_PTM_CTRL_EN		BIT(30) /* Enable PTM */
 #define IGC_PTM_CTRL_TRIG	BIT(31) /* PTM Cycle trigger */
-#define IGC_PTM_CTRL_SHRT_CYC(usec)	(((usec) & 0x2f) << 2)
+#define IGC_PTM_CTRL_SHRT_CYC(usec)	(((usec) & 0x3f) << 2)
 #define IGC_PTM_CTRL_PTM_TO(usec)	(((usec) & 0xff) << 8)
 
 #define IGC_PTM_SHORT_CYC_DEFAULT	10 /* Default Short/interrupted cycle interval */

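The typo matters because 0x2f (0b101111) silently clears bit 4 of the cycle value, whereas the intended 6-bit mask is 0x3f (0b111111). The default of 10 happens to survive both masks, but any value with bit 4 set is corrupted:

#include <stdio.h>

int main(void)
{
	unsigned int usec = 24;				/* 0b11000: bit 4 set */

	printf("0x2f mask: %u\n", usec & 0x2f);		/* 8  -- bit 4 lost */
	printf("0x3f mask: %u\n", usec & 0x3f);		/* 24 -- intact */
	return 0;
}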
@@ -4013,9 +4013,10 @@ rx_frscfg:
 	if (link < 0)
 		return NIX_AF_ERR_RX_LINK_INVALID;
 
-	nix_find_link_frs(rvu, req, pcifunc);
-
 linkcfg:
+	nix_find_link_frs(rvu, req, pcifunc);
+
 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
 	if (req->update_minlen)

@@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused,
 
 		write_pnet(&port->pnet, newnet);
 
-		ipvlan_migrate_l3s_hook(oldnet, newnet);
+		if (port->mode == IPVLAN_MODE_L3S)
+			ipvlan_migrate_l3s_hook(oldnet, newnet);
 		break;
 	}
 	case NETDEV_UNREGISTER:

@@ -1654,10 +1654,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 		nla_peer = data[VETH_INFO_PEER];
 		ifmp = nla_data(nla_peer);
-		err = rtnl_nla_parse_ifla(peer_tb,
-					  nla_data(nla_peer) + sizeof(struct ifinfomsg),
-					  nla_len(nla_peer) - sizeof(struct ifinfomsg),
-					  NULL);
+		err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
 		if (err < 0)
 			return err;

@@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
 
-#ifdef DEBUG
-const char *action_names[] = {
+static const char *action_names[] = {
+	[0] = "INVALID",
 	[OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
 	[OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
 	[OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
 	[OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
 	[OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
 };
-#endif
 
 int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
 {
@@ -594,21 +593,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
 		}
 
 		ret = __of_add_property(ce->np, ce->prop);
-		if (ret) {
-			pr_err("changeset: add_property failed @%pOF/%s\n",
-			       ce->np,
-			       ce->prop->name);
-			break;
-		}
 		break;
 	case OF_RECONFIG_REMOVE_PROPERTY:
 		ret = __of_remove_property(ce->np, ce->prop);
-		if (ret) {
-			pr_err("changeset: remove_property failed @%pOF/%s\n",
-			       ce->np,
-			       ce->prop->name);
-			break;
-		}
 		break;
 
 	case OF_RECONFIG_UPDATE_PROPERTY:
@@ -622,20 +609,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
 		}
 
 		ret = __of_update_property(ce->np, ce->prop, &old_prop);
-		if (ret) {
-			pr_err("changeset: update_property failed @%pOF/%s\n",
-			       ce->np,
-			       ce->prop->name);
-			break;
-		}
 		break;
 	default:
 		ret = -EINVAL;
 	}
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
-	if (ret)
+	if (ret) {
+		pr_err("changeset: apply failed: %-15s %pOF:%s\n",
+		       action_names[ce->action], ce->np, ce->prop->name);
 		return ret;
+	}
 
 	switch (ce->action) {
 	case OF_RECONFIG_ATTACH_NODE:
@@ -921,6 +905,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
 	if (!ce)
 		return -ENOMEM;
 
+	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+		return -EINVAL;
+
 	/* get a reference to the node */
 	ce->action = action;
 	ce->np = of_node_get(np);

@@ -187,8 +187,8 @@ int ima_free_kexec_buffer(void)
 	if (ret)
 		return ret;
 
-	return memblock_free(addr, size);
+	memblock_free_late(addr, size);
+	return 0;
 }
 
 /**

@@ -657,12 +657,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
 	memset(&args, 0, sizeof(args));
 
 	EXPECT_BEGIN(KERN_INFO,
-		     "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+		     "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
 
 	rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle",
 					    "phandle", 0, &args);
 	EXPECT_END(KERN_INFO,
-		   "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle");
+		   "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678");
 
 	unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);

@@ -503,12 +503,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
 			if (pass && dev->subordinate) {
 				check_hotplug_bridge(slot, dev);
 				pcibios_resource_survey_bus(dev->subordinate);
-				__pci_bus_size_bridges(dev->subordinate,
-						       &add_list);
+				if (pci_is_root_bus(bus))
+					__pci_bus_size_bridges(dev->subordinate, &add_list);
 			}
 		}
 	}
-	__pci_bus_assign_resources(bus, &add_list, NULL);
+	if (pci_is_root_bus(bus))
+		__pci_bus_assign_resources(bus, &add_list, NULL);
+	else
+		pci_assign_unassigned_bridge_resources(bus->self);
 	}
 
 	acpiphp_sanitize_bus(bus);

@@ -14,6 +14,7 @@
 #include <linux/gpio/driver.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/pinmux.h>
@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
 	struct pinctrl_dev *pctl;
 	struct pinctrl_gpio_range gpio_range;
 	int npins;
+	struct mutex mutex; /* serialize adding groups and functions */
 };
 
 #define RZA2_PDR(port)		(0x0000 + (port) * 2)	/* Direction 16-bit */
@@ -359,10 +361,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
 		psel_val[i] = MUX_FUNC(value);
 	}
 
+	mutex_lock(&priv->mutex);
+
 	/* Register a single pin group listing all the pins we read from DT */
 	gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
-	if (gsel < 0)
-		return gsel;
+	if (gsel < 0) {
+		ret = gsel;
+		goto unlock;
+	}
 
 	/*
 	 * Register a single group function where the 'data' is an array PSEL
@@ -391,6 +397,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
 	(*map)->data.mux.function = np->name;
 	*num_maps = 1;
 
+	mutex_unlock(&priv->mutex);
+
 	return 0;
 
 remove_function:
@@ -399,6 +407,9 @@ remove_function:
 remove_group:
 	pinctrl_generic_remove_group(pctldev, gsel);
 
+unlock:
+	mutex_unlock(&priv->mutex);
+
 	dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
 
 	return ret;
@@ -474,6 +485,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
+	mutex_init(&priv->mutex);
+
 	platform_set_drvdata(pdev, priv);
 
 	priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *

@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
 raid_attr_ro_fn(resync);
 raid_attr_ro_state_fn(state);
 
-static void raid_component_release(struct device *dev)
-{
-	struct raid_component *rc =
-		container_of(dev, struct raid_component, dev);
-	dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
-	put_device(rc->dev.parent);
-	kfree(rc);
-}
-
-int raid_component_add(struct raid_template *r,struct device *raid_dev,
-		       struct device *component_dev)
-{
-	struct device *cdev =
-		attribute_container_find_class_device(&r->raid_attrs.ac,
-						      raid_dev);
-	struct raid_component *rc;
-	struct raid_data *rd = dev_get_drvdata(cdev);
-	int err;
-
-	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
-	if (!rc)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&rc->node);
-	device_initialize(&rc->dev);
-	rc->dev.release = raid_component_release;
-	rc->dev.parent = get_device(component_dev);
-	rc->num = rd->component_count++;
-
-	dev_set_name(&rc->dev, "component-%d", rc->num);
-	list_add_tail(&rc->node, &rd->component_list);
-	rc->dev.class = &raid_class.class;
-	err = device_add(&rc->dev);
-	if (err)
-		goto err_out;
-
-	return 0;
-
-err_out:
-	put_device(&rc->dev);
-	list_del(&rc->node);
-	rd->component_count--;
-	put_device(component_dev);
-	kfree(rc);
-	return err;
-}
-EXPORT_SYMBOL(raid_component_add);
-
 struct raid_template *
 raid_class_attach(struct raid_function_template *ft)
 {

@@ -317,12 +317,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
 			      "Snic Tgt: device_add, with err = %d\n",
 			      ret);
 
-		put_device(&tgt->dev);
 		put_device(&snic->shost->shost_gendev);
 		spin_lock_irqsave(snic->shost->host_lock, flags);
 		list_del(&tgt->list);
 		spin_unlock_irqrestore(snic->shost->host_lock, flags);
-		kfree(tgt);
+		put_device(&tgt->dev);
 		tgt = NULL;
 
 		return tgt;

@@ -188,23 +188,29 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
 {
 	u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
 	u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
-	u32 bit_mask, end_mask, eorx, shift;
-	const char *s = image->data, *src;
+	u32 bit_mask, eorx, shift;
+	const u8 *s = image->data, *src;
 	u32 *dst;
-	const u32 *tab = NULL;
+	const u32 *tab;
+	size_t tablen;
+	u32 colortab[16];
 	int i, j, k;
 
 	switch (bpp) {
 	case 8:
 		tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
+		tablen = 16;
 		break;
 	case 16:
 		tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
+		tablen = 4;
 		break;
 	case 32:
-	default:
 		tab = cfb_tab32;
+		tablen = 2;
 		break;
+	default:
+		return;
 	}
 
 	for (i = ppw-1; i--; ) {
@@ -218,20 +224,62 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
 	eorx = fgx ^ bgx;
 	k = image->width/ppw;
 
+	for (i = 0; i < tablen; ++i)
+		colortab[i] = (tab[i] & eorx) ^ bgx;
+
 	for (i = image->height; i--; ) {
 		dst = dst1;
 		shift = 8;
 		src = s;
 
-		for (j = k; j--; ) {
+		/*
+		 * Manually unroll the per-line copying loop for better
+		 * performance. This works until we processed the last
+		 * completely filled source byte (inclusive).
+		 */
+		switch (ppw) {
+		case 4: /* 8 bpp */
+			for (j = k; j >= 2; j -= 2, ++src) {
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		case 2: /* 16 bpp */
+			for (j = k; j >= 4; j -= 4, ++src) {
+				*dst++ = colortab[(*src >> 6) & bit_mask];
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 2) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		case 1: /* 32 bpp */
+			for (j = k; j >= 8; j -= 8, ++src) {
+				*dst++ = colortab[(*src >> 7) & bit_mask];
+				*dst++ = colortab[(*src >> 6) & bit_mask];
+				*dst++ = colortab[(*src >> 5) & bit_mask];
+				*dst++ = colortab[(*src >> 4) & bit_mask];
+				*dst++ = colortab[(*src >> 3) & bit_mask];
+				*dst++ = colortab[(*src >> 2) & bit_mask];
+				*dst++ = colortab[(*src >> 1) & bit_mask];
+				*dst++ = colortab[(*src >> 0) & bit_mask];
+			}
+			break;
+		}
+
+		/*
+		 * For image widths that are not a multiple of 8, there
+		 * are trailing pixels left on the current line. Print
+		 * them as well.
+		 */
+		for (; j--; ) {
 			shift -= ppw;
-			end_mask = tab[(*src >> shift) & bit_mask];
-			*dst++ = (end_mask & eorx) ^ bgx;
+			*dst++ = colortab[(*src >> shift) & bit_mask];
 			if (!shift) {
 				shift = 8;
-				src++;
+				++src;
 			}
 		}
+
 		dst1 += p->fix.line_length;
 		s += spitch;
 	}

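The rewrite precomputes a small lookup table, colortab[], that maps each source bit pattern straight to final pixel bits via (tab[i] & eorx) ^ bgx, so the inner loop becomes one table index per pixel group instead of recomputing the XOR per pixel. A toy 8bpp rendition of the idea (userspace, hypothetical colour values):

#include <stdint.h>
#include <stdio.h>

/* Expand one monochrome nibble to four 8bpp pixels: each bit -> 0xff or 0x00. */
static uint32_t tab8(unsigned int nib)
{
	uint32_t v = 0;

	for (int b = 0; b < 4; b++)
		if (nib & (1u << (3 - b)))
			v |= 0xffu << (8 * b);	/* little-endian pixel order */
	return v;
}

int main(void)
{
	uint32_t fgx = 0x07070707, bgx = 0x01010101;	/* 4 copies of fg/bg */
	uint32_t eorx = fgx ^ bgx, colortab[16];
	uint8_t srcb = 0xa0;	/* 0b10100000 -> fg,bg,fg,bg then bg,bg,bg,bg */

	/* one-off table: nibble -> four ready-made pixels */
	for (unsigned int i = 0; i < 16; i++)
		colortab[i] = (tab8(i) & eorx) ^ bgx;

	printf("%08x %08x\n", colortab[srcb >> 4], colortab[srcb & 0xf]);
	return 0;
}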
@@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
 		return ATTR_KILL_SGID;
 	return 0;
 }
+EXPORT_SYMBOL(setattr_should_drop_sgid);
 
 /**
  * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to

@@ -1856,7 +1856,7 @@ static void del_timeout(struct dlm_lkb *lkb)
 void dlm_scan_timeout(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r;
-	struct dlm_lkb *lkb;
+	struct dlm_lkb *lkb = NULL, *iter;
 	int do_cancel, do_warn;
 	s64 wait_us;
 
@@ -1867,27 +1867,28 @@ void dlm_scan_timeout(struct dlm_ls *ls)
 		do_cancel = 0;
 		do_warn = 0;
 		mutex_lock(&ls->ls_timeout_mutex);
-		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
+		list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {
 
 			wait_us = ktime_to_us(ktime_sub(ktime_get(),
-							lkb->lkb_timestamp));
+							iter->lkb_timestamp));
 
-			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
-			    wait_us >= (lkb->lkb_timeout_cs * 10000))
+			if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
+			    wait_us >= (iter->lkb_timeout_cs * 10000))
 				do_cancel = 1;
 
-			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
+			if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
 			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
 				do_warn = 1;
 
 			if (!do_cancel && !do_warn)
 				continue;
-			hold_lkb(lkb);
+			hold_lkb(iter);
+			lkb = iter;
 			break;
 		}
 		mutex_unlock(&ls->ls_timeout_mutex);
 
-		if (!do_cancel && !do_warn)
+		if (!lkb)
 			break;
 
 		r = lkb->lkb_resource;
@@ -5239,21 +5240,18 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
 {
-	struct dlm_lkb *lkb;
-	int found = 0;
+	struct dlm_lkb *lkb = NULL, *iter;
 
 	mutex_lock(&ls->ls_waiters_mutex);
-	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
-		if (lkb->lkb_flags & DLM_IFL_RESEND) {
-			hold_lkb(lkb);
-			found = 1;
+	list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
+		if (iter->lkb_flags & DLM_IFL_RESEND) {
+			hold_lkb(iter);
+			lkb = iter;
 			break;
 		}
 	}
 	mutex_unlock(&ls->ls_waiters_mutex);
 
-	if (!found)
-		lkb = NULL;
 	return lkb;
 }
 
@@ -5912,37 +5910,36 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 		     int mode, uint32_t flags, void *name, unsigned int namelen,
 		     unsigned long timeout_cs, uint32_t *lkid)
 {
-	struct dlm_lkb *lkb;
+	struct dlm_lkb *lkb = NULL, *iter;
 	struct dlm_user_args *ua;
 	int found_other_mode = 0;
-	int found = 0;
 	int rv = 0;
 
 	mutex_lock(&ls->ls_orphans_mutex);
-	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
-		if (lkb->lkb_resource->res_length != namelen)
+	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
+		if (iter->lkb_resource->res_length != namelen)
 			continue;
-		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
+		if (memcmp(iter->lkb_resource->res_name, name, namelen))
 			continue;
-		if (lkb->lkb_grmode != mode) {
+		if (iter->lkb_grmode != mode) {
 			found_other_mode = 1;
 			continue;
 		}
 
-		found = 1;
-		list_del_init(&lkb->lkb_ownqueue);
-		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
-		*lkid = lkb->lkb_id;
+		lkb = iter;
+		list_del_init(&iter->lkb_ownqueue);
+		iter->lkb_flags &= ~DLM_IFL_ORPHAN;
+		*lkid = iter->lkb_id;
 		break;
 	}
 	mutex_unlock(&ls->ls_orphans_mutex);
 
-	if (!found && found_other_mode) {
+	if (!lkb && found_other_mode) {
 		rv = -EAGAIN;
 		goto out;
 	}
 
-	if (!found) {
+	if (!lkb) {
 		rv = -ENOENT;
 		goto out;
 	}

@@ -80,8 +80,7 @@ static void send_op(struct plock_op *op)
    abandoned waiter. So, we have to insert the unlock-close when the
    lock call is interrupted. */
-static void do_unlock_close(struct dlm_ls *ls, u64 number,
-			    struct file *file, struct file_lock *fl)
+static void do_unlock_close(const struct dlm_plock_info *info)
 {
 	struct plock_op *op;
 
@@ -90,15 +89,12 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
 		return;
 
 	op->info.optype		= DLM_PLOCK_OP_UNLOCK;
-	op->info.pid		= fl->fl_pid;
-	op->info.fsid		= ls->ls_global_id;
-	op->info.number		= number;
+	op->info.pid		= info->pid;
+	op->info.fsid		= info->fsid;
+	op->info.number		= info->number;
 	op->info.start		= 0;
 	op->info.end		= OFFSET_MAX;
-	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
-		op->info.owner	= (__u64) fl->fl_pid;
-	else
-		op->info.owner	= (__u64)(long) fl->fl_owner;
+	op->info.owner		= info->owner;
 
 	op->info.flags |= DLM_PLOCK_FL_CLOSE;
 	send_op(op);
@@ -161,13 +157,14 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 
 	rv = wait_event_killable(recv_wq, (op->done != 0));
 	if (rv == -ERESTARTSYS) {
-		log_debug(ls, "%s: wait killed %llx", __func__,
-			  (unsigned long long)number);
 		spin_lock(&ops_lock);
 		list_del(&op->list);
 		spin_unlock(&ops_lock);
+		log_debug(ls, "%s: wait interrupted %x %llx pid %d",
+			  __func__, ls->ls_global_id,
+			  (unsigned long long)number, op->info.pid);
 		dlm_release_plock_op(op);
-		do_unlock_close(ls, number, file, fl);
+		do_unlock_close(&op->info);
 		goto out;
 	}
 
@@ -408,7 +405,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
 			list_del(&op->list);
 		else
-			list_move(&op->list, &recv_list);
+			list_move_tail(&op->list, &recv_list);
 		memcpy(&info, &op->info, sizeof(info));
 	}
 	spin_unlock(&ops_lock);
@@ -433,9 +430,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
 static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 			 loff_t *ppos)
 {
+	struct plock_op *op = NULL, *iter;
 	struct dlm_plock_info info;
-	struct plock_op *op;
-	int found = 0, do_callback = 0;
+	int do_callback = 0;
 
 	if (count != sizeof(info))
 		return -EINVAL;
@@ -446,31 +443,63 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 	if (check_version(&info))
 		return -EINVAL;
 
+	/*
+	 * The results for waiting ops (SETLKW) can be returned in any
+	 * order, so match all fields to find the op. The results for
+	 * non-waiting ops are returned in the order that they were sent
+	 * to userspace, so match the result with the first non-waiting op.
+	 */
 	spin_lock(&ops_lock);
-	list_for_each_entry(op, &recv_list, list) {
-		if (op->info.fsid == info.fsid &&
-		    op->info.number == info.number &&
-		    op->info.owner == info.owner) {
-			list_del_init(&op->list);
-			memcpy(&op->info, &info, sizeof(info));
-			if (op->data)
-				do_callback = 1;
-			else
-				op->done = 1;
-			found = 1;
-			break;
+	if (info.wait) {
+		list_for_each_entry(iter, &recv_list, list) {
+			if (iter->info.fsid == info.fsid &&
+			    iter->info.number == info.number &&
+			    iter->info.owner == info.owner &&
+			    iter->info.pid == info.pid &&
+			    iter->info.start == info.start &&
+			    iter->info.end == info.end &&
+			    iter->info.ex == info.ex &&
+			    iter->info.wait) {
+				op = iter;
+				break;
+			}
+		}
+	} else {
+		list_for_each_entry(iter, &recv_list, list) {
+			if (!iter->info.wait) {
+				op = iter;
+				break;
+			}
 		}
 	}
+
+	if (op) {
+		/* Sanity check that op and info match. */
+		if (info.wait)
+			WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK);
+		else
+			WARN_ON(op->info.fsid != info.fsid ||
+				op->info.number != info.number ||
+				op->info.owner != info.owner ||
+				op->info.optype != info.optype);
+
+		list_del_init(&op->list);
+		memcpy(&op->info, &info, sizeof(info));
+		if (op->data)
+			do_callback = 1;
+		else
+			op->done = 1;
+	}
 	spin_unlock(&ops_lock);
 
-	if (found) {
+	if (op) {
 		if (do_callback)
 			dlm_plock_callback(op);
 		else
 			wake_up(&recv_wq);
 	} else
-		log_print("dev_write no op %x %llx", info.fsid,
-			  (unsigned long long)info.number);
+		log_print("%s: no op %x %llx", __func__,
+			  info.fsid, (unsigned long long)info.number);
 	return count;
 }

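The new matching rules are worth restating: a SETLKW result may come back in any order, so it must be matched on every identifying field and only against waiting ops, while non-waiting results arrive strictly FIFO and simply pair with the first non-waiting op on the list. A small standalone model of that policy (plain C, simplified structs, not the dlm types):

#include <stddef.h>
#include <stdio.h>

struct op {
	int fsid, number, owner, wait;
	struct op *next;
};

static struct op *match_result(struct op *recv_list, const struct op *res)
{
	struct op *iter;

	for (iter = recv_list; iter; iter = iter->next) {
		if (res->wait) {
			/* waiting ops: results unordered, match all fields */
			if (iter->wait && iter->fsid == res->fsid &&
			    iter->number == res->number &&
			    iter->owner == res->owner)
				return iter;
		} else if (!iter->wait) {
			/* non-waiting ops: results FIFO, take the first one */
			return iter;
		}
	}
	return NULL;
}

int main(void)
{
	struct op b = { 1, 42, 7, 0, NULL };	/* non-waiting, queued second */
	struct op a = { 1, 41, 7, 1, &b };	/* waiting, queued first */
	struct op res = { 1, 42, 7, 0, NULL };	/* result for a non-waiting op */

	printf("matched number %d\n", match_result(&a, &res)->number); /* 42 */
	return 0;
}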
@@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
 
 static void recover_lvb(struct dlm_rsb *r)
 {
-	struct dlm_lkb *lkb, *high_lkb = NULL;
+	struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
 	uint32_t high_seq = 0;
 	int lock_lvb_exists = 0;
-	int big_lock_exists = 0;
 	int lvblen = r->res_ls->ls_lvblen;
 
 	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
@@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
 	/* we are the new master, so figure out if VALNOTVALID should
 	   be set, and set the rsb lvb from the best lkb available. */
 
-	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
-		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+	list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
+		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
 			continue;
 
 		lock_lvb_exists = 1;
 
-		if (lkb->lkb_grmode > DLM_LOCK_CR) {
-			big_lock_exists = 1;
+		if (iter->lkb_grmode > DLM_LOCK_CR) {
+			big_lkb = iter;
 			goto setflag;
 		}
 
-		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
-			high_lkb = lkb;
-			high_seq = lkb->lkb_lvbseq;
+		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+			high_lkb = iter;
+			high_seq = iter->lkb_lvbseq;
 		}
 	}
 
-	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
-		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
+	list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
+		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
 			continue;
 
 		lock_lvb_exists = 1;
 
-		if (lkb->lkb_grmode > DLM_LOCK_CR) {
-			big_lock_exists = 1;
+		if (iter->lkb_grmode > DLM_LOCK_CR) {
+			big_lkb = iter;
 			goto setflag;
 		}
 
-		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
-			high_lkb = lkb;
-			high_seq = lkb->lkb_lvbseq;
+		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
+			high_lkb = iter;
+			high_seq = iter->lkb_lvbseq;
 		}
 	}
 
@@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
 		goto out;
 
 	/* lvb is invalidated if only NL/CR locks remain */
-	if (!big_lock_exists)
+	if (!big_lkb)
 		rsb_set_flag(r, RSB_VALNOTVALID);
 
 	if (!r->res_lvbptr) {
@@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
 			goto out;
 	}
 
-	if (big_lock_exists) {
-		r->res_lvbseq = lkb->lkb_lvbseq;
-		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
+	if (big_lkb) {
+		r->res_lvbseq = big_lkb->lkb_lvbseq;
+		memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
 	} else if (high_lkb) {
 		r->res_lvbseq = high_lkb->lkb_lvbseq;
 		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);

@@ -235,5 +235,3 @@ int do_setxattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 /*
  * fs/attr.c
  */
-int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
-			     const struct inode *inode);

@@ -27,7 +27,7 @@
  *
  * Called with j_list_lock held.
  */
-static inline void __buffer_unlink_first(struct journal_head *jh)
+static inline void __buffer_unlink(struct journal_head *jh)
 {
 	transaction_t *transaction = jh->b_cp_transaction;
 
@@ -40,23 +40,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh)
 	}
 }
 
-/*
- * Unlink a buffer from a transaction checkpoint(io) list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-	if (transaction->t_checkpoint_io_list == jh) {
-		transaction->t_checkpoint_io_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_io_list == jh)
-			transaction->t_checkpoint_io_list = NULL;
-	}
-}
-
 /*
  * Check a checkpoint buffer could be release or not.
  *
@@ -367,19 +350,24 @@
 /* Checkpoint list management */
 
 /*
- * journal_clean_one_cp_list
+ * journal_shrink_one_cp_list
 *
- * Find all the written-back checkpoint buffers in the given list and
- * release them. If 'destroy' is set, clean all buffers unconditionally.
+ * Find all the written-back checkpoint buffers in the given list
+ * and try to release them. If the whole transaction is released, set
+ * the 'released' parameter. Return the number of released checkpointed
+ * buffers.
 *
 * Called with j_list_lock held.
- * Returns 1 if we freed the transaction, 0 otherwise.
 */
-static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
+						bool destroy, bool *released)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
+	unsigned long nr_freed = 0;
+	int ret;
 
+	*released = false;
 	if (!jh)
 		return 0;
@@ -388,57 +376,15 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
 
-		if (!destroy && __cp_buffer_busy(jh))
-			return 0;
-
-		if (__jbd2_journal_remove_checkpoint(jh))
-			return 1;
-		/*
-		 * This function only frees up some memory
-		 * if possible so we dont have an obligation
-		 * to finish processing. Bail out if preemption
-		 * requested:
-		 */
-		if (need_resched())
-			return 0;
-	} while (jh != last_jh);
-
-	return 0;
-}
-
-/*
- * journal_shrink_one_cp_list
- *
- * Find 'nr_to_scan' written-back checkpoint buffers in the given list
- * and try to release them. If the whole transaction is released, set
- * the 'released' parameter. Return the number of released checkpointed
- * buffers.
- *
- * Called with j_list_lock held.
- */
-static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
-						unsigned long *nr_to_scan,
-						bool *released)
-{
-	struct journal_head *last_jh;
-	struct journal_head *next_jh = jh;
-	unsigned long nr_freed = 0;
-	int ret;
-
-	if (!jh || *nr_to_scan == 0)
-		return 0;
-
-	last_jh = jh->b_cpprev;
-	do {
-		jh = next_jh;
-		next_jh = jh->b_cpnext;
-		(*nr_to_scan)--;
-		if (__cp_buffer_busy(jh))
-			continue;
+		if (destroy) {
+			ret = __jbd2_journal_remove_checkpoint(jh);
+		} else {
+			ret = jbd2_journal_try_remove_checkpoint(jh);
+			if (ret < 0)
+				continue;
+		}
 
 		nr_freed++;
-		ret = __jbd2_journal_remove_checkpoint(jh);
 		if (ret) {
 			*released = true;
 			break;
@@ -446,7 +392,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
 
 		if (need_resched())
 			break;
-	} while (jh != last_jh && *nr_to_scan);
+	} while (jh != last_jh);
 
 	return nr_freed;
 }
@@ -464,11 +410,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
 						  unsigned long *nr_to_scan)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
-	bool released;
+	bool __maybe_unused released;
 	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
 	tid_t tid = 0;
 	unsigned long nr_freed = 0;
-	unsigned long nr_scanned = *nr_to_scan;
+	unsigned long freed;
 
 again:
 	spin_lock(&journal->j_list_lock);
@@ -497,19 +443,11 @@ again:
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
 		tid = transaction->t_tid;
-		released = false;
 
-		nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list,
-						       nr_to_scan, &released);
-		if (*nr_to_scan == 0)
-			break;
-		if (need_resched() || spin_needbreak(&journal->j_list_lock))
-			break;
-		if (released)
-			continue;
-
-		nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list,
-						       nr_to_scan, &released);
+		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+						   false, &released);
+		nr_freed += freed;
+		(*nr_to_scan) -= min(*nr_to_scan, freed);
 		if (*nr_to_scan == 0)
 			break;
 		if (need_resched() || spin_needbreak(&journal->j_list_lock))
@@ -530,9 +468,8 @@ again:
 	if (*nr_to_scan && next_tid)
 		goto again;
 out:
-	nr_scanned -= *nr_to_scan;
 	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
-					  nr_freed, nr_scanned, next_tid);
+					  nr_freed, next_tid);
 	return nr_freed;
 }
@@ -548,7 +485,7 @@ out:
 void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret;
+	bool released;
 
 	transaction = journal->j_checkpoint_transactions;
 	if (!transaction)
@@ -559,8 +496,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
-						destroy);
+		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
+					   destroy, &released);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * dont have an obligation to finish processing. Bail out if
@@ -568,23 +505,12 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 		 */
 		if (need_resched())
 			return;
-		if (ret)
-			continue;
-		/*
-		 * It is essential that we are as careful as in the case of
-		 * t_checkpoint_list with removing the buffer from the list as
-		 * we can possibly see not yet submitted buffers on io_list
-		 */
-		ret = journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, destroy);
-		if (need_resched())
-			return;
 		/*
 		 * Stop scanning if we couldn't free the transaction. This
 		 * avoids pointless scanning of transactions which still
 		 * weren't checkpointed.
 		 */
-		if (!ret)
+		if (!released)
 			return;
 	} while (transaction != last_transaction);
 }
@@ -663,7 +589,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 	jbd2_journal_put_journal_head(jh);
 
 	/* Is this transaction empty? */
-	if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list)
+	if (transaction->t_checkpoint_list)
 		return 0;
 
 	/*
@@ -694,6 +620,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
 	return 1;
 }
 
+/*
+ * Check the checkpoint buffer and try to remove it from the checkpoint
+ * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if
+ * it frees the transaction, 0 otherwise.
+ *
+ * This function is called with j_list_lock held.
+ */
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
+{
+	struct buffer_head *bh = jh2bh(jh);
+
+	if (!trylock_buffer(bh))
+		return -EBUSY;
+	if (buffer_dirty(bh)) {
+		unlock_buffer(bh);
+		return -EBUSY;
+	}
+	unlock_buffer(bh);
+
+	/*
+	 * Buffer is clean and the IO has finished (we held the buffer
+	 * lock) so the checkpoint is done. We can safely remove the
+	 * buffer from this transaction.
+	 */
+	JBUFFER_TRACE(jh, "remove from checkpoint list");
+	return __jbd2_journal_remove_checkpoint(jh);
+}
+
 /*
  * journal_insert_checkpoint: put a committed buffer onto a checkpoint
  * list so that we know when it is safe to clean the transaction out of
@@ -755,7 +709,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
 	J_ASSERT(transaction->t_forget == NULL);
 	J_ASSERT(transaction->t_shadow_list == NULL);
 	J_ASSERT(transaction->t_checkpoint_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
 	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
 	J_ASSERT(journal->j_committing_transaction != transaction);
 	J_ASSERT(journal->j_running_transaction != transaction);

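The new jbd2_journal_try_remove_checkpoint() is a classic trylock gate: take the buffer lock opportunistically, bail with -EBUSY if the buffer is locked or still dirty, and only then do the removal. A minimal pthreads analogue of that shape (illustrative only, not the jbd2 API):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct buf {
	pthread_mutex_t lock;
	int dirty;
	int on_list;
};

/* Remove 'b' from its list only if we can prove it is clean right now. */
static int try_remove_if_clean(struct buf *b)
{
	if (pthread_mutex_trylock(&b->lock))
		return -EBUSY;		/* someone is using it: don't wait */
	if (b->dirty) {
		pthread_mutex_unlock(&b->lock);
		return -EBUSY;		/* still dirty: leave it queued */
	}
	pthread_mutex_unlock(&b->lock);

	b->on_list = 0;			/* clean and idle: safe to drop */
	return 0;
}

int main(void)
{
	struct buf b = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	printf("rc=%d on_list=%d\n", try_remove_if_clean(&b), b.on_list);
	return 0;
}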
@@ -1184,8 +1184,7 @@ restart_loop:
 	spin_lock(&journal->j_list_lock);
 	commit_transaction->t_state = T_FINISHED;
 	/* Check if the transaction can be dropped now that we are finished */
-	if (commit_transaction->t_checkpoint_list == NULL &&
-	    commit_transaction->t_checkpoint_io_list == NULL) {
+	if (commit_transaction->t_checkpoint_list == NULL) {
 		__jbd2_journal_drop_transaction(journal, commit_transaction);
 		jbd2_journal_free_transaction(commit_transaction);
 	}

@@ -1775,8 +1775,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
 		 * Otherwise, if the buffer has been written to disk,
 		 * it is safe to remove the checkpoint and drop it.
 		 */
-		if (!buffer_dirty(bh)) {
-			__jbd2_journal_remove_checkpoint(jh);
+		if (jbd2_journal_try_remove_checkpoint(jh) >= 0) {
 			spin_unlock(&journal->j_list_lock);
 			goto drop;
 		}
@@ -2103,20 +2102,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
 	jh = bh2jh(bh);
 
 	if (buffer_locked(bh) || buffer_dirty(bh))
-		goto out;
+		return;
 
 	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
-		goto out;
+		return;
 
 	spin_lock(&journal->j_list_lock);
-	if (jh->b_cp_transaction != NULL) {
-		/* written-back checkpointed metadata buffer */
-		JBUFFER_TRACE(jh, "remove from checkpoint list");
-		__jbd2_journal_remove_checkpoint(jh);
-	}
+	/* Remove written-back checkpointed metadata buffer */
+	if (jh->b_cp_transaction != NULL)
+		jbd2_journal_try_remove_checkpoint(jh);
 	spin_unlock(&journal->j_list_lock);
-out:
-	return;
 }

@@ -509,20 +509,26 @@ out:
 	return result;
 }
 
-static void
-nfs_direct_join_group(struct list_head *list, struct inode *inode)
+static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
 {
-	struct nfs_page *req, *next;
+	struct nfs_page *req, *subreq;
 
 	list_for_each_entry(req, list, wb_list) {
-		if (req->wb_head != req || req->wb_this_page == req)
+		if (req->wb_head != req)
 			continue;
-		for (next = req->wb_this_page;
-		     next != req->wb_head;
-		     next = next->wb_this_page) {
-			nfs_list_remove_request(next);
-			nfs_release_request(next);
-		}
+		subreq = req->wb_this_page;
+		if (subreq == req)
+			continue;
+		do {
+			/*
+			 * Remove subrequests from this list before freeing
+			 * them in the call to nfs_join_page_group().
+			 */
+			if (!list_empty(&subreq->wb_list)) {
+				nfs_list_remove_request(subreq);
+				nfs_release_request(subreq);
+			}
+		} while ((subreq = subreq->wb_this_page) != req);
 		nfs_join_page_group(req, inode);
 	}
 }

@@ -731,9 +731,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
 		if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
 		    inode->i_mode & S_ISUID)
 			inode->i_mode &= ~S_ISUID;
-		if ((attr->ia_valid & ATTR_KILL_SGID) != 0 &&
-		    (inode->i_mode & (S_ISGID | S_IXGRP)) ==
-		     (S_ISGID | S_IXGRP))
+		if (setattr_should_drop_sgid(&init_user_ns, inode))
 			inode->i_mode &= ~S_ISGID;
 		if ((attr->ia_valid & ATTR_MODE) != 0) {
 			int mode = attr->ia_mode & S_IALLUGO;

@@ -1339,7 +1339,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
 	for (i = 0; i < np; i++) {
 		pages[i] = alloc_page(GFP_KERNEL);
 		if (!pages[i]) {
-			np = i + 1;
 			err = -ENOMEM;
 			goto out;
 		}
@@ -1363,8 +1362,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
 	} while (exception.retry);
 
 out:
-	while (--np >= 0)
-		__free_page(pages[np]);
+	while (--i >= 0)
+		__free_page(pages[i]);
 	kfree(pages);
 
 	return err;

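The cleanup now counts down from the loop index i itself, which is correct in both the failure case (frees exactly the i pages that were allocated) and the success case (i == np after the loop). A tiny standalone model of the pattern:

#include <stdlib.h>

int main(void)
{
	int np = 4, i;
	void *pages[4];

	for (i = 0; i < np; i++) {
		pages[i] = (i == 2) ? NULL : malloc(64);	/* fail on the 3rd */
		if (!pages[i])
			goto out;
	}
out:
	/* frees pages[1] and pages[0]: exactly what was allocated */
	while (--i >= 0)
		free(pages[i]);
	return 0;
}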
@@ -5964,9 +5964,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 out_ok:
 	ret = res.acl_len;
 out_free:
-	for (i = 0; i < npages; i++)
-		if (pages[i])
-			__free_page(pages[i]);
+	while (--i >= 0)
+		__free_page(pages[i]);
 	if (res.acl_scratch)
 		__free_page(res.acl_scratch);
 	kfree(pages);
@@ -7153,8 +7152,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
 			goto out_restart;
 		break;
-	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_OLD_STATEID:
+		if (data->arg.new_lock_owner != 0 &&
+		    nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
+						  lsp->ls_state))
+			goto out_restart;
+		if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
+			goto out_restart;
+		fallthrough;
+	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_STALE_STATEID:
 	case -NFS4ERR_EXPIRED:
 		if (data->arg.new_lock_owner != 0) {

@@ -1263,9 +1263,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
 	WARN_ON(!list_empty(&dp->dl_recall_lru));
 
 	if (clp->cl_minorversion) {
+		spin_lock(&clp->cl_lock);
 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
 		refcount_inc(&dp->dl_stid.sc_count);
-		spin_lock(&clp->cl_lock);
 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
 		spin_unlock(&clp->cl_lock);
 	}

@@ -322,7 +322,9 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 			iap->ia_mode &= ~S_ISGID;
 		} else {
 			/* set ATTR_KILL_* bits and let VFS handle it */
-			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+			iap->ia_valid |= ATTR_KILL_SUID;
+			iap->ia_valid |=
+				setattr_should_drop_sgid(&init_user_ns, inode);
 		}
 	}
 }

@@ -1495,7 +1495,7 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
 
 #define DP_BRANCH_OUI_HEADER_SIZE	0xc
 #define DP_RECEIVER_CAP_SIZE		0xf
-#define DP_DSC_RECEIVER_CAP_SIZE	0xf
+#define DP_DSC_RECEIVER_CAP_SIZE	0x10 /* DSC Capabilities 0x60 through 0x6F */
 #define EDP_PSR_RECEIVER_CAP_SIZE	2
 #define EDP_DISPLAY_CTL_CAP_SIZE	3
 #define DP_LTTPR_COMMON_CAP_SIZE	8

View File

@@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
  */
 bool clk_is_match(const struct clk *p, const struct clk *q);
 
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ *                          producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer to execute, even indirectly,
+ * opereation which could alter the rate of the provider or cause glitches
+ *
+ * If exlusivity is claimed more than once on clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ *                          producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity it previously got
+ * from clk_rate_exclusive_get()
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
 #else
 
 static inline int clk_notifier_register(struct clk *clk,
@@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
 	return p == q;
 }
 
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+	return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
 #endif
 
 #ifdef CONFIG_HAVE_CLK_PREPARE
@@ -570,38 +610,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
  */
 struct clk *devm_get_clk_from_child(struct device *dev,
 				    struct device_node *np, const char *con_id);
-/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- *                          producer
- * @clk: clock source
- *
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
- *
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
- *
- * Must not be called from within atomic context.
- *
- * Returns success (0) or negative errno.
- */
-int clk_rate_exclusive_get(struct clk *clk);
-
-/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- *                          producer
- * @clk: clock source
- *
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
- *
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
- *
- * Must not be called from within atomic context.
- */
-void clk_rate_exclusive_put(struct clk *clk);
 
 /**
  * clk_enable - inform the system when the clock source should be running.
@@ -961,14 +969,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
 static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
 
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
-	return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
 static inline int clk_enable(struct clk *clk)
 {
 	return 0;
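The kernel-doc moved above spells out a pairing rule for this API: every successful clk_rate_exclusive_get() must be balanced by clk_rate_exclusive_put(). A minimal consumer sketch (hypothetical driver code, not part of this series) of how the calls are meant to bracket a rate-sensitive operation:

#include <linux/clk.h>
#include <linux/err.h>

/* Hypothetical consumer: hold the rate exclusively while programming it. */
static int example_program_rate(struct device *dev, unsigned long hz)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* May sleep; must not be called from atomic context. */
	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	ret = clk_set_rate(clk, hz);

	/* Balance the get; the rate is no longer guaranteed after this. */
	clk_rate_exclusive_put(clk);
	return ret;
}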


@@ -3195,6 +3195,8 @@ extern struct inode *new_inode(struct super_block *sb);
 extern void free_inode_nonrcu(struct inode *inode);
 extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *);
 extern int file_remove_privs(struct file *);
+int setattr_should_drop_sgid(struct user_namespace *mnt_userns,
+			     const struct inode *inode);
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 static inline void insert_inode_hash(struct inode *inode)


@@ -626,12 +626,6 @@ struct transaction_s
 	 */
 	struct journal_head	*t_checkpoint_list;
 
-	/*
-	 * Doubly-linked circular list of all buffers submitted for IO while
-	 * checkpointing. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_io_list;
-
 	/*
 	 * Doubly-linked circular list of metadata buffers being
 	 * shadowed by log IO. The IO buffers on the iobuf list and
@@ -1447,6 +1441,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
 void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
 unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
 void jbd2_journal_destroy_checkpoint(journal_t *journal);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);


@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
 
 struct raid_template *raid_class_attach(struct raid_function_template *);
 void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
-				    struct device *);


@@ -700,37 +700,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
 }
 
 /* Caller must hold rcu_read_lock() for read */
-static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
-						   const u8 *mac)
+static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
 {
 	struct list_head *iter;
 	struct slave *tmp;
 
 	bond_for_each_slave_rcu(bond, tmp, iter)
 		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
-			return tmp;
-
-	return NULL;
-}
-
-/* Caller must hold rcu_read_lock() for read */
-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
-{
-	struct list_head *iter;
-	struct slave *tmp;
-	struct netdev_hw_addr *ha;
-
-	bond_for_each_slave_rcu(bond, tmp, iter)
-		if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
 			return true;
 
-	if (netdev_uc_empty(bond->dev))
-		return false;
-
-	netdev_for_each_uc_addr(ha, bond->dev)
-		if (ether_addr_equal_64bits(mac, ha->addr))
-			return true;
-
 	return false;
 }


@@ -175,8 +175,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
-			struct netlink_ext_ack *exterr);
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+			     struct netlink_ext_ack *exterr);
 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
 
 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)


@@ -1272,6 +1272,7 @@ struct proto {
 	/*
 	 * Pressure flag: try to collapse.
 	 * Technical note: it is used by multiple contexts non atomically.
+	 * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
 	 * All the __sk_mem_schedule() is of this nature: accounting
 	 * is strict, actions are advisory and have some latency.
 	 */
@@ -1397,7 +1398,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
 static inline bool sk_under_global_memory_pressure(const struct sock *sk)
 {
 	return sk->sk_prot->memory_pressure &&
-		!!*sk->sk_prot->memory_pressure;
+		!!READ_ONCE(*sk->sk_prot->memory_pressure);
 }
 
 static inline bool sk_under_memory_pressure(const struct sock *sk)
@@ -1409,7 +1410,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 		return true;
 
-	return !!*sk->sk_prot->memory_pressure;
+	return !!READ_ONCE(*sk->sk_prot->memory_pressure);
 }
 
 static inline long
@@ -1467,7 +1468,7 @@ proto_memory_pressure(struct proto *prot)
 {
 	if (!prot->memory_pressure)
 		return false;
-	return !!*prot->memory_pressure;
+	return !!READ_ONCE(*prot->memory_pressure);
}


@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
 TRACE_EVENT(jbd2_shrink_checkpoint_list,
 
 	TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
-		 unsigned long nr_freed, unsigned long nr_scanned,
-		 tid_t next_tid),
+		 unsigned long nr_freed, tid_t next_tid),
 
-	TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
-		nr_scanned, next_tid),
+	TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
 		__field(tid_t, tid)
 		__field(tid_t, last_tid)
 		__field(unsigned long, nr_freed)
-		__field(unsigned long, nr_scanned)
 		__field(tid_t, next_tid)
 	),
@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
 		__entry->tid = tid;
 		__entry->last_tid = last_tid;
 		__entry->nr_freed = nr_freed;
-		__entry->nr_scanned = nr_scanned;
 		__entry->next_tid = next_tid;
 	),
 
 	TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
-		  "scanned %lu next transaction %u",
+		  "next transaction %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->first_tid, __entry->tid, __entry->last_tid,
-		  __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
+		  __entry->nr_freed, __entry->next_tid)
 );
 
 #endif /* _TRACE_JBD2_H */


@@ -477,8 +477,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Call with callback_lock or cpuset_mutex held. The check can be skipped
- * if on default hierarchy.
+ * Call with callback_lock or cpuset_mutex held.
  */
 static void cpuset_update_task_spread_flag(struct cpuset *cs,
 					struct task_struct *tsk)


@@ -17,8 +17,8 @@
  */
 #include "sched.h"
 #include "pelt.h"
-#include <trace/hooks/sched.h>
+#include <linux/cpuset.h>
+#include <trace/hooks/sched.h>
 
 struct dl_bandwidth def_dl_bandwidth;


@@ -915,7 +915,7 @@ void torture_kthread_stopping(char *title)
 	VERBOSE_TOROUT_STRING(buf);
 	while (!kthread_should_stop()) {
 		torture_shutdown_absorb(title);
-		schedule_timeout_uninterruptible(1);
+		schedule_timeout_uninterruptible(HZ / 20);
 	}
 }
 EXPORT_SYMBOL_GPL(torture_kthread_stopping);


@@ -4109,8 +4109,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	 * will point to the same string as current_trace->name.
 	 */
 	mutex_lock(&trace_types_lock);
-	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+		/* Close iter->trace before switching to the new current tracer */
+		if (iter->trace->close)
+			iter->trace->close(iter);
 		*iter->trace = *tr->current_trace;
+		/* Reopen the new current tracer */
+		if (iter->trace->open)
+			iter->trace->open(iter);
+	}
 	mutex_unlock(&trace_types_lock);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5172,11 +5179,17 @@ int tracing_set_cpumask(struct trace_array *tr,
 				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
 		}
 		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
 		}
 	}
 	arch_spin_unlock(&tr->max_lock);


@@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
+	else
+		iter->private = NULL;
 }
 
 static void irqsoff_trace_close(struct trace_iterator *iter)


@@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
 {
 	if (is_graph(iter->tr))
 		graph_trace_open(iter);
+	else
+		iter->private = NULL;
 }
 
 static void wakeup_trace_close(struct trace_iterator *iter)


@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
 }
 EXPORT_SYMBOL(__clzsi2);
 
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
 {
-	return 32 - fls((int)val);
+	return 64 - fls64(val);
 }
 EXPORT_SYMBOL(__clzdi2);
 
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
 {
-	return __ffs((u32)val);
+	return __ffs64(val);
 }
 EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
-	return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
-	return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
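A worked example of the bug being fixed: with BITS_PER_LONG == 32, `long` is 32 bits, so the old `__clzdi2()` silently truncated its 64-bit argument. For val = 1ULL << 40, `(int)val` is 0, the old code returned 32 - fls(0) = 32, while the correct count of leading zeros is 23. A small userspace sketch, with GCC builtins standing in for the kernel's fls64()/__ffs64():

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's fls64() and __ffs64(). */
static int fls64(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }
static int ffs64(uint64_t x) { return __builtin_ctzll(x); } /* x must be != 0 */

int main(void)
{
	uint64_t val = 1ULL << 40;

	/* The old 32-bit code only saw the low word: (int)val == 0 here,
	 * so __clzdi2() returned 32 - fls(0) = 32 and __ctzdi2() hit
	 * undefined behaviour. The fixed versions use all 64 bits. */
	printf("clzdi2(1<<40) = %d\n", 64 - fls64(val)); /* prints 23 */
	printf("ctzdi2(1<<40) = %d\n", ffs64(val));      /* prints 40 */
	return 0;
}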


@@ -1134,7 +1134,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 void __rcu **radix_tree_iter_resume(void __rcu **slot,
 				    struct radix_tree_iter *iter)
 {
-	slot++;
 	iter->index = __radix_tree_iter_add(iter, 1);
 	iter->next_index = iter->index;
 	iter->tags = 0;


@@ -2219,16 +2219,6 @@ static int soft_offline_in_use_page(struct page *page)
 	return __soft_offline_page(page);
 }
 
-static int soft_offline_free_page(struct page *page)
-{
-	int rc = 0;
-
-	if (!page_handle_poison(page, true, false))
-		rc = -EBUSY;
-	return rc;
-}
-
 static void put_ref_page(struct page *page)
 {
 	if (page)
@@ -2294,10 +2284,13 @@ retry:
 	if (ret > 0) {
 		ret = soft_offline_in_use_page(page);
 	} else if (ret == 0) {
-		if (soft_offline_free_page(page) && try_again) {
-			try_again = false;
-			flags &= ~MF_COUNT_INCREASED;
-			goto retry;
+		if (!page_handle_poison(page, true, false)) {
+			if (try_again) {
+				try_again = false;
+				flags &= ~MF_COUNT_INCREASED;
+				goto retry;
+			}
+			ret = -EBUSY;
 		}
 	}


@@ -2838,6 +2838,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
 		free_vm_area(area);
 		return NULL;
 	}
+
+	flush_cache_vmap((unsigned long)area->addr,
+			 (unsigned long)area->addr + count * PAGE_SIZE);
+
 	return area->addr;
 }
 EXPORT_SYMBOL_GPL(vmap_pfn);


@@ -507,7 +507,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_elp_packet *elp_packet;
 	struct batadv_hard_iface *primary_if;
-	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	struct ethhdr *ethhdr;
 	bool res;
 	int ret = NET_RX_DROP;
@@ -515,6 +515,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb,
 	if (!res)
 		goto free_skb;
 
+	ethhdr = eth_hdr(skb);
 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto free_skb;


@@ -124,8 +124,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (hard_iface->if_status != BATADV_IF_ACTIVE)
+	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
+		kfree_skb(skb);
 		return;
+	}
 
 	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
 	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
@@ -986,7 +988,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_ogm2_packet *ogm_packet;
-	struct ethhdr *ethhdr = eth_hdr(skb);
+	struct ethhdr *ethhdr;
 	int ogm_offset;
 	u8 *packet_pos;
 	int ret = NET_RX_DROP;
@@ -1000,6 +1002,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
 		goto free_skb;
 
+	ethhdr = eth_hdr(skb);
 	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
 		goto free_skb;


@@ -627,7 +627,19 @@ out:
  */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
-	soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
+	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+	int limit_mtu;
+	int mtu;
+
+	mtu = batadv_hardif_min_mtu(soft_iface);
+
+	if (bat_priv->mtu_set_by_user)
+		limit_mtu = bat_priv->mtu_set_by_user;
+	else
+		limit_mtu = ETH_DATA_LEN;
+
+	mtu = min(mtu, limit_mtu);
+	dev_set_mtu(soft_iface, mtu);
 
 	/* Check if the local translate table should be cleaned up to match a
 	 * new (and smaller) MTU.

@@ -495,7 +495,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info)
 		attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED];
 
 		atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr));
+
+		rtnl_lock();
 		batadv_update_min_mtu(bat_priv->soft_iface);
+		rtnl_unlock();
 	}
 
 	if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {


@@ -154,11 +154,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 {
+	struct batadv_priv *bat_priv = netdev_priv(dev);
+
 	/* check ranges */
 	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
+	bat_priv->mtu_set_by_user = new_mtu;
 
 	return 0;
 }


@@ -774,7 +774,6 @@ check_roaming:
 		if (roamed_back) {
 			batadv_tt_global_free(bat_priv, tt_global,
 					      "Roaming canceled");
-			tt_global = NULL;
 		} else {
 			/* The global entry has to be marked as ROAMING and
 			 * has to be kept for consistency purpose


@@ -1546,6 +1546,12 @@ struct batadv_priv {
 	/** @soft_iface: net device which holds this struct as private data */
 	struct net_device *soft_iface;
 
+	/**
+	 * @mtu_set_by_user: MTU was set once by user
+	 * protected by rtnl_lock
+	 */
+	int mtu_set_by_user;
+
 	/**
 	 * @bat_counters: mesh internal traffic statistic counters (see
 	 *  batadv_counters)


@@ -83,6 +83,7 @@ struct raw_sock {
 	struct sock sk;
 	int bound;
 	int ifindex;
+	struct net_device *dev;
 	struct list_head notifier;
 	int loopback;
 	int recv_own_msgs;
@@ -275,21 +276,24 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg,
 	if (!net_eq(dev_net(dev), sock_net(sk)))
 		return;
 
-	if (ro->ifindex != dev->ifindex)
+	if (ro->dev != dev)
 		return;
 
 	switch (msg) {
 	case NETDEV_UNREGISTER:
 		lock_sock(sk);
 		/* remove current filters & unregister */
-		if (ro->bound)
+		if (ro->bound) {
 			raw_disable_allfilters(dev_net(dev), dev, sk);
+			dev_put(dev);
+		}
 
 		if (ro->count > 1)
 			kfree(ro->filter);
 
 		ro->ifindex = 0;
 		ro->bound = 0;
+		ro->dev = NULL;
 		ro->count = 0;
 		release_sock(sk);
@@ -335,6 +339,7 @@ static int raw_init(struct sock *sk)
 	ro->bound = 0;
 	ro->ifindex = 0;
+	ro->dev = NULL;
 
 	/* set default filter to single entry dfilter */
 	ro->dfilter.can_id = 0;
@@ -380,18 +385,14 @@ static int raw_release(struct socket *sock)
 	list_del(&ro->notifier);
 	spin_unlock(&raw_notifier_lock);
 
+	rtnl_lock();
 	lock_sock(sk);
 
 	/* remove current filters & unregister */
 	if (ro->bound) {
-		if (ro->ifindex) {
-			struct net_device *dev;
-
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (dev) {
-				raw_disable_allfilters(dev_net(dev), dev, sk);
-				dev_put(dev);
-			}
+		if (ro->dev) {
+			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
+			dev_put(ro->dev);
 		} else {
 			raw_disable_allfilters(sock_net(sk), NULL, sk);
 		}
@@ -402,6 +403,7 @@ static int raw_release(struct socket *sock)
 
 	ro->ifindex = 0;
 	ro->bound = 0;
+	ro->dev = NULL;
 	ro->count = 0;
 	free_percpu(ro->uniq);
@@ -409,6 +411,8 @@ static int raw_release(struct socket *sock)
 	sock->sk = NULL;
 
 	release_sock(sk);
+	rtnl_unlock();
+
 	sock_put(sk);
 
 	return 0;
@@ -419,6 +423,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
 	struct sock *sk = sock->sk;
 	struct raw_sock *ro = raw_sk(sk);
+	struct net_device *dev = NULL;
 	int ifindex;
 	int err = 0;
 	int notify_enetdown = 0;
@@ -428,24 +433,23 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	if (addr->can_family != AF_CAN)
 		return -EINVAL;
 
+	rtnl_lock();
 	lock_sock(sk);
 
 	if (ro->bound && addr->can_ifindex == ro->ifindex)
 		goto out;
 
 	if (addr->can_ifindex) {
-		struct net_device *dev;
-
 		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
 		if (!dev) {
 			err = -ENODEV;
 			goto out;
 		}
 		if (dev->type != ARPHRD_CAN) {
-			dev_put(dev);
 			err = -ENODEV;
-			goto out;
+			goto out_put_dev;
 		}
+
 		if (!(dev->flags & IFF_UP))
 			notify_enetdown = 1;
@@ -453,7 +457,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 
 		/* filters set by default/setsockopt */
 		err = raw_enable_allfilters(sock_net(sk), dev, sk);
-		dev_put(dev);
+		if (err)
+			goto out_put_dev;
+
 	} else {
 		ifindex = 0;
@@ -464,26 +470,30 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
 	if (!err) {
 		if (ro->bound) {
 			/* unregister old filters */
-			if (ro->ifindex) {
-				struct net_device *dev;
-
-				dev = dev_get_by_index(sock_net(sk),
-						       ro->ifindex);
-				if (dev) {
-					raw_disable_allfilters(dev_net(dev),
-							       dev, sk);
-					dev_put(dev);
-				}
+			if (ro->dev) {
+				raw_disable_allfilters(dev_net(ro->dev),
+						       ro->dev, sk);
+				/* drop reference to old ro->dev */
+				dev_put(ro->dev);
 			} else {
 				raw_disable_allfilters(sock_net(sk), NULL, sk);
 			}
 		}
 		ro->ifindex = ifindex;
 		ro->bound = 1;
+		/* bind() ok -> hold a reference for new ro->dev */
+		ro->dev = dev;
+		if (ro->dev)
+			dev_hold(ro->dev);
 	}
 
- out:
+ out_put_dev:
+	/* remove potential reference from dev_get_by_index() */
+	if (dev)
+		dev_put(dev);
+ out:
 	release_sock(sk);
+	rtnl_unlock();
 
 	if (notify_enetdown) {
 		sk->sk_err = ENETDOWN;
@@ -549,9 +559,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex) {
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (!dev) {
+		dev = ro->dev;
+		if (ro->bound && dev) {
+			if (dev->reg_state != NETREG_REGISTERED) {
 				if (count > 1)
 					kfree(filter);
 				err = -ENODEV;
@@ -592,7 +602,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		ro->count = count;
 
  out_fil:
-		dev_put(dev);
 		release_sock(sk);
 		rtnl_unlock();
@@ -610,9 +619,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex) {
-			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
-			if (!dev) {
+		dev = ro->dev;
+		if (ro->bound && dev) {
+			if (dev->reg_state != NETREG_REGISTERED) {
 				err = -ENODEV;
 				goto out_err;
 			}
@@ -636,7 +645,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		ro->err_mask = err_mask;
 
  out_err:
-		dev_put(dev);
 		release_sock(sk);
 		rtnl_unlock();
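The ownership rule this patch establishes is simple: ro->dev is only written with the socket lock held (plus rtnl where needed) and carries exactly one device reference while the socket is bound. A reduced sketch of that hold/put discipline (illustrative only; example_rebind() is a hypothetical helper, not part of the patch, and the caller is assumed to hold its own reference to newdev, e.g. from dev_get_by_index()):

#include <linux/netdevice.h>

/* Illustrative only: swap the pinned device under the socket lock so
 * ro->dev always owns exactly one reference while it is non-NULL. */
static void example_rebind(struct raw_sock *ro, struct net_device *newdev)
{
	if (ro->dev)
		dev_put(ro->dev);	/* drop reference to the old device */
	ro->dev = newdev;
	if (ro->dev)
		dev_hold(ro->dev);	/* pin the new device until unbind */
}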


@@ -2173,13 +2173,27 @@ out_err:
 	return err;
 }
 
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
-			struct netlink_ext_ack *exterr)
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+			     struct netlink_ext_ack *exterr)
 {
-	return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy,
+	const struct ifinfomsg *ifmp;
+	const struct nlattr *attrs;
+	size_t len;
+
+	ifmp = nla_data(nla_peer);
+	attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg);
+	len = nla_len(nla_peer) - sizeof(struct ifinfomsg);
+
+	if (ifmp->ifi_index < 0) {
+		NL_SET_ERR_MSG_ATTR(exterr, nla_peer,
+				    "ifindex can't be negative");
+		return -EINVAL;
+	}
+
+	return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy,
 				    exterr);
 }
-EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg);
 
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
@@ -3280,6 +3294,7 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 	struct ifinfomsg *ifm;
 	char ifname[IFNAMSIZ];
 	struct nlattr **data;
+	bool link_specified;
 	int err;
 #ifdef CONFIG_MODULES
@@ -3300,12 +3315,19 @@ replay:
 		ifname[0] = '\0';
 
 	ifm = nlmsg_data(nlh);
-	if (ifm->ifi_index > 0)
+	if (ifm->ifi_index > 0) {
+		link_specified = true;
 		dev = __dev_get_by_index(net, ifm->ifi_index);
-	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
+	} else if (ifm->ifi_index < 0) {
+		NL_SET_ERR_MSG(extack, "ifindex can't be negative");
+		return -EINVAL;
+	} else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) {
+		link_specified = true;
 		dev = rtnl_dev_get(net, NULL, tb[IFLA_ALT_IFNAME], ifname);
-	else
+	} else {
+		link_specified = false;
 		dev = NULL;
+	}
 
 	master_dev = NULL;
 	m_ops = NULL;
@@ -3408,7 +3430,12 @@ replay:
 		}
 
 		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
-			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+			/* No dev found and NLM_F_CREATE not set. Requested dev does not exist,
+			 * or it's for a group
+			 */
+			if (link_specified)
+				return -ENODEV;
+			if (tb[IFLA_GROUP])
 				return rtnl_group_changelink(skb, net,
 						nla_get_u32(tb[IFLA_GROUP]),
 						ifm, extack, tb);


@@ -324,11 +324,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
 __poll_t dccp_poll(struct file *file, struct socket *sock,
 		   poll_table *wait)
 {
-	__poll_t mask;
 	struct sock *sk = sock->sk;
+	__poll_t mask;
+	u8 shutdown;
+	int state;
 
 	sock_poll_wait(file, sock, wait);
-	if (sk->sk_state == DCCP_LISTEN)
+
+	state = inet_sk_state_load(sk);
+	if (state == DCCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
@@ -337,20 +341,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
 	 */
 
 	mask = 0;
-	if (sk->sk_err)
+	if (READ_ONCE(sk->sk_err))
 		mask = EPOLLERR;
+	shutdown = READ_ONCE(sk->sk_shutdown);
 
-	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
+	if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED)
 		mask |= EPOLLHUP;
-	if (sk->sk_shutdown & RCV_SHUTDOWN)
+	if (shutdown & RCV_SHUTDOWN)
 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
 	/* Connected? */
-	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+	if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
 		if (atomic_read(&sk->sk_rmem_alloc) > 0)
 			mask |= EPOLLIN | EPOLLRDNORM;
 
-		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+		if (!(shutdown & SEND_SHUTDOWN)) {
 			if (sk_stream_is_writeable(sk)) {
 				mask |= EPOLLOUT | EPOLLWRNORM;
 			} else { /* send SIGIO later */
@@ -368,7 +373,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
 	}
 	return mask;
 }
-
 EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)


@@ -611,14 +611,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
 	return 0;
 }
 
-/* Response handler for Mellanox command Get Mac Address */
-static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
+/* Response handler for Get Mac Address command */
+static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
 {
 	struct ncsi_dev_priv *ndp = nr->ndp;
 	struct net_device *ndev = ndp->ndev.dev;
-	const struct net_device_ops *ops = ndev->netdev_ops;
 	struct ncsi_rsp_oem_pkt *rsp;
 	struct sockaddr saddr;
+	u32 mac_addr_off = 0;
 	int ret = 0;
 
 	/* Get the response header */
@@ -626,11 +626,25 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
 
 	saddr.sa_family = ndev->type;
 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-	memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN);
+
+	if (mfr_id == NCSI_OEM_MFR_BCM_ID)
+		mac_addr_off = BCM_MAC_ADDR_OFFSET;
+	else if (mfr_id == NCSI_OEM_MFR_MLX_ID)
+		mac_addr_off = MLX_MAC_ADDR_OFFSET;
+	else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
+		mac_addr_off = INTEL_MAC_ADDR_OFFSET;
+
+	memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
+	if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
+		eth_addr_inc((u8 *)saddr.sa_data);
+	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+		return -ENXIO;
+
 	/* Set the flag for GMA command which should only be called once */
 	ndp->gma_flag = 1;
 
-	ret = ops->ndo_set_mac_address(ndev, &saddr);
+	rtnl_lock();
+	ret = dev_set_mac_address(ndev, &saddr, NULL);
+	rtnl_unlock();
 	if (ret < 0)
 		netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
 
@@ -649,41 +663,10 @@ static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr)
 	if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA &&
 	    mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM)
-		return ncsi_rsp_handler_oem_mlx_gma(nr);
+		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_MLX_ID);
 
 	return 0;
 }
 
-/* Response handler for Broadcom command Get Mac Address */
-static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
-{
-	struct ncsi_dev_priv *ndp = nr->ndp;
-	struct net_device *ndev = ndp->ndev.dev;
-	const struct net_device_ops *ops = ndev->netdev_ops;
-	struct ncsi_rsp_oem_pkt *rsp;
-	struct sockaddr saddr;
-	int ret = 0;
-
-	/* Get the response header */
-	rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
-
-	saddr.sa_family = ndev->type;
-	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-	memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
-	/* Increase mac address by 1 for BMC's address */
-	eth_addr_inc((u8 *)saddr.sa_data);
-	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
-		return -ENXIO;
-
-	/* Set the flag for GMA command which should only be called once */
-	ndp->gma_flag = 1;
-
-	ret = ops->ndo_set_mac_address(ndev, &saddr);
-	if (ret < 0)
-		netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
-
-	return ret;
-}
-
 /* Response handler for Broadcom card */
 static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
 {
@@ -695,42 +678,10 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr)
 	bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data);
 	if (bcm->type == NCSI_OEM_BCM_CMD_GMA)
-		return ncsi_rsp_handler_oem_bcm_gma(nr);
+		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_BCM_ID);
 
 	return 0;
 }
 
-/* Response handler for Intel command Get Mac Address */
-static int ncsi_rsp_handler_oem_intel_gma(struct ncsi_request *nr)
-{
-	struct ncsi_dev_priv *ndp = nr->ndp;
-	struct net_device *ndev = ndp->ndev.dev;
-	const struct net_device_ops *ops = ndev->netdev_ops;
-	struct ncsi_rsp_oem_pkt *rsp;
-	struct sockaddr saddr;
-	int ret = 0;
-
-	/* Get the response header */
-	rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
-
-	saddr.sa_family = ndev->type;
-	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
-	memcpy(saddr.sa_data, &rsp->data[INTEL_MAC_ADDR_OFFSET], ETH_ALEN);
-	/* Increase mac address by 1 for BMC's address */
-	eth_addr_inc((u8 *)saddr.sa_data);
-	if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
-		return -ENXIO;
-
-	/* Set the flag for GMA command which should only be called once */
-	ndp->gma_flag = 1;
-
-	ret = ops->ndo_set_mac_address(ndev, &saddr);
-	if (ret < 0)
-		netdev_warn(ndev,
-			    "NCSI: 'Writing mac address to device failed\n");
-
-	return ret;
-}
-
 /* Response handler for Intel card */
 static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
 {
@@ -742,7 +693,7 @@ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr)
 	intel = (struct ncsi_rsp_oem_intel_pkt *)(rsp->data);
 	if (intel->cmd == NCSI_OEM_INTEL_CMD_GMA)
-		return ncsi_rsp_handler_oem_intel_gma(nr);
+		return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_INTEL_ID);
 
 	return 0;
 }


@@ -10303,7 +10303,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
 	deleted = 0;
 	mutex_lock(&nft_net->commit_mutex);
 	if (!list_empty(&nf_tables_destroy_list))
-		rcu_barrier();
+		nf_tables_trans_destroy_flush_work();
 again:
 	list_for_each_entry(table, &nft_net->tables, list) {
 		if (nft_table_has_owner(table) &&


@@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f)
 static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k,
 			 int mask_bits)
 {
-	int rule = f->rules++, group, ret, bit_offset = 0;
+	int rule = f->rules, group, ret, bit_offset = 0;
 
-	ret = pipapo_resize(f, f->rules - 1, f->rules);
+	ret = pipapo_resize(f, f->rules, f->rules + 1);
 	if (ret)
 		return ret;
 
+	f->rules++;
+
 	for (group = 0; group < f->groups; group++) {
 		int i, v;
 		u8 mask;
@@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f,
 			step++;
 			if (step >= len) {
 				if (!masks) {
-					pipapo_insert(f, base, 0);
+					err = pipapo_insert(f, base, 0);
+					if (err < 0)
+						return err;
 					masks = 1;
 				}
 				goto out;
@@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
 	else
 		ret = pipapo_expand(f, start, end, f->groups * f->bb);
 
+	if (ret < 0)
+		return ret;
+
 	if (f->bsize > bsize_max)
 		bsize_max = f->bsize;


@@ -1513,10 +1513,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 	return 0;
 }
 
+static bool req_create_or_replace(struct nlmsghdr *n)
+{
+	return (n->nlmsg_flags & NLM_F_CREATE &&
+		n->nlmsg_flags & NLM_F_REPLACE);
+}
+
+static bool req_create_exclusive(struct nlmsghdr *n)
+{
+	return (n->nlmsg_flags & NLM_F_CREATE &&
+		n->nlmsg_flags & NLM_F_EXCL);
+}
+
+static bool req_change(struct nlmsghdr *n)
+{
+	return (!(n->nlmsg_flags & NLM_F_CREATE) &&
+		!(n->nlmsg_flags & NLM_F_REPLACE) &&
+		!(n->nlmsg_flags & NLM_F_EXCL));
+}
+
 /*
  * Create/change qdisc.
  */
 
 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 			   struct netlink_ext_ack *extack)
 {
@@ -1613,27 +1631,35 @@ replay:
 				 *
 				 *   We know, that some child q is already
 				 *   attached to this parent and have choice:
-				 *   either to change it or to create/graft new one.
+				 *   1) change it or 2) create/graft new one.
+				 *   If the requested qdisc kind is different
+				 *   than the existing one, then we choose graft.
+				 *   If they are the same then this is "change"
+				 *   operation - just let it fallthrough..
 				 *
 				 *   1. We are allowed to create/graft only
-				 *   if CREATE and REPLACE flags are set.
+				 *   if the request is explicitly stating
+				 *   "please create if it doesn't exist".
 				 *
-				 *   2. If EXCL is set, requestor wanted to say,
-				 *   that qdisc tcm_handle is not expected
+				 *   2. If the request is to exclusive create
+				 *   then the qdisc tcm_handle is not expected
 				 *   to exist, so that we choose create/graft too.
 				 *
 				 *   3. The last case is when no flags are set.
+				 *   This will happen when for example tc
+				 *   utility issues a "change" command.
 				 *   Alas, it is sort of hole in API, we
 				 *   cannot decide what to do unambiguously.
-				 *   For now we select create/graft, if
-				 *   user gave KIND, which does not match existing.
+				 *   For now we select create/graft.
 				 */
-				if ((n->nlmsg_flags & NLM_F_CREATE) &&
-				    (n->nlmsg_flags & NLM_F_REPLACE) &&
-				    ((n->nlmsg_flags & NLM_F_EXCL) ||
-				     (tca[TCA_KIND] &&
-				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
-					goto create_n_graft;
+				if (tca[TCA_KIND] &&
+				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+					if (req_create_or_replace(n) ||
+					    req_create_exclusive(n))
+						goto create_n_graft;
+					else if (req_change(n))
+						goto create_n_graft2;
+				}
 			}
 		}
 	} else {
@@ -1667,6 +1693,7 @@ create_n_graft:
 		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
 		return -ENOENT;
 	}
+create_n_graft2:
 	if (clid == TC_H_INGRESS) {
 		if (dev_ingress_queue(dev)) {
 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
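The three helpers introduced above partition the NLM_F_* combinations a requester can send, which is what removes the ambiguity: a request naming a different KIND is grafted when it says "create" in some form, and only falls through to create_n_graft2 for a plain change. A standalone sketch of how the predicates classify the flag sets the tc utility is commonly assumed to send (illustrative only, mirroring the helpers above):

#include <stdbool.h>
#include <stdio.h>
#include <linux/netlink.h>

/* Userspace mirrors of the predicates added by the patch. */
static bool create_or_replace(__u16 f) { return (f & NLM_F_CREATE) && (f & NLM_F_REPLACE); }
static bool create_exclusive(__u16 f)  { return (f & NLM_F_CREATE) && (f & NLM_F_EXCL); }
static bool change(__u16 f)            { return !(f & (NLM_F_CREATE | NLM_F_REPLACE | NLM_F_EXCL)); }

int main(void)
{
	__u16 tc_add = NLM_F_CREATE | NLM_F_EXCL;        /* "tc qdisc add"     */
	__u16 tc_replace = NLM_F_CREATE | NLM_F_REPLACE; /* "tc qdisc replace" */
	__u16 tc_change = 0;                             /* "tc qdisc change"  */

	printf("add -> exclusive create: %d\n", create_exclusive(tc_add));       /* 1 */
	printf("replace -> create/replace: %d\n", create_or_replace(tc_replace)); /* 1 */
	printf("change -> plain change: %d\n", change(tc_change));               /* 1 */
	return 0;
}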

View File

@@ -97,7 +97,7 @@ struct percpu_counter sctp_sockets_allocated;
 
 static void sctp_enter_memory_pressure(struct sock *sk)
 {
-	sctp_memory_pressure = 1;
+	WRITE_ONCE(sctp_memory_pressure, 1);
 }


@@ -962,9 +962,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	if (!rep->rr_rdmabuf)
 		goto out_free;
 
-	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
-		goto out_free_regbuf;
-
 	rep->rr_cid.ci_completion_id =
 		atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);
@@ -983,8 +980,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	spin_unlock(&buf->rb_lock);
 	return rep;
 
-out_free_regbuf:
-	rpcrdma_regbuf_free(rep->rr_rdmabuf);
 out_free:
 	kfree(rep);
 out:
@@ -1391,6 +1386,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
 		rep = rpcrdma_rep_create(r_xprt, temp);
 		if (!rep)
 			break;
+		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
+			rpcrdma_rep_put(buf, rep);
+			break;
+		}
 
 		rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
 		trace_xprtrdma_post_recv(rep);


@@ -2011,6 +2011,7 @@ static int filename_trans_read_helper(struct policydb *p, void *fp)
 		if (!datum)
 			goto out;
 
+		datum->next = NULL;
 		*dst = datum;
 
 		/* ebitmap_read() will at least init the bitmap */
@@ -2023,7 +2024,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp)
 			goto out;
 
 		datum->otype = le32_to_cpu(buf[0]);
-		datum->next = NULL;
 
 		dst = &datum->next;
 	}


@@ -31,20 +31,51 @@ static unsigned long max_alloc_per_card = 32UL * 1024UL * 1024UL;
 module_param(max_alloc_per_card, ulong, 0644);
 MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
 
+static void __update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+	card->total_pcm_alloc_bytes += bytes;
+}
+
+static void update_allocated_size(struct snd_card *card, ssize_t bytes)
+{
+	mutex_lock(&card->memory_mutex);
+	__update_allocated_size(card, bytes);
+	mutex_unlock(&card->memory_mutex);
+}
+
+static void decrease_allocated_size(struct snd_card *card, size_t bytes)
+{
+	mutex_lock(&card->memory_mutex);
+	WARN_ON(card->total_pcm_alloc_bytes < bytes);
+	__update_allocated_size(card, -(ssize_t)bytes);
+	mutex_unlock(&card->memory_mutex);
+}
+
 static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
 			  size_t size, struct snd_dma_buffer *dmab)
 {
 	int err;
 
+	/* check and reserve the requested size */
+	mutex_lock(&card->memory_mutex);
 	if (max_alloc_per_card &&
-	    card->total_pcm_alloc_bytes + size > max_alloc_per_card)
+	    card->total_pcm_alloc_bytes + size > max_alloc_per_card) {
+		mutex_unlock(&card->memory_mutex);
 		return -ENOMEM;
+	}
+	__update_allocated_size(card, size);
+	mutex_unlock(&card->memory_mutex);
 
 	err = snd_dma_alloc_pages(type, dev, size, dmab);
 	if (!err) {
-		mutex_lock(&card->memory_mutex);
-		card->total_pcm_alloc_bytes += dmab->bytes;
-		mutex_unlock(&card->memory_mutex);
+		/* the actual allocation size might be bigger than requested,
+		 * and we need to correct the account
+		 */
+		if (dmab->bytes != size)
+			update_allocated_size(card, dmab->bytes - size);
+	} else {
+		/* take back on allocation failure */
+		decrease_allocated_size(card, size);
 	}
 	return err;
 }
@@ -53,10 +84,7 @@ static void do_free_pages(struct snd_card *card, struct snd_dma_buffer *dmab)
 {
 	if (!dmab->area)
 		return;
-	mutex_lock(&card->memory_mutex);
-	WARN_ON(card->total_pcm_alloc_bytes < dmab->bytes);
-	card->total_pcm_alloc_bytes -= dmab->bytes;
-	mutex_unlock(&card->memory_mutex);
+	decrease_allocated_size(card, dmab->bytes);
 	snd_dma_free_pages(dmab);
 	dmab->area = NULL;
 }
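The restructuring above implements a reserve-then-correct scheme: the limit check and the account update now happen atomically under memory_mutex before the allocation, and the account is adjusted afterwards once the real allocation size (or a failure) is known, closing the window in which two racing allocations could both pass the limit check. A generic sketch of the scheme (plain pthreads, not the ALSA code):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static size_t total;                  /* bytes currently accounted */
static const size_t limit = 32 << 20; /* cap, analogous to max_alloc_per_card */

/* Reserve before allocating, so check and update are one atomic step. */
static bool reserve(size_t size)
{
	bool ok = false;

	pthread_mutex_lock(&lock);
	if (total + size <= limit) {
		total += size;        /* optimistic reservation */
		ok = true;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

/* Correct the account once the real size (or a failure) is known. */
static void adjust(ptrdiff_t delta)
{
	pthread_mutex_lock(&lock);
	total += delta;               /* delta < 0 takes the reservation back */
	pthread_mutex_unlock(&lock);
}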


@@ -150,8 +150,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i
 void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { }
 #endif /* SUPPORT_JOYSTICK */
 
-static int snd_card_ymfpci_probe(struct pci_dev *pci,
-				 const struct pci_device_id *pci_id)
+static int __snd_card_ymfpci_probe(struct pci_dev *pci,
+				   const struct pci_device_id *pci_id)
 {
 	static int dev;
 	struct snd_card *card;
@@ -333,6 +333,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
 	return 0;
 }
 
+static int snd_card_ymfpci_probe(struct pci_dev *pci,
+				 const struct pci_device_id *pci_id)
+{
+	return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id));
+}
+
 static struct pci_driver ymfpci_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = snd_ymfpci_ids,


@@ -725,8 +725,11 @@ bool arch_is_retpoline(struct symbol *sym)
 
 bool arch_is_rethunk(struct symbol *sym)
 {
-	return !strcmp(sym->name, "__x86_return_thunk") ||
-	       !strcmp(sym->name, "srso_untrain_ret") ||
-	       !strcmp(sym->name, "srso_safe_ret") ||
-	       !strcmp(sym->name, "retbleed_return_thunk");
+	return !strcmp(sym->name, "__x86_return_thunk");
+}
+
+bool arch_is_embedded_insn(struct symbol *sym)
+{
+	return !strcmp(sym->name, "retbleed_return_thunk") ||
+	       !strcmp(sym->name, "srso_safe_ret");
 }


@@ -990,16 +990,33 @@ static int add_ignore_alternatives(struct objtool_file *file)
 	return 0;
 }
 
+/*
+ * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
+ * will be added to the .retpoline_sites section.
+ */
 __weak bool arch_is_retpoline(struct symbol *sym)
 {
 	return false;
 }
 
+/*
+ * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
+ * will be added to the .return_sites section.
+ */
 __weak bool arch_is_rethunk(struct symbol *sym)
 {
 	return false;
 }
 
+/*
+ * Symbols that are embedded inside other instructions, because sometimes crazy
+ * code exists. These are mostly ignored for validation purposes.
+ */
+__weak bool arch_is_embedded_insn(struct symbol *sym)
+{
+	return false;
+}
+
 #define NEGATIVE_RELOC ((void *)-1L)
 
 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
@@ -1235,7 +1252,7 @@ static int add_jump_destinations(struct objtool_file *file)
 			 * middle of another instruction.  Objtool only
 			 * knows about the outer instruction.
 			 */
-			if (sym && sym->return_thunk) {
+			if (sym && sym->embedded_insn) {
 				add_return_call(file, insn, false);
 				continue;
 			}
@@ -2066,6 +2083,9 @@ static int classify_symbols(struct objtool_file *file)
 		if (arch_is_rethunk(func))
 			func->return_thunk = true;
 
+		if (arch_is_embedded_insn(func))
+			func->embedded_insn = true;
+
 		if (!strcmp(func->name, "__fentry__"))
 			func->fentry = true;


@@ -89,6 +89,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base);
 
 bool arch_is_retpoline(struct symbol *sym);
 bool arch_is_rethunk(struct symbol *sym);
+bool arch_is_embedded_insn(struct symbol *sym);
 
 int arch_rewrite_retpolines(struct objtool_file *file);


@@ -60,6 +60,7 @@ struct symbol {
 	u8 return_thunk : 1;
 	u8 fentry : 1;
 	u8 kcov : 1;
+	u8 embedded_insn : 1;
 };
 
 struct reloc {