Merge tag 'v6.6.114' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroid-6.6.y

This is the 6.6.114 stable release

Change-Id: Ib8a998c659b719d601364eaf172493f627803d13
Mauro Ribeiro
2026-01-26 11:03:39 -03:00
127 changed files with 1542 additions and 923 deletions


@@ -187,6 +187,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3AE | #3312417 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-500 | #841119,826419 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | MMU-600 | #1076982,1209401| N/A |


@@ -25,6 +25,9 @@ seg6_require_hmac - INTEGER
Default is 0.
/proc/sys/net/ipv6/seg6_* variables:
====================================
seg6_flowlabel - INTEGER
Controls the behaviour of computing the flowlabel of outer
IPv6 header in case of SR T.encaps


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 113
SUBLEVEL = 114
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@@ -1094,6 +1094,7 @@ config ARM64_ERRATUM_3194386
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
* ARM Neoverse-V3AE erratum 3312417
On affected cores "MSR SSBS, #0" instructions may not affect
subsequent speculative instructions, which may permit unexpected
speculation.


@@ -93,6 +93,7 @@
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
@@ -180,6 +181,7 @@
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)


@@ -471,6 +471,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3AE),
{}
};
#endif


@@ -48,10 +48,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
post_kprobe_handler(p, kcb, regs);
}
static bool __kprobes arch_check_kprobe(struct kprobe *p)
static bool __kprobes arch_check_kprobe(unsigned long addr)
{
unsigned long tmp = (unsigned long)p->addr - p->offset;
unsigned long addr = (unsigned long)p->addr;
unsigned long tmp, offset;
/* start iterating at the closest preceding symbol */
if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
return false;
tmp = addr - offset;
while (tmp <= addr) {
if (tmp == addr)
@@ -70,7 +75,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long)insn & 0x1)
return -EILSEQ;
if (!arch_check_kprobe(p))
if (!arch_check_kprobe((unsigned long)p->addr))
return -EILSEQ;
/* copy instruction */
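
The rewritten arch_check_kprobe() scans forward from the start of the enclosing symbol, so a probe can no longer land inside the trailing half of a 32-bit RISC-V instruction. A minimal sketch of the same symbol-bounds lookup, assuming only CONFIG_KALLSYMS (symbol_start() is a hypothetical helper, not part of the patch):

#include <linux/kallsyms.h>

/* Hypothetical helper: return the address of the symbol containing
 * addr, or 0 if kallsyms does not know it. This is the point the new
 * arch_check_kprobe() starts its instruction-by-instruction scan from. */
static unsigned long symbol_start(unsigned long addr)
{
        unsigned long offset;

        if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
                return 0;
        return addr - offset;
}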


@@ -147,9 +147,26 @@ int set_blocksize(struct block_device *bdev, int size)
/* Don't change the size if it is same as current */
if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
/*
* Flush and truncate the pagecache before we reconfigure the
* mapping geometry because folio sizes are variable now. If a
* reader has already allocated a folio whose size is smaller
* than the new min_order but invokes readahead after the new
* min_order becomes visible, readahead will think there are
* "zero" blocks per folio and crash. Take the inode and
* invalidation locks to avoid racing with
* read/write/fallocate.
*/
inode_lock(bdev->bd_inode);
filemap_invalidate_lock(bdev->bd_inode->i_mapping);
sync_blockdev(bdev);
kill_bdev(bdev);
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
inode_unlock(bdev->bd_inode);
}
return 0;
}
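
Both locks are taken in the same order here and in the read/write/fallocate paths patched below, which is what makes the i_blkbits update safe against concurrent pagecache users. A condensed sketch of that ordering, using the same calls as the hunk above (set_blocksize_locked() is a hypothetical name, not the real helper):

/* Sketch: i_rwsem first, then the mapping's invalidate_lock, then
 * flush and drop the page cache around the i_blkbits update. */
static void set_blocksize_locked(struct block_device *bdev, int size)
{
        struct inode *inode = bdev->bd_inode;

        inode_lock(inode);
        filemap_invalidate_lock(inode->i_mapping);
        sync_blockdev(bdev);            /* write back dirty folios */
        kill_bdev(bdev);                /* drop the now-stale cache */
        inode->i_blkbits = blksize_bits(size);
        filemap_invalidate_unlock(inode->i_mapping);
        inode_unlock(inode);
}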


@@ -401,6 +401,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
inode_lock(bdev->bd_inode);
filemap_invalidate_lock(bdev->bd_inode->i_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
@@ -423,8 +424,10 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
GFP_KERNEL);
fail:
if (cmd == BLKRESETZONE)
if (cmd == BLKRESETZONE) {
filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
inode_unlock(bdev->bd_inode);
}
return ret;
}


@@ -681,7 +681,14 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = direct_write_fallback(iocb, from, ret,
blkdev_buffered_write(iocb, from));
} else {
/*
* Take i_rwsem and invalidate_lock to avoid racing with
* set_blocksize changing i_blkbits/folio order and punching
* out the pagecache.
*/
inode_lock_shared(bd_inode);
ret = blkdev_buffered_write(iocb, from);
inode_unlock_shared(bd_inode);
}
if (ret > 0)
@@ -693,6 +700,7 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
struct inode *bd_inode = bdev->bd_inode;
loff_t size = bdev_nr_bytes(bdev);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
@@ -728,7 +736,13 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
goto reexpand;
}
/*
* Take i_rwsem and invalidate_lock to avoid racing with set_blocksize
* changing i_blkbits/folio order and punching out the pagecache.
*/
inode_lock_shared(bd_inode);
ret = filemap_read(iocb, to, ret);
inode_unlock_shared(bd_inode);
reexpand:
if (unlikely(shorted))
@@ -771,6 +785,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
/*
@@ -811,6 +826,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
fail:
filemap_invalidate_unlock(inode->i_mapping);
inode_unlock(inode);
return error;
}


@@ -114,6 +114,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (err)
@@ -121,6 +122,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
fail:
filemap_invalidate_unlock(inode->i_mapping);
inode_unlock(inode);
return err;
}
@@ -146,12 +148,14 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
end > bdev_nr_bytes(bdev))
return -EINVAL;
inode_lock(bdev->bd_inode);
filemap_invalidate_lock(bdev->bd_inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
inode_unlock(bdev->bd_inode);
return err;
}
@@ -184,6 +188,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
return -EINVAL;
/* Invalidate the page cache, including dirty pages */
inode_lock(inode);
filemap_invalidate_lock(inode->i_mapping);
err = truncate_bdev_range(bdev, mode, start, end);
if (err)
@@ -194,6 +199,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
fail:
filemap_invalidate_unlock(inode->i_mapping);
inode_unlock(inode);
return err;
}


@@ -407,7 +407,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
return 0;
return -EINVAL;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;


@@ -1552,6 +1552,32 @@ out:
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_set_suspended_action(void *data)
{
pm_runtime_set_suspended(data);
}
/**
* devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_set_active_enabled(struct device *dev)
{
int err;
err = pm_runtime_set_active(dev);
if (err)
return err;
err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
if (err)
return err;
return devm_pm_runtime_enable(dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled);
static void pm_runtime_disable_action(void *data)
{
pm_runtime_dont_use_autosuspend(data);
@@ -1574,6 +1600,24 @@ int devm_pm_runtime_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
static void pm_runtime_put_noidle_action(void *data)
{
pm_runtime_put_noidle(data);
}
/**
* devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume.
*
* @dev: Device to handle.
*/
int devm_pm_runtime_get_noresume(struct device *dev)
{
pm_runtime_get_noresume(dev);
return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
}
EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
/**
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
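
Both new helpers exist so drivers can drop hand-rolled cleanup actions; the inv_icm42600 conversion later in this release is the first user. A sketch of a consumer, where foo_probe() is a hypothetical driver probe:

#include <linux/pm_runtime.h>

static int foo_probe(struct device *dev)
{
        int err;

        /* mark the device active and enable runtime PM; the matching
         * set_suspended/disable actions are queued via devres */
        err = devm_pm_runtime_set_active_enabled(dev);
        if (err)
                return err;

        /* hold a usage reference for the lifetime of the binding; the
         * put_noidle is likewise run by devres on unbind */
        return devm_pm_runtime_get_noresume(dev);
}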


@@ -511,6 +511,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BU Bluetooth devices */
{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |


@@ -344,6 +344,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
unsigned int transition_latency_ns = cppc_get_transition_latency(cpu);
if (transition_latency_ns == CPUFREQ_ETERNAL)
return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
return transition_latency_ns / NSEC_PER_USEC;
}
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -366,12 +376,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif


@@ -2285,10 +2285,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
mb(); /* make sure read happened */
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
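
atomic_set_release() orders the payload store before the flag store, and atomic_read_acquire() orders the flag load before the payload load, which is why the explicit mb() calls can go (see the gmc_v7/gmc_v8 hunks below). A generic sketch of the pairing, with fault_box as a made-up container around the driver's kfd_vm_fault_info type:

#include <linux/atomic.h>

struct fault_box {
        struct kfd_vm_fault_info info;  /* payload */
        atomic_t updated;               /* publication flag */
};

/* Producer (interrupt path): write the payload, then release-store
 * the flag so a consumer that sees 1 also sees the full payload. */
static void publish_fault(struct fault_box *b,
                          const struct kfd_vm_fault_info *f)
{
        b->info = *f;
        atomic_set_release(&b->updated, 1);
}

/* Consumer: acquire-load the flag; the payload read is ordered after. */
static bool consume_fault(struct fault_box *b, struct kfd_vm_fault_info *out)
{
        if (atomic_read_acquire(&b->updated) != 1)
                return false;
        *out = b->info;
        atomic_set_release(&b->updated, 0);
        return true;
}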


@@ -2012,7 +2012,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
if (!ret) {
if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else


@@ -1061,7 +1061,7 @@ static int gmc_v7_0_sw_init(void *handle)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
mb();
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;


@@ -1174,7 +1174,7 @@ static int gmc_v8_0_sw_init(void *handle)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1465,7 +1465,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
&& !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1481,8 +1481,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
mb();
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;


@@ -5437,8 +5437,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
thermal_data->sw_ctf_threshold = thermal_data->max;


@@ -120,8 +120,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)
}
/* Test for known Chip ID. */
if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
chipid[2] != REG_CHIPID2_VALUE) {
if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
chipid[0], chipid[1], chipid[2]);
return -EINVAL;


@@ -51,7 +51,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -81,13 +80,30 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
DRM_PLANE_TYPE_CURSOR,
};
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
/**
* decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: display and enhancement controller context
* @win: window to protect registers for
* @protect: 1 to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
unsigned int win, bool protect)
{
struct decon_context *ctx = crtc->ctx;
u32 bits, val;
if (ctx->suspended)
return;
bits = SHADOWCON_WINx_PROTECT(win);
val = readl(ctx->regs + SHADOWCON);
if (protect)
val |= bits;
else
val &= ~bits;
writel(val, ctx->regs + SHADOWCON);
}
static void decon_wait_for_vblank(struct decon_context *ctx)
{
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -100,25 +116,33 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
static void decon_clear_channels(struct decon_context *ctx)
{
struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
u32 val;
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
decon_shadow_protect_win(ctx, win, true);
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
ch_enabled = 1;
decon_shadow_protect_win(ctx, win, false);
}
}
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled)
decon_wait_for_vblank(ctx->crtc);
decon_wait_for_vblank(ctx);
}
static int decon_ctx_initialize(struct decon_context *ctx,
@@ -126,7 +150,7 @@ static int decon_ctx_initialize(struct decon_context *ctx,
{
ctx->drm_dev = drm_dev;
decon_clear_channels(ctx->crtc);
decon_clear_channels(ctx);
return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}
@@ -155,9 +179,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
if (ctx->suspended)
return;
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -219,9 +240,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return -EPERM;
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -244,9 +262,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return;
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -343,36 +358,11 @@ static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
/**
* decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: display and enhancement controller context
* @win: window to protect registers for
* @protect: 1 to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
unsigned int win, bool protect)
{
u32 bits, val;
bits = SHADOWCON_WINx_PROTECT(win);
val = readl(ctx->regs + SHADOWCON);
if (protect)
val |= bits;
else
val &= ~bits;
writel(val, ctx->regs + SHADOWCON);
}
static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -392,9 +382,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -482,9 +469,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
if (ctx->suspended)
return;
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -503,9 +487,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -533,9 +514,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
if (!ctx->suspended)
return;
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -549,8 +527,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -558,9 +534,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
if (ctx->suspended)
return;
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -570,8 +543,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -692,7 +663,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
ctx->suspended = true;
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings)


@@ -1281,9 +1281,16 @@ static int ct_receive(struct intel_guc_ct *ct)
static void ct_try_receive_message(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
int ret;
if (GEM_WARN_ON(!ct->enabled))
if (!ct->enabled) {
GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress);
return;
}
/* When interrupt disabled, message handling is not expected */
if (!guc->interrupts.enabled)
return;
ret = ct_receive(ct);


@@ -230,6 +230,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
set_bit(GMU_STATUS_FW_START, &gmu->status);
return ret;
}
@@ -460,9 +462,10 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
/* Wait for the register to finish posting */
wmb();
if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
return 0;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
@@ -489,6 +492,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
int ret;
u32 val;
if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
return;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
@@ -497,6 +503,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
@@ -617,8 +625,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
a6xx_rpmh_stop(gmu);
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
@@ -755,22 +761,18 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
} else {
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
if (state == GMU_COLD_BOOT) {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
@@ -909,6 +911,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
a6xx_rpmh_stop(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)


@@ -96,6 +96,12 @@ struct a6xx_gmu {
/* For power domain callback */
struct notifier_block pd_nb;
struct completion pd_gate;
/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
#define GMU_STATUS_FW_START 0
/* To track if PDC sleep seq was done */
#define GMU_STATUS_PDC_SLEEP 1
unsigned long status;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)


@@ -1209,14 +1209,16 @@ static int hw_init(struct msm_gpu *gpu)
/* Clear GBIF halt in case GX domain was not collapsed */
if (adreno_is_a619_holi(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_read(gpu, REG_A6XX_GBIF_HALT);
gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
/* Let's make extra sure that the GPU can access the memory.. */
mb();
gpu_read(gpu, REG_A6XX_RBBM_GPR0_CNTL);
} else if (a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_read(gpu, REG_A6XX_GBIF_HALT);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
/* Let's make extra sure that the GPU can access the memory.. */
mb();
gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT);
}
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);


@@ -984,7 +984,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
return format;
if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) {
drm_rect_width(dest) < 4 || drm_rect_height(dest) < 4) {
drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
drm_rect_width(dest), drm_rect_height(dest));


@@ -783,13 +783,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
dma_resv_assert_held(resv);
dma_resv_for_each_fence(&cursor, resv, usage, fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
if (ret) {
dma_fence_put(fence);
/*
* As drm_sched_job_add_dependency always consumes the fence
* reference (even when it fails), and dma_resv_for_each_fence
* is not obtaining one, we need to grab one before calling.
*/
ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
if (ret)
return ret;
}
}
return 0;
}
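
The point of the new comment is the calling convention: drm_sched_job_add_dependency() consumes one fence reference even when it fails. A caller that only borrows fences (as the resv cursor does) therefore takes its own reference up front; a hypothetical wrapper makes the convention explicit:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/* Sketch: hand the job a reference it owns (and drops) regardless of
 * the result, so no dma_fence_put() is needed on the error path. */
static int add_borrowed_fence(struct drm_sched_job *job,
                              struct dma_fence *fence)
{
        return drm_sched_job_add_dependency(job, dma_fence_get(fence));
}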


@@ -622,7 +622,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
return;
}
if (value == 0 || value < dev->battery_min || value > dev->battery_max)
if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0)
return;
if (value < dev->battery_min || value > dev->battery_max)
return;
capacity = hidinput_scale_battery_capacity(dev, value);


@@ -83,9 +83,8 @@ enum latency_mode {
HID_LATENCY_HIGH = 1,
};
#define MT_IO_FLAGS_RUNNING 0
#define MT_IO_FLAGS_ACTIVE_SLOTS 1
#define MT_IO_FLAGS_PENDING_SLOTS 2
#define MT_IO_SLOTS_MASK GENMASK(7, 0) /* reserve first 8 bits for slot tracking */
#define MT_IO_FLAGS_RUNNING 32
static const bool mtrue = true; /* default for true */
static const bool mfalse; /* default for false */
@@ -161,7 +160,11 @@ struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
struct hid_device *hdev; /* hid_device we're attached to */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING)
* first 8 bits are reserved for keeping the slot
* states, this is fine because we only support up
* to 250 slots (MT_MAX_MAXCONTACT)
*/
__u8 inputmode_value; /* InputMode HID feature value */
__u8 maxcontacts;
bool is_buttonpad; /* is this device a button pad? */
@@ -936,6 +939,7 @@ static void mt_release_pending_palms(struct mt_device *td,
for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
clear_bit(slotnum, app->pending_palm_slots);
clear_bit(slotnum, &td->mt_io_flags);
input_mt_slot(input, slotnum);
input_mt_report_slot_inactive(input);
@@ -967,12 +971,6 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
app->num_received = 0;
app->left_button_state = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
static int mt_compute_timestamp(struct mt_application *app, __s32 value)
@@ -1147,7 +1145,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
set_bit(slotnum, &td->mt_io_flags);
} else {
clear_bit(slotnum, &td->mt_io_flags);
}
return 0;
@@ -1282,7 +1282,7 @@ static void mt_touch_report(struct hid_device *hid,
* defect.
*/
if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mod_timer(&td->release_timer,
jiffies + msecs_to_jiffies(100));
else
@@ -1658,6 +1658,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_CP_CONSUMER_CONTROL:
case HID_GD_WIRELESS_RADIO_CTLS:
case HID_GD_SYSTEM_MULTIAXIS:
case HID_DG_PEN:
/* already handled by hid core */
break;
case HID_DG_TOUCHSCREEN:
@@ -1729,6 +1730,7 @@ static void mt_release_contacts(struct hid_device *hid)
for (i = 0; i < mt->num_slots; i++) {
input_mt_slot(input_dev, i);
input_mt_report_slot_inactive(input_dev);
clear_bit(i, &td->mt_io_flags);
}
input_mt_sync_frame(input_dev);
input_sync(input_dev);
@@ -1751,7 +1753,7 @@ static void mt_expired_timeout(struct timer_list *t)
*/
if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mt_release_contacts(hdev);
clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
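
With per-slot bits replacing the single ACTIVE/PENDING pair, "any contact still down?" becomes a plain mask test against the reserved low bits. A sketch of that bookkeeping, with hypothetical helpers (slot numbers are assumed to fit inside the reserved mask):

#include <linux/bitops.h>
#include <linux/bits.h>

#define SLOTS_MASK      GENMASK(7, 0)   /* low bits track slot state */

static void mark_slot(unsigned long *flags, unsigned int slot, bool touching)
{
        if (touching)
                set_bit(slot, flags);
        else
                clear_bit(slot, flags);
}

/* Decides whether the sticky-finger release timer must be re-armed. */
static bool any_slot_active(const unsigned long *flags)
{
        return *flags & SLOTS_MASK;
}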


@@ -126,9 +126,9 @@ struct inv_icm42600_suspended {
* @suspended: suspended sensors configuration.
* @indio_gyro: gyroscope IIO device.
* @indio_accel: accelerometer IIO device.
* @buffer: data transfer buffer aligned for DMA.
* @fifo: FIFO management structure.
* @timestamp: interrupt timestamps.
* @fifo: FIFO management structure.
* @buffer: data transfer buffer aligned for DMA.
*/
struct inv_icm42600_state {
struct mutex lock;
@@ -142,12 +142,12 @@ struct inv_icm42600_state {
struct inv_icm42600_suspended suspended;
struct iio_dev *indio_gyro;
struct iio_dev *indio_accel;
u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
struct inv_icm42600_fifo fifo;
struct {
s64 gyro;
s64 accel;
} timestamp;
struct inv_icm42600_fifo fifo;
u8 buffer[2] __aligned(IIO_DMA_MINALIGN);
};
/* Virtual register addresses: @bank on MSB (4 upper bits), @address on LSB */


@@ -567,20 +567,12 @@ static void inv_icm42600_disable_vdd_reg(void *_data)
static void inv_icm42600_disable_vddio_reg(void *_data)
{
struct inv_icm42600_state *st = _data;
const struct device *dev = regmap_get_device(st->map);
int ret;
struct device *dev = regmap_get_device(st->map);
ret = regulator_disable(st->vddio_supply);
if (ret)
dev_err(dev, "failed to disable vddio error %d\n", ret);
}
if (pm_runtime_status_suspended(dev))
return;
static void inv_icm42600_disable_pm(void *_data)
{
struct device *dev = _data;
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
regulator_disable(st->vddio_supply);
}
int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
@@ -677,16 +669,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq,
return ret;
/* setup runtime power management */
ret = pm_runtime_set_active(dev);
ret = devm_pm_runtime_set_active_enabled(dev);
if (ret)
return ret;
pm_runtime_get_noresume(dev);
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
pm_runtime_put(dev);
return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
return ret;
}
EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
@@ -697,17 +687,15 @@ EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, IIO_ICM42600);
static int inv_icm42600_suspend(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
int ret;
int ret = 0;
mutex_lock(&st->lock);
st->suspended.gyro = st->conf.gyro.mode;
st->suspended.accel = st->conf.accel.mode;
st->suspended.temp = st->conf.temp_en;
if (pm_runtime_suspended(dev)) {
ret = 0;
if (pm_runtime_suspended(dev))
goto out_unlock;
}
/* disable FIFO data streaming */
if (st->fifo.on) {
@@ -739,10 +727,13 @@ static int inv_icm42600_resume(struct device *dev)
struct inv_icm42600_state *st = dev_get_drvdata(dev);
struct inv_sensors_timestamp *gyro_ts = iio_priv(st->indio_gyro);
struct inv_sensors_timestamp *accel_ts = iio_priv(st->indio_accel);
int ret;
int ret = 0;
mutex_lock(&st->lock);
if (pm_runtime_suspended(dev))
goto out_unlock;
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
goto out_unlock;


@@ -361,7 +361,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,


@@ -589,7 +589,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe)
*
* TODO: Support secondary line buffer for downscaling YUV420 images.
*/
int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
{
/* Channel chaining requires both line and output buffer. */
const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF


@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -236,6 +235,65 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
{
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
struct mxc_isi_m2m *m2m = ctx->m2m;
int ret;
guard(mutex)(&m2m->lock);
if (m2m->usage_count == INT_MAX)
return -EOVERFLOW;
/*
* Acquire the pipe and initialize the channel with the first user of
* the M2M device.
*/
if (m2m->usage_count == 0) {
bool bypass = cap_pix->width == out_pix->width &&
cap_pix->height == out_pix->height &&
cap_info->encoding == out_info->encoding;
ret = mxc_isi_channel_acquire(m2m->pipe,
&mxc_isi_m2m_frame_write_done,
bypass);
if (ret)
return ret;
mxc_isi_channel_get(m2m->pipe);
}
m2m->usage_count++;
/*
* Allocate resources for the channel, counting how many users require
* buffer chaining.
*/
if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
ret = mxc_isi_channel_chain(m2m->pipe);
if (ret)
goto err_deinit;
m2m->chained_count++;
ctx->chained = true;
}
return 0;
err_deinit:
if (--m2m->usage_count == 0) {
mxc_isi_channel_put(m2m->pipe);
mxc_isi_channel_release(m2m->pipe);
}
return ret;
}
static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
unsigned int count)
{
@@ -265,6 +323,35 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
}
}
static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
{
struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
struct mxc_isi_m2m *m2m = ctx->m2m;
guard(mutex)(&m2m->lock);
/*
* If the last context is this one, reset it to make sure the device
* will be reconfigured when streaming is restarted.
*/
if (m2m->last_ctx == ctx)
m2m->last_ctx = NULL;
/* Free the channel resources if this is the last chained context. */
if (ctx->chained && --m2m->chained_count == 0)
mxc_isi_channel_unchain(m2m->pipe);
ctx->chained = false;
/* Turn off the light with the last user. */
if (--m2m->usage_count == 0) {
mxc_isi_channel_disable(m2m->pipe);
mxc_isi_channel_put(m2m->pipe);
mxc_isi_channel_release(m2m->pipe);
}
WARN_ON(m2m->usage_count < 0);
}
static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.queue_setup = mxc_isi_m2m_vb2_queue_setup,
.buf_init = mxc_isi_m2m_vb2_buffer_init,
@@ -272,8 +359,10 @@ static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.buf_queue = mxc_isi_m2m_vb2_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming,
.start_streaming = mxc_isi_m2m_vb2_start_streaming,
.stop_streaming = mxc_isi_m2m_vb2_stop_streaming,
.unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming,
};
static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
@@ -483,136 +572,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
return 0;
}
static int mxc_isi_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
struct mxc_isi_m2m *m2m = ctx->m2m;
bool bypass;
int ret;
if (q->streaming)
return 0;
mutex_lock(&m2m->lock);
if (m2m->usage_count == INT_MAX) {
ret = -EOVERFLOW;
goto unlock;
}
bypass = cap_pix->width == out_pix->width &&
cap_pix->height == out_pix->height &&
cap_info->encoding == out_info->encoding;
/*
* Acquire the pipe and initialize the channel with the first user of
* the M2M device.
*/
if (m2m->usage_count == 0) {
ret = mxc_isi_channel_acquire(m2m->pipe,
&mxc_isi_m2m_frame_write_done,
bypass);
if (ret)
goto unlock;
mxc_isi_channel_get(m2m->pipe);
}
m2m->usage_count++;
/*
* Allocate resources for the channel, counting how many users require
* buffer chaining.
*/
if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
ret = mxc_isi_channel_chain(m2m->pipe, bypass);
if (ret)
goto deinit;
m2m->chained_count++;
ctx->chained = true;
}
/*
* Drop the lock to start the stream, as the .device_run() operation
* needs to acquire it.
*/
mutex_unlock(&m2m->lock);
ret = v4l2_m2m_ioctl_streamon(file, fh, type);
if (ret) {
/* Reacquire the lock for the cleanup path. */
mutex_lock(&m2m->lock);
goto unchain;
}
q->streaming = true;
return 0;
unchain:
if (ctx->chained && --m2m->chained_count == 0)
mxc_isi_channel_unchain(m2m->pipe);
ctx->chained = false;
deinit:
if (--m2m->usage_count == 0) {
mxc_isi_channel_put(m2m->pipe);
mxc_isi_channel_release(m2m->pipe);
}
unlock:
mutex_unlock(&m2m->lock);
return ret;
}
static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
struct mxc_isi_m2m *m2m = ctx->m2m;
v4l2_m2m_ioctl_streamoff(file, fh, type);
if (!q->streaming)
return 0;
mutex_lock(&m2m->lock);
/*
* If the last context is this one, reset it to make sure the device
* will be reconfigured when streaming is restarted.
*/
if (m2m->last_ctx == ctx)
m2m->last_ctx = NULL;
/* Free the channel resources if this is the last chained context. */
if (ctx->chained && --m2m->chained_count == 0)
mxc_isi_channel_unchain(m2m->pipe);
ctx->chained = false;
/* Turn off the light with the last user. */
if (--m2m->usage_count == 0) {
mxc_isi_channel_disable(m2m->pipe);
mxc_isi_channel_put(m2m->pipe);
mxc_isi_channel_release(m2m->pipe);
}
WARN_ON(m2m->usage_count < 0);
mutex_unlock(&m2m->lock);
q->streaming = false;
return 0;
}
static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_querycap = mxc_isi_m2m_querycap,
@@ -633,8 +592,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_streamon = mxc_isi_m2m_streamon,
.vidioc_streamoff = mxc_isi_m2m_streamoff,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,


@@ -851,7 +851,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
/* Chain the channel if needed for wide resolutions. */
if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
ret = mxc_isi_channel_chain(pipe, bypass);
ret = mxc_isi_channel_chain(pipe);
if (ret)
mxc_isi_channel_release(pipe);
}


@@ -183,7 +183,7 @@ static void m_can_plat_remove(struct platform_device *pdev)
struct m_can_classdev *mcan_class = &priv->cdev;
m_can_class_unregister(mcan_class);
pm_runtime_disable(mcan_class->dev);
m_can_class_free_dev(mcan_class->net);
}


@@ -286,11 +286,6 @@ struct gs_host_frame {
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32
/* Maximum number of interfaces the driver supports per device.
* Current hardware only supports 3 interfaces. The future may vary.
*/
#define GS_MAX_INTF 3
struct gs_tx_context {
struct gs_can *dev;
unsigned int echo_id;
@@ -321,7 +316,6 @@ struct gs_can {
/* usb interface struct */
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
struct usb_device *udev;
@@ -333,9 +327,11 @@ struct gs_usb {
unsigned int hf_size_rx;
u8 active_channels;
u8 channel_cnt;
unsigned int pipe_in;
unsigned int pipe_out;
struct gs_can *canch[] __counted_by(channel_cnt);
};
/* 'allocate' a tx context.
@@ -596,7 +592,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
/* device reports out of range channel id */
if (hf->channel >= GS_MAX_INTF)
if (hf->channel >= parent->channel_cnt)
goto device_detach;
dev = parent->canch[hf->channel];
@@ -696,7 +692,7 @@ resubmit_urb:
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
device_detach:
for (rc = 0; rc < GS_MAX_INTF; rc++) {
for (rc = 0; rc < parent->channel_cnt; rc++) {
if (parent->canch[rc])
netif_device_detach(parent->canch[rc]->netdev);
}
@@ -1246,6 +1242,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
netdev->dev_id = channel;
netdev->dev_port = channel;
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
@@ -1457,17 +1454,19 @@ static int gs_usb_probe(struct usb_interface *intf,
icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
if (icount > type_max(parent->channel_cnt)) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
type_max(parent->channel_cnt));
return -EINVAL;
}
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
if (!parent)
return -ENOMEM;
parent->channel_cnt = icount;
init_usb_anchor(&parent->rx_submitted);
usb_set_intfdata(intf, parent);
@@ -1528,7 +1527,7 @@ static void gs_usb_disconnect(struct usb_interface *intf)
return;
}
for (i = 0; i < GS_MAX_INTF; i++)
for (i = 0; i < parent->channel_cnt; i++)
if (parent->canch[i])
gs_destroy_candev(parent->canch[i]);
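
struct_size() plus __counted_by() is the idiom this conversion lands on: the allocation size is computed overflow-safely from the runtime channel count, and fortify/UBSAN can bounds-check canch[] against channel_cnt. A sketch, with gs_usb_alloc_parent() as a hypothetical helper:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct gs_usb *gs_usb_alloc_parent(u8 icount)
{
        struct gs_usb *parent;

        /* sizeof(*parent) + icount * sizeof(parent->canch[0]),
         * with overflow checking folded into struct_size() */
        parent = kzalloc(struct_size(parent, canch, icount), GFP_KERNEL);
        if (!parent)
                return NULL;

        /* set the __counted_by() counter before canch[] is touched */
        parent->channel_cnt = icount;
        return parent;
}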


@@ -1172,7 +1172,6 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
pdata->phy_link = -1;
pdata->phy_speed = SPEED_UNKNOWN;
return pdata->phy_if.phy_reset(pdata);


@@ -1664,6 +1664,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
pdata->phy.duplex = DUPLEX_FULL;
}
pdata->phy_link = 0;
pdata->phy.link = 0;
pdata->phy.pause_autoneg = pdata->pause_autoneg;


@@ -5814,7 +5814,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;
u32 local_adv = 0, remote_adv = 0, sgsr;
if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) &&
@@ -5955,9 +5955,6 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
else
current_duplex = DUPLEX_HALF;
local_adv = 0;
remote_adv = 0;
if (bmcr & BMCR_ANENABLE) {
u32 common;


@@ -498,25 +498,34 @@ static int alloc_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
dma_addr_t addr;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (!skb) {
free_list(dev);
return -ENOMEM;
}
if (!skb)
goto err_free_list;
addr = dma_map_single(&np->pdev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(&np->pdev->dev, addr))
goto err_kfree_skb;
np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
np->rx_ring[i].fraginfo = cpu_to_le64(addr);
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
return 0;
err_kfree_skb:
dev_kfree_skb(np->rx_skbuff[i]);
np->rx_skbuff[i] = NULL;
err_free_list:
free_list(dev);
return -ENOMEM;
}
static void rio_hw_init(struct net_device *dev)
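
The fix adds the dma_mapping_error() check that every dma_map_single() needs before the address is handed to hardware, and unwinds the skb on failure. The pattern in miniature (map_rx_skb() is a hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int map_rx_skb(struct device *dev, struct sk_buff *skb,
                      unsigned int len, dma_addr_t *addr)
{
        *addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *addr)) {
                dev_kfree_skb(skb);     /* unwind before failing */
                return -ENOMEM;
        }
        return 0;
}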


@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_DEFINES_H_
#define _IXGBEVF_DEFINES_H_
@@ -16,6 +16,9 @@
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
#define IXGBE_DEV_ID_E610_VF 0x57AD
#define IXGBE_SUBDEV_ID_E610_VF_HV 0x00FF
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -25,6 +28,7 @@
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_100_FULL 0x0008


@@ -271,6 +271,9 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
return -EOPNOTSUPP;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
@@ -400,6 +403,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
return;
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
@@ -628,6 +634,10 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
size_t size;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_17:
if (!(adapter->pf_features & IXGBEVF_PF_SUP_IPSEC))
return;
break;
case ixgbe_mbox_api_14:
break;
default:


@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_
@@ -366,6 +366,13 @@ struct ixgbevf_adapter {
/* Interrupt Throttle Rate */
u32 eitr_param;
u32 pf_features;
#define IXGBEVF_PF_SUP_IPSEC BIT(0)
#define IXGBEVF_PF_SUP_ESX_MBX BIT(1)
#define IXGBEVF_SUPPORTED_FEATURES (IXGBEVF_PF_SUP_IPSEC | \
IXGBEVF_PF_SUP_ESX_MBX)
struct ixgbevf_hw_stats stats;
unsigned long state;
@@ -418,6 +425,8 @@ enum ixgbevf_boards {
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
board_x550em_a_vf,
board_e610_vf,
board_e610_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -434,11 +443,13 @@ extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_e610_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_e610_vf_hv_info;
extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
/* needed by ethtool.c */


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Copyright(c) 1999 - 2024 Intel Corporation. */
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
@@ -39,7 +39,7 @@ static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2018 Intel Corporation.";
"Copyright (c) 2009 - 2024 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -51,6 +51,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
[board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
[board_e610_vf] = &ixgbevf_e610_vf_info,
[board_e610_vf_hv] = &ixgbevf_e610_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -71,6 +73,9 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
{PCI_VDEVICE_SUB(INTEL, IXGBE_DEV_ID_E610_VF, PCI_ANY_ID,
IXGBE_SUBDEV_ID_E610_VF_HV), board_e610_vf_hv},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_E610_VF), board_e610_vf},
/* required last entry */
{0, }
};
@@ -2270,10 +2275,36 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
/**
* ixgbevf_set_features - Set features supported by PF
* @adapter: pointer to the adapter struct
*
* Negotiate with PF supported features and then set pf_features accordingly.
*/
static void ixgbevf_set_features(struct ixgbevf_adapter *adapter)
{
u32 *pf_features = &adapter->pf_features;
struct ixgbe_hw *hw = &adapter->hw;
int err;
err = hw->mac.ops.negotiate_features(hw, pf_features);
if (err && err != -EOPNOTSUPP)
netdev_dbg(adapter->netdev,
"PF feature negotiation failed.\n");
/* Address also pre API 1.7 cases */
if (hw->api_version == ixgbe_mbox_api_14)
*pf_features |= IXGBEVF_PF_SUP_IPSEC;
else if (hw->api_version == ixgbe_mbox_api_15)
*pf_features |= IXGBEVF_PF_SUP_ESX_MBX;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
ixgbe_mbox_api_17,
ixgbe_mbox_api_16,
ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
@@ -2293,7 +2324,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
if (hw->api_version >= ixgbe_mbox_api_15) {
ixgbevf_set_features(adapter);
if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX) {
hw->mbx.ops.init_params(hw);
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
@@ -2650,6 +2683,8 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_17:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -4644,6 +4679,8 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_17:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4694,6 +4731,9 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mac_X540_vf:
dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
break;
case ixgbe_mac_e610_vf:
dev_info(&pdev->dev, "Intel(R) E610 Virtual Function\n");
break;
case ixgbe_mac_82599_vf:
default:
dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");


@@ -66,6 +66,8 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
ixgbe_mbox_api_16, /* API version 1.6, linux/freebsd VF driver */
ixgbe_mbox_api_17, /* API version 1.7, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -102,6 +104,12 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
/* mailbox API, version 1.6 VF requests */
#define IXGBE_VF_GET_PF_LINK_STATE 0x11 /* request PF to send link info */
/* mailbox API, version 1.7 VF requests */
#define IXGBE_VF_FEATURES_NEGOTIATE 0x12 /* get features supported by PF*/
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Copyright(c) 1999 - 2024 Intel Corporation. */
#include "vf.h"
#include "ixgbevf.h"
@@ -313,6 +313,8 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
case ixgbe_mbox_api_17:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -382,6 +384,8 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
case ixgbe_mbox_api_17:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
@@ -552,6 +556,8 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
@@ -624,6 +630,85 @@ static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
return -EOPNOTSUPP;
}
/**
* ixgbevf_get_pf_link_state - Get PF's link status
* @hw: pointer to the HW structure
* @speed: link speed
* @link_up: indicate if link is up/down
*
* Ask PF to provide link_up state and speed of the link.
*
* Return: IXGBE_ERR_MBX in the case of mailbox error,
* -EOPNOTSUPP if the op is not supported or 0 on success.
*/
static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
u32 msgbuf[3] = {};
int err;
switch (hw->api_version) {
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
}
msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
err = IXGBE_ERR_MBX;
*speed = IXGBE_LINK_SPEED_UNKNOWN;
/* No need to set @link_up to false as it will be done by
* ixgbe_check_mac_link_vf().
*/
} else {
*speed = msgbuf[1];
*link_up = msgbuf[2];
}
return err;
}
/**
* ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
* @hw: pointer to the HW structure
* @pf_features: bitmask of features supported by PF
*
* Return: IXGBE_ERR_MBX in the case of mailbox error,
* -EOPNOTSUPP if the op is not supported or 0 on success.
*/
static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
{
u32 msgbuf[2] = {};
int err;
switch (hw->api_version) {
case ixgbe_mbox_api_17:
break;
default:
return -EOPNOTSUPP;
}
msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
ARRAY_SIZE(msgbuf));
if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
err = IXGBE_ERR_MBX;
*pf_features = 0x0;
} else {
*pf_features = msgbuf[1];
}
return err;
}
/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
@@ -658,6 +743,58 @@ mbx_err:
return err;
}
/**
* ixgbe_read_vflinks - Read VFLINKS register
* @hw: pointer to the HW structure
* @speed: link speed
* @link_up: indicate if link is up/down
*
* Get linkup status and link speed from the VFLINKS register.
*/
static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
/* if link status is down no point in checking to see if PF is up */
if (!(vflinks & IXGBE_LINKS_UP)) {
*link_up = false;
return;
}
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
if (hw->mac.type == ixgbe_mac_82599_vf) {
for (int i = 0; i < 5; i++) {
udelay(100);
vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (!(vflinks & IXGBE_LINKS_UP)) {
*link_up = false;
return;
}
}
}
/* We reached this point so there's link */
*link_up = true;
switch (vflinks & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
}
/**
* ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
* @hw: unused
@@ -702,10 +839,10 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
bool *link_up,
bool autoneg_wait_to_complete)
{
struct ixgbevf_adapter *adapter = hw->back;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = 0;
u32 links_reg;
u32 in_msg = 0;
/* If we were hit with a reset drop the link */
@@ -715,43 +852,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
if (!mac->get_link_status)
goto out;
/* if link status is down no point in checking to see if pf is up */
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
if (mac->type == ixgbe_mac_82599_vf) {
int i;
for (i = 0; i < 5; i++) {
udelay(100);
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
}
}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
if (hw->mac.type == ixgbe_mac_e610_vf) {
ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
if (ret_val)
goto out;
} else {
ixgbe_read_vflinks(hw, speed, link_up);
if (*link_up == false)
goto out;
}
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) {
if (hw->api_version >= ixgbe_mbox_api_15)
if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
mac->get_link_status = false;
goto out;
}
@@ -951,6 +1066,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_15:
case ixgbe_mbox_api_16:
case ixgbe_mbox_api_17:
break;
default:
return 0;
@@ -1005,6 +1122,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.negotiate_features = ixgbevf_negotiate_features_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
@@ -1076,3 +1194,13 @@ const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
.mac = ixgbe_mac_x550em_a_vf,
.mac_ops = &ixgbevf_mac_ops,
};
const struct ixgbevf_info ixgbevf_e610_vf_info = {
.mac = ixgbe_mac_e610_vf,
.mac_ops = &ixgbevf_mac_ops,
};
const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
.mac = ixgbe_mac_e610_vf,
.mac_ops = &ixgbevf_hv_mac_ops,
};

View File

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */
/* Copyright(c) 1999 - 2024 Intel Corporation. */
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
@@ -26,6 +26,7 @@ struct ixgbe_mac_operations {
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
int (*negotiate_features)(struct ixgbe_hw *hw, u32 *pf_features);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -54,6 +55,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_x550em_a_vf,
ixgbe_mac_e610,
ixgbe_mac_e610_vf,
ixgbe_num_macs
};

View File

@@ -4919,8 +4919,9 @@ static int rtl8169_resume(struct device *device)
if (!device_may_wakeup(tp_to_dev(tp)))
clk_prepare_enable(tp->clk);
/* Reportedly at least Asus X453MA truncates packets otherwise */
if (tp->mac_version == RTL_GIGA_MAC_VER_37)
/* Some chip versions may truncate packets without this initialization */
if (tp->mac_version == RTL_GIGA_MAC_VER_37 ||
tp->mac_version == RTL_GIGA_MAC_VER_46)
rtl_init_rxcfg(tp);
return rtl8169_runtime_resume(device);

View File

@@ -1940,13 +1940,19 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_regs = lan78xx_get_regs,
};
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
u8 addr[6];
int ret;
lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
if (ret < 0)
return ret;
ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
if (ret < 0)
return ret;
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1979,14 +1985,26 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
if (ret < 0)
return ret;
ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
if (ret < 0)
return ret;
}
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
if (ret < 0)
return ret;
ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
if (ret < 0)
return ret;
eth_hw_addr_set(dev->net, addr);
return 0;
}
/* MDIO read and write wrappers for phylib */
@@ -2910,8 +2928,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
}
} while (buf & HW_CFG_LRST_);
lan78xx_init_mac_address(dev);
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
if (ret < 0)
@@ -2920,6 +2936,10 @@ static int lan78xx_reset(struct lan78xx_net *dev)
dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
ret = lan78xx_init_mac_address(dev);
if (ret < 0)
return ret;
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
if (ret < 0)

View File

@@ -10104,7 +10104,12 @@ static int __init rtl8152_driver_init(void)
ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
if (ret)
return ret;
return usb_register(&rtl8152_driver);
ret = usb_register(&rtl8152_driver);
if (ret)
usb_deregister_device_driver(&rtl8152_cfgselector_driver);
return ret;
}
static void __exit rtl8152_driver_exit(void)
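The fix closes an unwind gap: if usb_register() fails, the earlier usb_register_device_driver() call used to stay registered, leaving a stale cfgselector driver behind on module-load failure. A generic sketch of the pairing rule, with the example_register_*() helpers purely hypothetical:

	/* Hedged sketch: every registration that succeeded must be undone
	 * when a later registration step fails, so a retry starts clean.
	 */
	static int __init example_module_init(void)
	{
		int ret;

		ret = example_register_a();	/* hypothetical step 1 */
		if (ret)
			return ret;

		ret = example_register_b();	/* hypothetical step 2 */
		if (ret)
			example_unregister_a();	/* unwind step 1 */

		return ret;
	}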

View File

@@ -131,12 +131,14 @@ void nvme_mpath_start_request(struct request *rq)
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns->head->disk;
if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) &&
!(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
atomic_inc(&ns->ctrl->nr_active);
nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
}
if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) ||
(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
return;
nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
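Both hunks guard against double accounting: a requeued request re-enters nvme_mpath_start_request(), so without the flag tests the queue-depth counter would be incremented twice (and leak on completion) and the I/O-stats start would run twice. Condensed, the idempotency pattern is:

	/* Hedged sketch: a per-request flag makes the accounting step
	 * idempotent across requeues; only the first pass acts.
	 */
	if (!(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) {
		atomic_inc(&ns->ctrl->nr_active);
		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
	}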

View File

@@ -48,6 +48,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -225,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
struct regmap *syscon)
{
struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
u32 mask = ACSPCIE_PAD_DISABLE_MASK;
struct of_phandle_args args;
u32 val;
int ret;
ret = of_parse_phandle_with_fixed_args(node,
"ti,syscon-acspcie-proxy-ctrl",
1, 0, &args);
if (ret) {
dev_err(dev,
"ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
return ret;
}
/* Clear PAD IO disable bits to enable refclk output */
val = ~(args.args[0]);
ret = regmap_update_bits(syscon, 0, mask, val);
if (ret) {
dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
return ret;
}
return 0;
}
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -246,6 +277,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
/*
* The PCIe Controller's registers have different "reset-values"
* depending on the "strap" settings programmed into the PCIEn_CTRL
* register within the CTRL_MMR memory-mapped register space.
* The registers latch onto a "reset-value" based on the "strap"
* settings sampled after the PCIe Controller is powered on.
* To ensure that the "reset-values" are sampled accurately, power
* off the PCIe Controller before programming the "strap" settings
* and power it on after that. The runtime PM APIs, namely
* pm_runtime_put_sync() and pm_runtime_get_sync(), will decrement and
* increment the usage counter respectively, causing GENPD to power off
* and power on the PCIe Controller.
*/
ret = pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to power off PCIe Controller\n");
return ret;
}
ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
@@ -264,7 +314,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
return 0;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to power on PCIe Controller\n");
return ret;
}
/* Enable ACSPCIE refclk output if the optional property exists */
syscon = syscon_regmap_lookup_by_phandle_optional(node,
"ti,syscon-acspcie-proxy-ctrl");
if (!syscon)
return 0;
return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
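The long comment spells out the ordering constraint; condensed, the power-cycle bracket looks like the sketch below, with example_program_straps() standing in for the set_mode/link-speed/lane-count writes (a sketch, not the literal driver flow):

	/* Hedged sketch: strap-latched registers are only re-sampled on a
	 * GENPD power cycle, so program the straps while powered off.
	 */
	ret = pm_runtime_put_sync(dev);		/* usage 1 -> 0: power off */
	if (ret < 0)
		return ret;

	ret = example_program_straps(pcie);	/* hypothetical helper */
	if (ret < 0)
		return ret;

	ret = pm_runtime_get_sync(dev);		/* usage 0 -> 1: power on */
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}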

View File

@@ -1963,6 +1963,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar;
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
};
static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
@@ -2036,6 +2045,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};

View File

@@ -186,9 +186,15 @@ static ssize_t max_link_speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
ssize_t ret;
return sysfs_emit(buf, "%s\n",
pci_speed_string(pcie_get_speed_cap(pdev)));
/* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
pci_config_pm_runtime_get(pdev);
ret = sysfs_emit(buf, "%s\n",
pci_speed_string(pcie_get_speed_cap(pdev)));
pci_config_pm_runtime_put(pdev);
return ret;
}
static DEVICE_ATTR_RO(max_link_speed);
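The same bracket belongs around any sysfs attribute that reads config space from a device that may be runtime-suspended (a config read to a D3cold device fails or returns all-ones). A minimal sketch:

	/* Hedged sketch: wake the device around the config access only;
	 * formatting the string needs no device access.
	 */
	enum pci_bus_speed cap;

	pci_config_pm_runtime_get(pdev);
	cap = pcie_get_speed_cap(pdev);		/* reads PCI_EXP_LNKCAP */
	pci_config_pm_runtime_put(pdev);

	return sysfs_emit(buf, "%s\n", pci_speed_string(cap));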

View File

@@ -30,6 +30,7 @@
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
@@ -79,6 +80,7 @@ struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
u32 hs_clk_rate;
unsigned int nlanes;
};
@@ -99,6 +101,8 @@ struct cdns_dphy_ops {
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
@@ -108,6 +112,8 @@ struct cdns_dphy {
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
bool is_configured;
bool is_powered;
};
/* Order of bands is important since the index is the band number. */
@@ -154,6 +160,9 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
cfg->pll_ipdiv,
pll_ref_hz);
cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
2 * cfg->pll_opdiv * cfg->pll_ipdiv);
return 0;
}
@@ -191,6 +200,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
return dphy->ops->get_wakeup_time_ns(dphy);
}
static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
{
return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
}
static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
{
return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
}
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
@@ -232,7 +251,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
u32 status;
/*
* set the PWM and PLL Byteclk divider settings to recommended values
@@ -249,13 +267,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
(status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
(status & DPHY_TX_WIZ_O_CMN_READY), 0,
POLL_TIMEOUT_US);
}
static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
@@ -263,6 +274,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
}
static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
{
u32 status;
return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
}
static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
{
u32 status;
return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
status & DPHY_TX_WIZ_O_CMN_READY, 0,
POLL_TIMEOUT_US);
}
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
@@ -278,6 +306,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
.set_psm_div = cdns_dphy_j721e_set_psm_div,
.wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
.wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
};
static int cdns_dphy_config_from_opts(struct phy *phy,
@@ -297,6 +327,7 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
if (ret)
return ret;
opts->hs_clk_rate = cfg->hs_clk_rate;
opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
return 0;
@@ -334,21 +365,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
struct cdns_dphy_cfg cfg = { 0 };
int ret, band_ctrl;
unsigned int reg;
int ret;
ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
if (ret)
return ret;
ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
if (!ret)
dphy->is_configured = true;
return ret;
}
static int cdns_dphy_power_on(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
int ret;
u32 reg;
if (!dphy->is_configured || dphy->is_powered)
return -EINVAL;
clk_prepare_enable(dphy->psm_clk);
clk_prepare_enable(dphy->pll_ref_clk);
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
ret = cdns_dphy_setup_psm(dphy);
if (ret)
return ret;
if (ret) {
dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
goto err_power_on;
}
/*
* Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
@@ -363,40 +409,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
cdns_dphy_set_pll_cfg(dphy, &cfg);
cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
if (band_ctrl < 0)
return band_ctrl;
ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
if (ret < 0) {
dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
goto err_power_on;
}
reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
writel(reg, dphy->regs + DPHY_BAND_CFG);
return 0;
}
static int cdns_dphy_power_on(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
clk_prepare_enable(dphy->psm_clk);
clk_prepare_enable(dphy->pll_ref_clk);
/* Start TX state machine. */
writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
reg = readl(dphy->regs + DPHY_CMN_SSM);
writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
dphy->regs + DPHY_CMN_SSM);
ret = cdns_dphy_wait_for_pll_lock(dphy);
if (ret) {
dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
goto err_power_on;
}
ret = cdns_dphy_wait_for_cmn_ready(dphy);
if (ret) {
dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
ret);
goto err_power_on;
}
dphy->is_powered = true;
return 0;
err_power_on:
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
return ret;
}
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
u32 reg;
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
/* Stop TX state machine. */
reg = readl(dphy->regs + DPHY_CMN_SSM);
writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
dphy->is_powered = false;
return 0;
}
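Splitting configure from power-on changes the consumer-visible contract: phy_configure() now only validates and caches, while phy_power_on() applies the cached settings and polls for PLL lock and O_CMN_READY, failing with -EINVAL if no configuration was cached first. A consumer-side sketch (names such as pixel_clk, bpp, lanes and dphy_phy are illustrative):

	/* Hedged sketch of the call order the driver now enforces. */
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_mipi_dphy_get_default_config(pixel_clk, bpp, lanes,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	ret = phy_configure(dphy_phy, &opts);	/* validate + cache */
	if (ret)
		return ret;

	return phy_power_on(dphy_phy);		/* PSM, PLL lock, O_CMN_READY */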

View File

@@ -11,12 +11,15 @@
/* #define VERBOSE_DEBUG */
#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/usb/gadget.h>
#include "u_serial.h"
@@ -612,6 +615,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_string *us;
int status;
struct usb_ep *ep;
struct usb_request *request __free(free_usb_request) = NULL;
/* REVISIT might want instance-specific strings to help
* distinguish instances ...
@@ -629,7 +633,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs, and patch descriptors */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
acm->ctrl_id = status;
acm_iad_descriptor.bFirstInterface = status;
@@ -638,40 +642,38 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
acm->data_id = status;
acm_data_interface_desc.bInterfaceNumber = status;
acm_union_desc.bSlaveInterface0 = status;
acm_call_mgmt_descriptor.bDataInterface = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
if (!ep)
goto fail;
return -ENODEV;
acm->port.in = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
if (!ep)
goto fail;
return -ENODEV;
acm->port.out = ep;
ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
if (!ep)
goto fail;
return -ENODEV;
acm->notify = ep;
/* allocate notification */
acm->notify_req = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
GFP_KERNEL);
if (!acm->notify_req)
goto fail;
request = gs_alloc_req(ep,
sizeof(struct usb_cdc_notification) + 2,
GFP_KERNEL);
if (!request)
return -ENODEV;
acm->notify_req->complete = acm_cdc_notify_complete;
acm->notify_req->context = acm;
request->complete = acm_cdc_notify_complete;
request->context = acm;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -688,7 +690,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
acm_ss_function, acm_ss_function);
if (status)
goto fail;
return status;
acm->notify_req = no_free_ptr(request);
dev_dbg(&cdev->gadget->dev,
"acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
@@ -696,14 +700,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->port.in->name, acm->port.out->name,
acm->notify->name);
return 0;
fail:
if (acm->notify_req)
gs_free_req(acm->notify, acm->notify_req);
ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
return status;
}
static void acm_unbind(struct usb_configuration *c, struct usb_function *f)
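This and the three gadget conversions that follow (f_ecm, f_ncm, f_rndis) all lean on the scope-based cleanup helpers from <linux/cleanup.h>: a __free() annotation releases the request on any early return, and no_free_ptr() hands ownership over exactly once on the success path (the gadget-core hunk further down, which records req->ep at allocation time, is what makes such a cleanup class possible). A self-contained sketch; the cleanup class here is named and defined for illustration and may differ from the exact upstream definition:

	#include <linux/cleanup.h>
	#include <linux/usb/gadget.h>

	/* Hedged sketch of a request-freeing cleanup class; it frees the
	 * buffer too, mirroring the old manual error path, and assumes
	 * the endpoint's alloc_request zero-initialises the request.
	 */
	DEFINE_FREE(example_free_usb_request, struct usb_request *,
		    if (_T) { kfree(_T->buf); usb_ep_free_request(_T->ep, _T); })

	static int example_bind_notify(struct usb_ep *ep,
				       struct usb_request **out_req)
	{
		struct usb_request *request __free(example_free_usb_request) =
			usb_ep_alloc_request(ep, GFP_KERNEL);

		if (!request)
			return -ENOMEM;

		request->buf = kmalloc(64, GFP_KERNEL);
		if (!request->buf)
			return -ENOMEM;	/* request freed automatically */

		*out_req = no_free_ptr(request);	/* success: keep it */
		return 0;
	}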

View File

@@ -8,12 +8,15 @@
/* #define VERBOSE_DEBUG */
#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/usb/gadget.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_ecm.h"
@@ -678,6 +681,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_ecm_opts *ecm_opts;
struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
@@ -711,7 +715,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
ecm->ctrl_id = status;
ecm_iad_descriptor.bFirstInterface = status;
@@ -720,24 +724,22 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
ecm->data_id = status;
ecm_data_nop_intf.bInterfaceNumber = status;
ecm_data_intf.bInterfaceNumber = status;
ecm_union_desc.bSlaveInterface0 = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc);
if (!ep)
goto fail;
return -ENODEV;
ecm->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc);
if (!ep)
goto fail;
return -ENODEV;
ecm->port.out_ep = ep;
/* NOTE: a status/notification endpoint is *OPTIONAL* but we
@@ -746,20 +748,18 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
*/
ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc);
if (!ep)
goto fail;
return -ENODEV;
ecm->notify = ep;
status = -ENOMEM;
/* allocate notification request and buffer */
ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!ecm->notify_req)
goto fail;
ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
if (!ecm->notify_req->buf)
goto fail;
ecm->notify_req->context = ecm;
ecm->notify_req->complete = ecm_notify_complete;
request = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!request)
return -ENOMEM;
request->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL);
if (!request->buf)
return -ENOMEM;
request->context = ecm;
request->complete = ecm_notify_complete;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -778,7 +778,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
ecm_ss_function, ecm_ss_function);
if (status)
goto fail;
return status;
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
@@ -788,20 +788,12 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
ecm->port.open = ecm_open;
ecm->port.close = ecm_close;
ecm->notify_req = no_free_ptr(request);
DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
ecm->port.in_ep->name, ecm->port.out_ep->name,
ecm->notify->name);
return 0;
fail:
if (ecm->notify_req) {
kfree(ecm->notify_req->buf);
usb_ep_free_request(ecm->notify, ecm->notify_req);
}
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item)

View File

@@ -11,6 +11,7 @@
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -19,6 +20,7 @@
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/gadget.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
@@ -1422,18 +1424,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_ncm_opts *ncm_opts;
struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
if (cdev->use_os_string) {
f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
GFP_KERNEL);
if (!f->os_desc_table)
os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
if (!os_desc_table)
return -ENOMEM;
f->os_desc_n = 1;
f->os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
}
mutex_lock(&ncm_opts->lock);
@@ -1443,16 +1445,15 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
mutex_unlock(&ncm_opts->lock);
if (status)
goto fail;
return status;
ncm_opts->bound = true;
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
if (IS_ERR(us)) {
status = PTR_ERR(us);
goto fail;
}
if (IS_ERR(us))
return PTR_ERR(us);
ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1462,55 +1463,47 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
ncm->ctrl_id = status;
ncm_iad_desc.bFirstInterface = status;
ncm_control_intf.bInterfaceNumber = status;
ncm_union_desc.bMasterInterface0 = status;
if (cdev->use_os_string)
f->os_desc_table[0].if_id =
ncm_iad_desc.bFirstInterface;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
ncm->data_id = status;
ncm_data_nop_intf.bInterfaceNumber = status;
ncm_data_intf.bInterfaceNumber = status;
ncm_union_desc.bSlaveInterface0 = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
if (!ep)
goto fail;
return -ENODEV;
ncm->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
if (!ep)
goto fail;
return -ENODEV;
ncm->port.out_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
if (!ep)
goto fail;
return -ENODEV;
ncm->notify = ep;
status = -ENOMEM;
/* allocate notification request and buffer */
ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!ncm->notify_req)
goto fail;
ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
if (!ncm->notify_req->buf)
goto fail;
ncm->notify_req->context = ncm;
ncm->notify_req->complete = ncm_notify_complete;
request = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!request)
return -ENOMEM;
request->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
if (!request->buf)
return -ENOMEM;
request->context = ncm;
request->complete = ncm_notify_complete;
/*
* support all relevant hardware speeds... we expect that when
@@ -1530,7 +1523,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
ncm_ss_function, ncm_ss_function);
if (status)
goto fail;
return status;
/*
* NOTE: all that is done without knowing or caring about
@@ -1544,23 +1537,18 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
if (cdev->use_os_string) {
os_desc_table[0].os_desc = &ncm_opts->ncm_os_desc;
os_desc_table[0].if_id = ncm_iad_desc.bFirstInterface;
f->os_desc_table = no_free_ptr(os_desc_table);
f->os_desc_n = 1;
}
ncm->notify_req = no_free_ptr(request);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
ncm->notify->name);
return 0;
fail:
kfree(f->os_desc_table);
f->os_desc_n = 0;
if (ncm->notify_req) {
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
}
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)

View File

@@ -19,6 +19,8 @@
#include <linux/atomic.h>
#include <linux/usb/gadget.h>
#include "u_ether.h"
#include "u_ether_configfs.h"
#include "u_rndis.h"
@@ -662,6 +664,8 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
struct f_rndis_opts *rndis_opts;
struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_rndis(c))
return -EINVAL;
@@ -669,12 +673,9 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
if (cdev->use_os_string) {
f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
GFP_KERNEL);
if (!f->os_desc_table)
os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
if (!os_desc_table)
return -ENOMEM;
f->os_desc_n = 1;
f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
}
rndis_iad_descriptor.bFunctionClass = rndis_opts->class;
@@ -692,16 +693,14 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
gether_set_gadget(rndis_opts->net, cdev->gadget);
status = gether_register_netdev(rndis_opts->net);
if (status)
goto fail;
return status;
rndis_opts->bound = true;
}
us = usb_gstrings_attach(cdev, rndis_strings,
ARRAY_SIZE(rndis_string_defs));
if (IS_ERR(us)) {
status = PTR_ERR(us);
goto fail;
}
if (IS_ERR(us))
return PTR_ERR(us);
rndis_control_intf.iInterface = us[0].id;
rndis_data_intf.iInterface = us[1].id;
rndis_iad_descriptor.iFunction = us[2].id;
@@ -709,36 +708,30 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
rndis->ctrl_id = status;
rndis_iad_descriptor.bFirstInterface = status;
rndis_control_intf.bInterfaceNumber = status;
rndis_union_desc.bMasterInterface0 = status;
if (cdev->use_os_string)
f->os_desc_table[0].if_id =
rndis_iad_descriptor.bFirstInterface;
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
return status;
rndis->data_id = status;
rndis_data_intf.bInterfaceNumber = status;
rndis_union_desc.bSlaveInterface0 = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
if (!ep)
goto fail;
return -ENODEV;
rndis->port.in_ep = ep;
ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
if (!ep)
goto fail;
return -ENODEV;
rndis->port.out_ep = ep;
/* NOTE: a status/notification endpoint is, strictly speaking,
@@ -747,21 +740,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
*/
ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
if (!ep)
goto fail;
return -ENODEV;
rndis->notify = ep;
status = -ENOMEM;
/* allocate notification request and buffer */
rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!rndis->notify_req)
goto fail;
rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
if (!rndis->notify_req->buf)
goto fail;
rndis->notify_req->length = STATUS_BYTECOUNT;
rndis->notify_req->context = rndis;
rndis->notify_req->complete = rndis_response_complete;
request = usb_ep_alloc_request(ep, GFP_KERNEL);
if (!request)
return -ENOMEM;
request->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
if (!request->buf)
return -ENOMEM;
request->length = STATUS_BYTECOUNT;
request->context = rndis;
request->complete = rndis_response_complete;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -778,7 +769,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
eth_ss_function, eth_ss_function);
if (status)
goto fail;
return status;
rndis->port.open = rndis_open;
rndis->port.close = rndis_close;
@@ -789,10 +780,19 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
if (rndis->manufacturer && rndis->vendorID &&
rndis_set_param_vendor(rndis->params, rndis->vendorID,
rndis->manufacturer)) {
status = -EINVAL;
goto fail_free_descs;
usb_free_all_descriptors(f);
return -EINVAL;
}
if (cdev->use_os_string) {
os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
os_desc_table[0].if_id = rndis_iad_descriptor.bFirstInterface;
f->os_desc_table = no_free_ptr(os_desc_table);
f->os_desc_n = 1;
}
rndis->notify_req = no_free_ptr(request);
/* NOTE: all that is done without knowing or caring about
* the network link ... which is unavailable to this code
* until we're activated via set_alt().
@@ -802,21 +802,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis->port.in_ep->name, rndis->port.out_ep->name,
rndis->notify->name);
return 0;
fail_free_descs:
usb_free_all_descriptors(f);
fail:
kfree(f->os_desc_table);
f->os_desc_n = 0;
if (rndis->notify_req) {
kfree(rndis->notify_req->buf);
usb_ep_free_request(rndis->notify, rndis->notify_req);
}
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)

View File

@@ -194,6 +194,9 @@ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
req = ep->ops->alloc_request(ep, gfp_flags);
if (req)
req->ep = ep;
trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
return req;

View File

@@ -985,7 +985,7 @@ static void btrfs_readahead_expand(struct readahead_control *ractl,
{
const u64 ra_pos = readahead_pos(ractl);
const u64 ra_end = ra_pos + readahead_length(ractl);
const u64 em_end = em->start + em->ram_bytes;
const u64 em_end = em->start + em->len;
/* No expansion for holes and inline extents. */
if (em->block_start > EXTENT_MAP_LAST_BYTE)

View File

@@ -1108,14 +1108,15 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
* If ret is 1 (no key found), it means this is an empty block group,
* without any extents allocated from it and there's no block group
* item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
* because we are using the block group tree feature, so block group
* items are stored in the block group tree. It also means there are no
* extents allocated for block groups with a start offset beyond this
* block group's end offset (this is the last, highest, block group).
* because we are using the block group tree feature (so block group
* items are stored in the block group tree) or this is a new block
* group created in the current transaction and its block group item
* was not yet inserted in the extent tree (that happens in
* btrfs_create_pending_block_groups() -> insert_block_group_item()).
* It also means there are no extents allocated for block groups with a
* start offset beyond this block group's end offset (this is the last,
* highest, block group).
*/
if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
ASSERT(ret == 0);
start = block_group->start;
end = block_group->start + block_group->length;
while (ret == 0) {

View File

@@ -3877,6 +3877,7 @@ out:
/*
* Mark start of chunk relocation that is cancellable. Check if the cancellation
* has been requested meanwhile and don't start in that case.
* NOTE: if this returns an error, reloc_chunk_end() must not be called.
*
* Return:
* 0 success
@@ -3893,10 +3894,8 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
btrfs_info(fs_info, "chunk relocation canceled on start");
/*
* On cancel, clear all requests but let the caller mark
* the end after cleanup operations.
*/
/* On cancel, clear all requests. */
clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
atomic_set(&fs_info->reloc_cancel_req, 0);
return -ECANCELED;
}
@@ -3905,9 +3904,11 @@ static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
/*
* Mark end of chunk relocation that is cancellable and wake any waiters.
* NOTE: call only if a previous call to reloc_chunk_start() succeeded.
*/
static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
{
ASSERT(test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags));
/* Requested after start, clear bit first so any waiters can continue */
if (atomic_read(&fs_info->reloc_cancel_req) > 0)
btrfs_info(fs_info, "chunk relocation canceled during operation");
@@ -4119,9 +4120,9 @@ out:
if (err && rw)
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
reloc_chunk_end(fs_info);
out_put_bg:
btrfs_put_block_group(bg);
reloc_chunk_end(fs_info);
free_reloc_control(rc);
return err;
}
@@ -4311,8 +4312,8 @@ out_clean:
err = ret;
out_unset:
unset_reloc_control(rc);
out_end:
reloc_chunk_end(fs_info);
out_end:
free_reloc_control(rc);
out:
free_reloc_roots(&reloc_roots);
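Both call-site rearrangements honour the contract the new comments state: reloc_chunk_end() now asserts BTRFS_FS_RELOC_RUNNING, so it must run exactly once, and never after a failed start. Condensed:

	/* Hedged sketch of the pairing contract after this fix. */
	ret = reloc_chunk_start(fs_info);
	if (ret < 0)
		return ret;	/* canceled: bit already cleared, do NOT
				 * call reloc_chunk_end() */

	/* ... relocation work, including its error paths ... */

	reloc_chunk_end(fs_info);	/* exactly once on every path past start */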

View File

@@ -1578,7 +1578,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
} else {
} else if (!sb_rdonly(iomi.inode->i_sb)) {
lockdep_assert_held(&iomi.inode->i_rwsem);
}

View File

@@ -1861,6 +1861,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
__dget_dlock(parent);
dentry->d_parent = parent;
list_add(&dentry->d_child, &parent->d_subdirs);
if (parent->d_flags & DCACHE_DISCONNECTED)
dentry->d_flags |= DCACHE_DISCONNECTED;
spin_unlock(&parent->d_lock);
return dentry;

View File

@@ -45,10 +45,10 @@
*
* 1) epnested_mutex (mutex)
* 2) ep->mtx (mutex)
* 3) ep->lock (rwlock)
* 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
* We need a rwlock (ep->lock) because we manipulate objects
* We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
@@ -194,7 +194,7 @@ struct eventpoll {
struct list_head rdllist;
/* Lock which protects rdllist and ovflist */
rwlock_t lock;
spinlock_t lock;
/* RB tree root used to store monitored fd structs */
struct rb_root_cached rbr;
@@ -206,7 +206,7 @@ struct eventpoll {
*/
struct epitem *ovflist;
/* wakeup_source used when ep_scan_ready_list is running */
/* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */
struct wakeup_source *ws;
/* The user that created the eventpoll descriptor */
@@ -625,10 +625,10 @@ static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
* in a lockless way.
*/
lockdep_assert_irqs_enabled();
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, txlist);
WRITE_ONCE(ep->ovflist, NULL);
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
}
static void ep_done_scan(struct eventpoll *ep,
@@ -636,7 +636,7 @@ static void ep_done_scan(struct eventpoll *ep,
{
struct epitem *epi, *nepi;
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
@@ -677,7 +677,7 @@ static void ep_done_scan(struct eventpoll *ep,
wake_up(&ep->wq);
}
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
}
static void epi_rcu_free(struct rcu_head *head)
@@ -757,10 +757,10 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
rb_erase_cached(&epi->rbn, &ep->rbr);
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
@@ -1018,7 +1018,7 @@ static int ep_alloc(struct eventpoll **pep)
return -ENOMEM;
mutex_init(&ep->mtx);
rwlock_init(&ep->lock);
spin_lock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
@@ -1105,100 +1105,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
}
#endif /* CONFIG_KCMP */
/*
* Adds a new entry to the tail of the list in a lockless way, i.e.
* multiple CPUs are allowed to call this function concurrently.
*
* Beware: it is necessary to prevent any other modifications of the
* existing list until all changes are completed, in other words
* concurrent list_add_tail_lockless() calls should be protected
* with a read lock, where write lock acts as a barrier which
* makes sure all list_add_tail_lockless() calls are fully
* completed.
*
* Also an element can be locklessly added to the list only in one
* direction i.e. either to the tail or to the head, otherwise
* concurrent access will corrupt the list.
*
* Return: %false if element has been already added to the list, %true
* otherwise.
*/
static inline bool list_add_tail_lockless(struct list_head *new,
struct list_head *head)
{
struct list_head *prev;
/*
* This is simple 'new->next = head' operation, but cmpxchg()
* is used in order to detect that same element has been just
* added to the list from another CPU: the winner observes
* new->next == new.
*/
if (!try_cmpxchg(&new->next, &new, head))
return false;
/*
* Initially ->next of a new element must be updated with the head
* (we are inserting to the tail) and only then pointers are atomically
* exchanged. XCHG guarantees memory ordering, thus ->next should be
* updated before pointers are actually swapped and pointers are
* swapped before prev->next is updated.
*/
prev = xchg(&head->prev, new);
/*
* It is safe to modify prev->next and new->prev, because a new element
* is added only to the tail and new->next is updated before XCHG.
*/
prev->next = new;
new->prev = prev;
return true;
}
/*
* Chains a new epi entry to the tail of the ep->ovflist in a lockless way,
* i.e. multiple CPUs are allowed to call this function concurrently.
*
* Return: %false if epi element has been already chained, %true otherwise.
*/
static inline bool chain_epi_lockless(struct epitem *epi)
{
struct eventpoll *ep = epi->ep;
/* Fast preliminary check */
if (epi->next != EP_UNACTIVE_PTR)
return false;
/* Check that the same epi has not been just chained from another CPU */
if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
return false;
/* Atomically exchange tail */
epi->next = xchg(&ep->ovflist, epi);
return true;
}
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
*
* This callback takes a read lock in order not to contend with concurrent
* events from another file descriptor, thus all modifications to ->rdllist
* or ->ovflist are lockless. Read lock is paired with the write lock from
* ep_scan_ready_list(), which stops all list modifications and guarantees
* that lists state is seen correctly.
*
* Another thing worth to mention is that ep_poll_callback() can be called
* concurrently for the same @epi from different CPUs if poll table was inited
* with several wait queues entries. Plural wakeup from different CPUs of a
* single wait queue is serialized by wq.lock, but the case when multiple wait
* queues are used should be detected accordingly. This is detected using
* cmpxchg() operation.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
@@ -1209,7 +1119,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
int ewake = 0;
read_lock_irqsave(&ep->lock, flags);
spin_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
@@ -1238,12 +1148,15 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
if (chain_epi_lockless(epi))
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = READ_ONCE(ep->ovflist);
WRITE_ONCE(ep->ovflist, epi);
ep_pm_stay_awake_rcu(epi);
}
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
ep_pm_stay_awake_rcu(epi);
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake_rcu(epi);
}
/*
@@ -1276,7 +1189,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
pwake++;
out_unlock:
read_unlock_irqrestore(&ep->lock, flags);
spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
@@ -1611,7 +1524,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
}
/* We have to drop the new item inside our item list to keep track of it */
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
@@ -1628,7 +1541,7 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
pwake++;
}
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
/* We have to call this outside the lock */
if (pwake)
@@ -1692,7 +1605,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1703,7 +1616,7 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
@@ -1792,7 +1705,7 @@ static int ep_send_events(struct eventpoll *ep,
* availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
* ep_send_events() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -1936,7 +1849,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
init_wait(&wait);
wait.func = ep_autoremove_wake_function;
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
@@ -1945,7 +1858,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
__set_current_state(TASK_INTERRUPTIBLE);
/*
* Do the final check under the lock. ep_scan_ready_list()
* Do the final check under the lock. ep_start/done_scan()
* plays with two lists (->rdllist and ->ovflist) and there
* is always a race when both lists are empty for short
* period of time although events are pending, so lock is
@@ -1955,7 +1868,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (!eavail)
__add_wait_queue_exclusive(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
if (!eavail)
timed_out = !schedule_hrtimeout_range(to, slack,
@@ -1970,7 +1883,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
eavail = 1;
if (!list_empty_careful(&wait.entry)) {
write_lock_irq(&ep->lock);
spin_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue,
* it means that the thread was woken up after its
@@ -1981,7 +1894,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
spin_unlock_irq(&ep->lock);
}
}
}
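The net effect of the revert: every ->rdllist and ->ovflist mutation is back under one irq-safe spinlock, so the lockless tail-add and cmpxchg-based ovflist chaining (and their long correctness comments) can go. The wakeup callback's hot path reduces to this condensed sketch:

	/* Hedged sketch of the post-revert ep_poll_callback() core; the
	 * spinlock alone serializes concurrent wakeups for one ep.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
		/* A scan is in flight: park the item on ovflist instead. */
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = READ_ONCE(ep->ovflist);
			WRITE_ONCE(ep->ovflist, epi);
		}
	} else if (!ep_is_linked(epi)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
	}
	spin_unlock_irqrestore(&ep->lock, flags);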

View File

@@ -277,9 +277,16 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
bh, is_metadata, inode->i_mode,
test_opt(inode->i_sb, DATA_FLAGS));
/* In the no journal case, we can just do a bforget and return */
/*
* In the no journal case, we should wait for the ongoing buffer
* to complete and do a forget.
*/
if (!ext4_handle_valid(handle)) {
bforget(bh);
if (bh) {
clear_buffer_dirty(bh);
wait_on_buffer(bh);
__bforget(bh);
}
return 0;
}
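The jbd2_journal_forget() hunk further down applies the same rule. A dirty buffer may already be under write-out; dropping it without waiting lets the in-flight write land after the forget, resurrecting stale data. Condensed:

	/* Hedged sketch of the ordering both fixes enforce. */
	clear_buffer_dirty(bh);	/* stop future writeback */
	wait_on_buffer(bh);	/* drain any write already in flight */
	__bforget(bh);		/* only now is discarding the data safe */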

View File

@@ -4944,6 +4944,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
ext4_set_inode_flags(inode, true);
/* Detect invalid flag combination - can't have both inline data and extents */
if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
ext4_error_inode(inode, function, line, 0,
"inode has both inline data and extents flags");
ret = -EFSCORRUPTED;
goto bad_inode;
}
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))

View File

@@ -2506,7 +2506,7 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
struct ext4_fs_context *m_ctx)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
char *s_mount_opts = NULL;
char s_mount_opts[65];
struct ext4_fs_context *s_ctx = NULL;
struct fs_context *fc = NULL;
int ret = -ENOMEM;
@@ -2514,15 +2514,11 @@ static int parse_apply_sb_mount_options(struct super_block *sb,
if (!sbi->s_es->s_mount_opts[0])
return 0;
s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
sizeof(sbi->s_es->s_mount_opts),
GFP_KERNEL);
if (!s_mount_opts)
return ret;
strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts, sizeof(s_mount_opts));
fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL);
if (!fc)
goto out_free;
return -ENOMEM;
s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL);
if (!s_ctx)
@@ -2554,11 +2550,8 @@ parse_failed:
ret = 0;
out_free:
if (fc) {
ext4_fc_free(fc);
kfree(fc);
}
kfree(s_mount_opts);
ext4_fc_free(fc);
kfree(fc);
return ret;
}

View File

@@ -1506,8 +1506,8 @@ static bool f2fs_map_blocks_cached(struct inode *inode,
struct f2fs_dev_info *dev = &sbi->devs[bidx];
map->m_bdev = dev->bdev;
map->m_pblk -= dev->start_blk;
map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
map->m_pblk -= dev->start_blk;
} else {
map->m_bdev = inode->i_sb->s_bdev;
}

View File

@@ -40,6 +40,18 @@ int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
if (len1 > HFSPLUS_MAX_STRLEN) {
len1 = HFSPLUS_MAX_STRLEN;
pr_err("invalid length %u has been corrected to %d\n",
be16_to_cpu(s1->length), len1);
}
if (len2 > HFSPLUS_MAX_STRLEN) {
len2 = HFSPLUS_MAX_STRLEN;
pr_err("invalid length %u has been corrected to %d\n",
be16_to_cpu(s2->length), len2);
}
while (1) {
c1 = c2 = 0;
@@ -74,6 +86,18 @@ int hfsplus_strcmp(const struct hfsplus_unistr *s1,
p1 = s1->unicode;
p2 = s2->unicode;
if (len1 > HFSPLUS_MAX_STRLEN) {
len1 = HFSPLUS_MAX_STRLEN;
pr_err("invalid length %u has been corrected to %d\n",
be16_to_cpu(s1->length), len1);
}
if (len2 > HFSPLUS_MAX_STRLEN) {
len2 = HFSPLUS_MAX_STRLEN;
pr_err("invalid length %u has been corrected to %d\n",
be16_to_cpu(s2->length), len2);
}
for (len = min(len1, len2); len > 0; len--) {
c1 = be16_to_cpu(*p1);
c2 = be16_to_cpu(*p2);
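With the identical clamp now open-coded four times across the two comparators, a shared helper would be the natural refactor; a hedged sketch only, not part of the patch:

	static u16 example_clamped_len(const struct hfsplus_unistr *s)
	{
		u16 len = be16_to_cpu(s->length);

		if (len > HFSPLUS_MAX_STRLEN) {
			pr_err("invalid length %u has been corrected to %d\n",
			       len, HFSPLUS_MAX_STRLEN);
			len = HFSPLUS_MAX_STRLEN;
		}
		return len;
	}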

View File

@@ -1649,6 +1649,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
int wait_for_writeback = 0;
if (is_handle_aborted(handle))
return -EROFS;
@@ -1772,18 +1773,22 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
}
/*
* The buffer is still not written to disk, we should
* attach this buffer to current transaction so that the
* buffer can be checkpointed only after the current
* transaction commits.
* The buffer has not yet been written to disk. We should
* either clear the buffer or ensure that the ongoing I/O
* is completed, and attach this buffer to current
* transaction so that the buffer can be checkpointed only
* after the current transaction commits.
*/
clear_buffer_dirty(bh);
wait_for_writeback = 1;
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
spin_unlock(&journal->j_list_lock);
}
drop:
__brelse(bh);
spin_unlock(&jh->b_state_lock);
if (wait_for_writeback)
wait_on_buffer(bh);
jbd2_journal_put_journal_head(jh);
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */

View File

@@ -117,7 +117,6 @@ static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
loff_t new_size = lcp->lc_last_wr + 1;
struct iattr iattr = { .ia_valid = 0 };
int error;
@@ -127,9 +126,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
if (new_size > i_size_read(inode)) {
if (lcp->lc_size_chg) {
iattr.ia_valid |= ATTR_SIZE;
iattr.ia_size = new_size;
iattr.ia_size = lcp->lc_newsize;
}
error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,

View File

@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(len);
*p++ = cpu_to_be32(1); /* we always return a single extent */
p = xdr_encode_opaque_fixed(p, &b->vol_id,
sizeof(struct nfsd4_deviceid));
p = svcxdr_encode_deviceid4(p, &b->vol_id);
p = xdr_encode_hyper(p, b->foff);
p = xdr_encode_hyper(p, b->len);
p = xdr_encode_hyper(p, b->soff);
@@ -145,9 +144,7 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
for (i = 0; i < nr_iomaps; i++) {
struct pnfs_block_extent bex;
memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
p = svcxdr_decode_deviceid4(p, &bex.vol_id);
p = xdr_decode_hyper(p, &bex.foff);
if (bex.foff & (block_size - 1)) {
dprintk("%s: unaligned offset 0x%llx\n",

View File

@@ -1071,28 +1071,62 @@ static struct svc_export *exp_find(struct cache_detail *cd,
return exp;
}
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
/**
* check_xprtsec_policy - check if access to export is allowed by the
* xprtsec policy
* @exp: svc_export that is being accessed.
* @rqstp: svc_rqst attempting to access @exp.
*
* Helper function for check_nfsd_access(). Note that callers should be
* using check_nfsd_access() instead of calling this function directly. The
* one exception is fh_verify() since it has logic that may result in one
* or both of the helpers being skipped.
*
* Return values:
* %nfs_ok if access is granted, or
* %nfserr_acces or %nfserr_wrongsec if access is denied
*/
__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
struct svc_xprt *xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
goto ok;
return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
!test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
goto ok;
return nfs_ok;
}
goto denied;
ok:
return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
}
/**
* check_security_flavor - check if access to export is allowed by the
* security flavor
* @exp: svc_export that is being accessed.
* @rqstp: svc_rqst attempting to access @exp.
*
* Helper function for check_nfsd_access(). Note that callers should be
* using check_nfsd_access() instead of calling this function directly. The
* one exception is fh_verify() since it has logic that may result in one
* or both of the helpers being skipped.
*
* Return values:
* %nfs_ok if access is granted, or
* %nfserr_acces or %nfserr_wrongsec if access is denied
*/
__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp)
{
struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
return 0;
@@ -1117,10 +1151,20 @@ ok:
if (nfsd4_spo_must_allow(rqstp))
return 0;
denied:
return rqstp->rq_vers < 4 ? nfserr_acces : nfserr_wrongsec;
}
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
{
__be32 status;
status = check_xprtsec_policy(exp, rqstp);
if (status != nfs_ok)
return status;
return check_security_flavor(exp, rqstp);
}
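Every existing caller keeps the old combined behaviour through check_nfsd_access(); the split exists so fh_verify() can sequence the two checks itself. A hedged sketch of such a caller, with may_bypass_flavor a purely illustrative condition:

	/* Hedged sketch: apply the transport policy unconditionally, the
	 * flavor check only when this access type requires it.
	 */
	__be32 status = check_xprtsec_policy(exp, rqstp);

	if (status != nfs_ok)
		return status;

	if (!may_bypass_flavor)		/* hypothetical, e.g. pseudoroot */
		status = check_security_flavor(exp, rqstp);

	return status;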
/*
* Uses rq_client and rq_gssclient to find an export; uses rq_client (an
* auth_unix client) if it's available and has secinfo information;

View File

@@ -100,6 +100,8 @@ struct svc_expkey {
#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp);
__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp);
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
/*

View File

@@ -125,6 +125,13 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
return 0;
}
static __be32
nfsd4_ff_proc_layoutcommit(struct inode *inode,
struct nfsd4_layoutcommit *lcp)
{
return nfs_ok;
}
const struct nfsd4_layout_ops ff_layout_ops = {
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
@@ -133,4 +140,5 @@ const struct nfsd4_layout_ops ff_layout_ops = {
.encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
.proc_layoutget = nfsd4_ff_proc_layoutget,
.encode_layoutget = nfsd4_ff_encode_layoutget,
.proc_layoutcommit = nfsd4_ff_proc_layoutcommit,
};

View File

@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(1); /* single mirror */
*p++ = cpu_to_be32(1); /* single data server */
p = xdr_encode_opaque_fixed(p, &fl->deviceid,
sizeof(struct nfsd4_deviceid));
p = svcxdr_encode_deviceid4(p, &fl->deviceid);
*p++ = cpu_to_be32(1); /* efficiency */

View File

@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
id->generation = device_generation;
id->pad = 0;
return 0;
}

View File

@@ -2308,7 +2308,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
@@ -2324,18 +2323,20 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
goto out;
inode = d_inode(current_fh->fh_dentry);
nfserr = nfserr_inval;
if (new_size <= seg->offset) {
dprintk("pnfsd: last write before layout segment\n");
goto out;
}
if (new_size > seg->offset + seg->length) {
dprintk("pnfsd: last write beyond layout segment\n");
goto out;
}
if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
dprintk("pnfsd: layoutcommit beyond EOF\n");
goto out;
lcp->lc_size_chg = false;
if (lcp->lc_newoffset) {
loff_t new_size = lcp->lc_last_wr + 1;
nfserr = nfserr_inval;
if (new_size <= seg->offset)
goto out;
if (new_size > seg->offset + seg->length)
goto out;
if (new_size > i_size_read(inode)) {
lcp->lc_size_chg = true;
lcp->lc_newsize = new_size;
}
}
nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
@@ -2352,13 +2353,6 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
/* LAYOUTCOMMIT does not require any serialization */
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
lcp->lc_size_chg = 1;
lcp->lc_newsize = new_size;
} else {
lcp->lc_size_chg = 0;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
nfs4_put_stid(&ls->ls_stid);
out:

View File

@@ -566,18 +566,6 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
}
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
struct nfsd4_deviceid *devid)
{
__be32 *p;
p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
if (!p)
return nfserr_bad_xdr;
memcpy(devid, p, sizeof(*devid));
return nfs_ok;
}
static __be32
nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
@@ -1733,7 +1721,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
__be32 status;
memset(gdev, 0, sizeof(*gdev));
status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)

View File

@@ -370,6 +370,16 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
if (error)
goto out;
/*
* NLM is allowed to bypass the xprtsec policy check because lockd
* doesn't support xprtsec.
*/
if (!(access & NFSD_MAY_LOCK)) {
error = check_xprtsec_policy(exp, rqstp);
if (error)
goto out;
}
/*
* pseudoflavor restrictions are not enforced on NLM,
* which clients virtually always use auth_sys for,
@@ -386,7 +396,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
&& exp->ex_path.dentry == dentry)
goto skip_pseudoflavor_check;
error = check_nfsd_access(exp, rqstp);
error = check_security_flavor(exp, rqstp);
if (error)
goto out;

View File

@@ -459,9 +459,43 @@ struct nfsd4_reclaim_complete {
struct nfsd4_deviceid {
u64 fsid_idx;
u32 generation;
u32 pad;
};
static inline __be32 *
svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
{
__be64 *q = (__be64 *)p;
*q = (__force __be64)devid->fsid_idx;
p += 2;
*p++ = (__force __be32)devid->generation;
*p++ = xdr_zero;
return p;
}
static inline __be32 *
svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
{
__be64 *q = (__be64 *)p;
devid->fsid_idx = (__force u64)(*q);
p += 2;
devid->generation = (__force u32)(*p++);
p++; /* NFSD does not use the remaining octets */
return p;
}
static inline __be32
nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
{
__be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
if (unlikely(!p))
return nfserr_bad_xdr;
svcxdr_decode_deviceid4(p, devid);
return nfs_ok;
}
struct nfsd4_layout_seg {
u32 iomode;
u64 offset;

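The two inline helpers above pin down the 16-byte (NFS4_DEVICEID4_SIZE) wire layout: eight octets carrying fsid_idx, four carrying generation, and four unused pad octets that are zeroed on encode and skipped on decode. A standalone round-trip sketch of that layout, using plain userspace types and memcpy in place of the kernel's __force __be32/__be64 raw stores (illustrative only; the device ID is opaque to the client, so no byte swapping is involved):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct deviceid { uint64_t fsid_idx; uint32_t generation; uint32_t pad; };

/* Pack into the 16-byte XDR area, mirroring svcxdr_encode_deviceid4(). */
static uint32_t *encode_deviceid(uint32_t *p, const struct deviceid *d)
{
	memcpy(p, &d->fsid_idx, sizeof(d->fsid_idx)); /* opaque to the client */
	p += 2;
	*p++ = d->generation;
	*p++ = 0; /* pad: always zero on encode */
	return p;
}

static uint32_t *decode_deviceid(uint32_t *p, struct deviceid *d)
{
	memcpy(&d->fsid_idx, p, sizeof(d->fsid_idx));
	p += 2;
	d->generation = *p++;
	p++; /* remaining octets unused, as in the kernel helper */
	return p;
}

int main(void)
{
	uint32_t buf[4];
	struct deviceid in = { .fsid_idx = 42, .generation = 7 }, out;

	encode_deviceid(buf, &in);
	decode_deviceid(buf, &out);
	assert(in.fsid_idx == out.fsid_idx && in.generation == out.generation);
	return 0;
}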
View File

@@ -680,8 +680,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
int blocksize;
int err;
down_write(&nilfs->ns_sem);
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
nilfs_err(sb, "unable to set blocksize");
@@ -757,7 +755,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
set_nilfs_init(nilfs);
err = 0;
out:
up_write(&nilfs->ns_sem);
return err;
failed_sbh:

View File

@@ -1571,15 +1571,13 @@ static int __init ocfs2_init(void)
ocfs2_set_locking_protocol();
status = register_quota_format(&ocfs2_quota_format);
if (status < 0)
goto out3;
register_quota_format(&ocfs2_quota_format);
status = register_filesystem(&ocfs2_fs_type);
if (!status)
return 0;
unregister_quota_format(&ocfs2_quota_format);
out3:
debugfs_remove(ocfs2_debugfs_root);
ocfs2_free_mem_caches();
out2:

View File

@@ -163,13 +163,15 @@ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
/* SLAB cache for dquot structures */
static struct kmem_cache *dquot_cachep;
int register_quota_format(struct quota_format_type *fmt)
/* workqueue for the quota_release_work work item */
static struct workqueue_struct *quota_unbound_wq;
void register_quota_format(struct quota_format_type *fmt)
{
spin_lock(&dq_list_lock);
fmt->qf_next = quota_formats;
quota_formats = fmt;
spin_unlock(&dq_list_lock);
return 0;
}
EXPORT_SYMBOL(register_quota_format);
@@ -892,7 +894,7 @@ void dqput(struct dquot *dquot)
put_releasing_dquots(dquot);
atomic_dec(&dquot->dq_count);
spin_unlock(&dq_list_lock);
queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
queue_delayed_work(quota_unbound_wq, &quota_release_work, 1);
}
EXPORT_SYMBOL(dqput);
@@ -3047,6 +3049,11 @@ static int __init dquot_init(void)
if (register_shrinker(&dqcache_shrinker, "dquota-cache"))
panic("Cannot register dquot shrinker");
quota_unbound_wq = alloc_workqueue("quota_events_unbound",
WQ_UNBOUND | WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
if (!quota_unbound_wq)
panic("Cannot create quota_unbound_wq\n");
return 0;
}
fs_initcall(dquot_init);

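The dquot change above moves quota release work off the shared system_unbound_wq onto a dedicated queue created with WQ_MEM_RECLAIM, which guarantees a rescuer thread so dquot release can still make forward progress when worker creation fails under memory pressure. The pattern in isolation, as a hedged kernel-style sketch (names are hypothetical and the work callback body is a placeholder):

#include <linux/init.h>
#include <linux/workqueue.h>

static void my_release_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_release_work, my_release_fn);
static struct workqueue_struct *my_unbound_wq;

static void my_release_fn(struct work_struct *work)
{
	/* drain a list of deferred-release objects here */
}

static int __init my_init(void)
{
	/*
	 * WQ_MEM_RECLAIM: a rescuer thread keeps this queue making
	 * progress even when new workers cannot be spawned under reclaim.
	 */
	my_unbound_wq = alloc_workqueue("my_events_unbound",
					WQ_UNBOUND | WQ_MEM_RECLAIM,
					WQ_MAX_ACTIVE);
	if (!my_unbound_wq)
		return -ENOMEM;
	return 0;
}

/* Callers then defer the release by one jiffy, as dqput() does: */
static void my_put(void)
{
	queue_delayed_work(my_unbound_wq, &my_release_work, 1);
}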
View File

@@ -229,7 +229,8 @@ static struct quota_format_type v1_quota_format = {
static int __init init_v1_quota_format(void)
{
return register_quota_format(&v1_quota_format);
register_quota_format(&v1_quota_format);
return 0;
}
static void __exit exit_v1_quota_format(void)

View File

@@ -422,12 +422,9 @@ static struct quota_format_type v2r1_quota_format = {
static int __init init_v2_quota_format(void)
{
int ret;
ret = register_quota_format(&v2r0_quota_format);
if (ret)
return ret;
return register_quota_format(&v2r1_quota_format);
register_quota_format(&v2r0_quota_format);
register_quota_format(&v2r1_quota_format);
return 0;
}
static void __exit exit_v2_quota_format(void)

View File

@@ -2319,8 +2319,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
tcon = tlink_tcon(tlink);
server = tcon->ses->server;
if (!server->ops->rename)
return -ENOSYS;
if (!server->ops->rename) {
rc = -ENOSYS;
goto do_rename_exit;
}
/* try path-based rename first */
rc = server->ops->rename(xid, tcon, from_dentry,

View File

@@ -922,6 +922,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
char *data_end;
struct dfs_referral_level_3 *ref;
if (rsp_size < sizeof(*rsp)) {
cifs_dbg(VFS | ONCE,
"%s: header is malformed (size is %u, must be %zu)\n",
__func__, rsp_size, sizeof(*rsp));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
if (*num_of_nodes < 1) {
@@ -931,6 +939,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
goto parse_DFS_referrals_exit;
}
if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
cifs_dbg(VFS | ONCE,
"%s: malformed buffer (size is %u, must be at least %zu)\n",
__func__, rsp_size,
sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",

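The two added checks in parse_dfs_referrals() follow the standard pattern for validating a length-prefixed array in an untrusted buffer: first confirm the fixed header fits, then confirm header plus the advertised element count fits, and only then dereference the payload. Distilled into a standalone sketch (struct names are illustrative stand-ins, not the SMB definitions):

#include <stddef.h>
#include <stdint.h>

struct hdr  { uint16_t count; uint16_t flags; }; /* stand-in header */
struct elem { uint32_t v[4]; };                  /* stand-in element */

static int validate(const void *buf, size_t buf_size)
{
	const struct hdr *h = buf;
	size_t count;

	if (buf_size < sizeof(*h))	/* the header itself must fit */
		return -1;
	count = h->count;
	if (count < 1)
		return -1;
	/* size_t arithmetic cannot overflow for a 16-bit count */
	if (sizeof(*h) + count * sizeof(struct elem) > buf_size)
		return -1;
	return 0;			/* safe to walk the elements */
}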
View File

@@ -3072,8 +3072,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
free_xid(xid);
return ERR_PTR(rc);
goto put_tlink;
}
oparms = (struct cifs_open_parms) {
@@ -3105,6 +3104,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
@@ -3145,8 +3145,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path) {
rc = -ENOMEM;
free_xid(xid);
return rc;
goto put_tlink;
}
oparms = (struct cifs_open_parms) {
@@ -3167,6 +3166,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
}
put_tlink:
cifs_put_tlink(tlink);
free_xid(xid);
return rc;

View File

@@ -108,8 +108,9 @@ struct ksmbd_startup_request {
__u32 smb2_max_credits; /* MAX credits */
__u32 smbd_max_io_size; /* smbd read write size */
__u32 max_connections; /* Number of maximum simultaneous connections */
__s8 bind_interfaces_only;
__u32 max_ip_connections; /* Number of maximum connection per ip address */
__u32 reserved[125]; /* Reserved room */
__s8 reserved[499]; /* Reserved room */
__u32 ifc_list_sz; /* interfaces list size */
__s8 ____payload[];
} __packed;

View File

@@ -46,6 +46,7 @@ struct ksmbd_server_config {
unsigned int max_ip_connections;
char *conf[SERVER_CONF_WORK_GROUP + 1];
bool bind_interfaces_only;
};
extern struct ksmbd_server_config server_conf;

View File

@@ -38,6 +38,7 @@
#include "mgmt/user_session.h"
#include "mgmt/ksmbd_ida.h"
#include "ndr.h"
#include "transport_tcp.h"
static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
{
@@ -7790,6 +7791,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
if (netdev->type == ARPHRD_LOOPBACK)
continue;
if (!ksmbd_find_netdev_name_iface_list(netdev->name))
continue;
flags = dev_get_flags(netdev);
if (!(flags & IFF_RUNNING))
continue;

View File

@@ -327,6 +327,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
ret = ksmbd_set_netbios_name(req->netbios_name);
ret |= ksmbd_set_server_string(req->server_string);
ret |= ksmbd_set_work_group(req->work_group);
server_conf.bind_interfaces_only = req->bind_interfaces_only;
ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
req->ifc_list_sz);
out:

View File

@@ -551,30 +551,37 @@ out_clear:
return ret;
}
struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name)
{
struct interface *iface;
list_for_each_entry(iface, &iface_list, entry)
if (!strcmp(iface->name, netdev_name))
return iface;
return NULL;
}
static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct interface *iface;
int ret, found = 0;
int ret;
switch (event) {
case NETDEV_UP:
if (netif_is_bridge_port(netdev))
return NOTIFY_OK;
list_for_each_entry(iface, &iface_list, entry) {
if (!strcmp(iface->name, netdev->name)) {
found = 1;
if (iface->state != IFACE_STATE_DOWN)
break;
ret = create_socket(iface);
if (ret)
return NOTIFY_OK;
break;
}
iface = ksmbd_find_netdev_name_iface_list(netdev->name);
if (iface && iface->state == IFACE_STATE_DOWN) {
ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n",
iface->name);
ret = create_socket(iface);
if (ret)
return NOTIFY_OK;
}
if (!found && bind_additional_ifaces) {
if (!iface && bind_additional_ifaces) {
iface = alloc_iface(kstrdup(netdev->name, GFP_KERNEL));
if (!iface)
return NOTIFY_OK;
@@ -584,19 +591,19 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event,
}
break;
case NETDEV_DOWN:
list_for_each_entry(iface, &iface_list, entry) {
if (!strcmp(iface->name, netdev->name) &&
iface->state == IFACE_STATE_CONFIGURED) {
tcp_stop_kthread(iface->ksmbd_kthread);
iface->ksmbd_kthread = NULL;
mutex_lock(&iface->sock_release_lock);
tcp_destroy_socket(iface->ksmbd_socket);
iface->ksmbd_socket = NULL;
mutex_unlock(&iface->sock_release_lock);
iface = ksmbd_find_netdev_name_iface_list(netdev->name);
if (iface && iface->state == IFACE_STATE_CONFIGURED) {
ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n",
iface->name);
tcp_stop_kthread(iface->ksmbd_kthread);
iface->ksmbd_kthread = NULL;
mutex_lock(&iface->sock_release_lock);
tcp_destroy_socket(iface->ksmbd_socket);
iface->ksmbd_socket = NULL;
mutex_unlock(&iface->sock_release_lock);
iface->state = IFACE_STATE_DOWN;
break;
}
iface->state = IFACE_STATE_DOWN;
break;
}
break;
}
@@ -665,18 +672,6 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz)
int sz = 0;
if (!ifc_list_sz) {
struct net_device *netdev;
rtnl_lock();
for_each_netdev(&init_net, netdev) {
if (netif_is_bridge_port(netdev))
continue;
if (!alloc_iface(kstrdup(netdev->name, GFP_KERNEL))) {
rtnl_unlock();
return -ENOMEM;
}
}
rtnl_unlock();
bind_additional_ifaces = 1;
return 0;
}

View File

@@ -8,6 +8,7 @@
int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz);
void ksmbd_free_transport(struct ksmbd_transport *kt);
struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name);
int ksmbd_tcp_init(void);
void ksmbd_tcp_destroy(void);

View File

@@ -171,12 +171,40 @@ typedef struct xlog_rec_header {
__be32 h_prev_block; /* block number to previous LR : 4 */
__be32 h_num_logops; /* number of log operations in this LR : 4 */
__be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
/* new fields */
/* fields added by the Linux port: */
__be32 h_fmt; /* format of log record : 4 */
uuid_t h_fs_uuid; /* uuid of FS : 16 */
/* fields added for log v2: */
__be32 h_size; /* iclog size : 4 */
/*
* When h_size was added for log v2 support, it caused the structure to
* have a different size on i386 vs all other architectures, because the
* sum of the sizes of the members is not padded to the alignment of the
* largest __be64-sized member, and i386 has really odd struct alignment
* rules.
*
* Due to the way the log headers are laid out on-disk, that alone is
* not a problem, because the xlog_rec_header always sits alone in a
* BBSIZE'd area and the rest of that area is padded with zeroes.
* But xlog_cksum used to calculate the checksum based on the structure
* size, and thus gave different checksums for i386 vs the rest.
* We now do two checksum validation passes for both sizes to allow
* moving v5 file systems with unclean logs between i386 and other
* (little-endian) architectures.
*/
__u32 h_pad0;
} xlog_rec_header_t;
#ifdef __i386__
#define XLOG_REC_SIZE offsetofend(struct xlog_rec_header, h_size)
#define XLOG_REC_SIZE_OTHER sizeof(struct xlog_rec_header)
#else
#define XLOG_REC_SIZE sizeof(struct xlog_rec_header)
#define XLOG_REC_SIZE_OTHER offsetofend(struct xlog_rec_header, h_size)
#endif /* __i386__ */
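With the two macros in place, recovery can accept a record whose CRC was computed over either structure size. A hedged sketch of that double pass follows; xlog_cksum() here is a stand-in with an assumed extra header-size parameter (the real helper lives in fs/xfs/xfs_log_recover.c and its exact signature differs), and it compares against the header's CRC field, h_crc, which is part of the full structure though not visible in this hunk:

/* Stand-in declaration; signature is an assumption for this sketch. */
__le32 xlog_cksum(struct xlog_rec_header *rhead, char *dp, int len,
		  int hdr_size);

/*
 * Sketch only: try the native header size first, then the other
 * architecture's size, so a v5 log written on i386 verifies on
 * x86-64 and vice versa.
 */
static bool xlog_rec_crc_ok(struct xlog_rec_header *rhead,
			    char *data, int data_len)
{
	if (xlog_cksum(rhead, data, data_len, XLOG_REC_SIZE) == rhead->h_crc)
		return true;
	return xlog_cksum(rhead, data, data_len, XLOG_REC_SIZE_OTHER) ==
	       rhead->h_crc;
}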
typedef struct xlog_rec_ext_header {
__be32 xh_cycle; /* write cycle of log : 4 */
__be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */

Some files were not shown because too many files have changed in this diff.