Merge tag 'v6.6.118' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroid-6.6.y

This is the 6.6.118 stable release
Mauro Ribeiro
2026-01-26 11:08:36 -03:00
87 changed files with 649 additions and 403 deletions


@@ -50,18 +50,20 @@ patternProperties:
groups:
description:
Name of the pin group to use for the functions.
-$ref: /schemas/types.yaml#/definitions/string
-enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
-i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
-spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
-spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
-uart0_grp, uart1_grp, uart2_grp, uart3_grp,
-pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
-pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
-pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
-pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
-pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
-pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
+items:
+enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
+i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
+spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
+spi1_grp, spi2_grp, spi3_grp, spi4_grp, spi5_grp, spi6_grp,
+uart0_grp, uart1_grp, uart2_grp, uart3_grp,
+pwm0_gpio4_grp, pwm0_gpio8_grp, pwm0_gpio12_grp,
+pwm0_gpio16_grp, pwm1_gpio5_grp, pwm1_gpio9_grp,
+pwm1_gpio13_grp, pwm1_gpio17_grp, pwm2_gpio6_grp,
+pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
+pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
+pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
+minItems: 1
+maxItems: 8
drive-strength:
enum: [2, 4, 6, 8, 16, 24, 32]


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
-SUBLEVEL = 117
+SUBLEVEL = 118
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@@ -425,7 +425,7 @@ static void __do_ffa_mem_xfer(const u64 func_id,
DECLARE_REG(u32, npages_mbz, ctxt, 4);
struct ffa_composite_mem_region *reg;
struct ffa_mem_region *buf;
-u32 offset, nr_ranges;
+u32 offset, nr_ranges, checked_offset;
int ret = 0;
if (addr_mbz || npages_mbz || fraglen > len ||
@@ -460,7 +460,12 @@ static void __do_ffa_mem_xfer(const u64 func_id,
goto out_unlock;
}
-if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
+if (check_add_overflow(offset, sizeof(struct ffa_composite_mem_region), &checked_offset)) {
+ret = FFA_RET_INVALID_PARAMETERS;
+goto out_unlock;
+}
+if (fraglen < checked_offset) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
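For context: check_add_overflow(a, b, &sum) (from <linux/overflow.h>) evaluates to true when the addition wraps, storing the result in its third argument, so the subsequent bounds test only ever sees a mathematically valid sum. The mtdchar change later in this release applies the same pattern. A minimal illustrative sketch, not part of the patch:

#include <linux/overflow.h>
#include <linux/types.h>

/* Validate that [offset, offset + need) fits inside fraglen bytes
 * without being fooled by u32 wrap-around. */
static bool range_ok(u32 offset, u32 need, u32 fraglen)
{
        u32 end;

        if (check_add_overflow(offset, need, &end))
                return false;   /* offset + need wrapped past U32_MAX */

        return end <= fraglen;
}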


@@ -10,10 +10,6 @@
#include <linux/types.h>
-#ifndef __KERNEL__
-#include <stdint.h>
-#endif
/*
* For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
* 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
@@ -41,44 +37,44 @@ struct user_pt_regs {
} __attribute__((aligned(8)));
struct user_fp_state {
-uint64_t fpr[32];
-uint64_t fcc;
-uint32_t fcsr;
+__u64 fpr[32];
+__u64 fcc;
+__u32 fcsr;
};
struct user_lsx_state {
/* 32 registers, 128 bits width per register. */
-uint64_t vregs[32*2];
+__u64 vregs[32*2];
};
struct user_lasx_state {
/* 32 registers, 256 bits width per register. */
-uint64_t vregs[32*4];
+__u64 vregs[32*4];
};
struct user_lbt_state {
-uint64_t scr[4];
-uint32_t eflags;
-uint32_t ftop;
+__u64 scr[4];
+__u32 eflags;
+__u32 ftop;
};
struct user_watch_state {
-uint64_t dbg_info;
+__u64 dbg_info;
struct {
-uint64_t addr;
-uint64_t mask;
-uint32_t ctrl;
-uint32_t pad;
+__u64 addr;
+__u64 mask;
+__u32 ctrl;
+__u32 pad;
} dbg_regs[8];
};
struct user_watch_state_v2 {
-uint64_t dbg_info;
+__u64 dbg_info;
struct {
-uint64_t addr;
-uint64_t mask;
-uint32_t ctrl;
-uint32_t pad;
+__u64 addr;
+__u64 mask;
+__u32 ctrl;
+__u32 pad;
} dbg_regs[14];
};


@@ -51,11 +51,11 @@ static int __init pcibios_init(void)
*/
lsize = cpu_last_level_cache_line_size();
-BUG_ON(!lsize);
+if (lsize) {
-pci_dfl_cache_line_size = lsize >> 2;
+pci_dfl_cache_line_size = lsize >> 2;
-pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
+}
return 0;
}


@@ -241,16 +241,22 @@ mips_pci_controller:
#endif
/*
-* Setup the Malta max (2GB) memory for PCI DMA in host bridge
-* in transparent addressing mode.
+* Set up memory mapping in host bridge for PCI DMA masters,
+* in transparent addressing mode. For EVA use the Malta
+* maximum of 2 GiB memory in the alias space at 0x80000000
+* as per PHYS_OFFSET. Otherwise use 256 MiB of memory in
+* the regular space, avoiding mapping the PCI MMIO window
+* for DMA as it seems to confuse the system controller's
+* logic, causing PCI MMIO to stop working.
*/
-mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
-MSC_WRITE(MSC01_PCI_BAR0, mask);
-MSC_WRITE(MSC01_PCI_HEAD4, mask);
+mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000;
+MSC_WRITE(MSC01_PCI_BAR0,
+mask | PCI_BASE_ADDRESS_MEM_PREFETCH);
+MSC_WRITE(MSC01_PCI_HEAD4,
+PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH);
mask &= MSC01_PCI_BAR0_SIZE_MSK;
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
-MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET);
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==


@@ -1065,17 +1065,15 @@ static inline pte_t pte_mkhuge(pte_t pte)
#define IPTE_NODAT 0x400
#define IPTE_GUEST_ASCE 0x800
-static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
-unsigned long opt, unsigned long asce,
-int local)
+static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
{
unsigned long pto;
pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
-asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
+asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
: "+m" (*ptep)
-: [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
-[asce] "a" (asce), [m4] "i" (local));
+: [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
+[m4] "i" (local));
}
static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
@@ -1259,7 +1257,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
* A local RDP can be used to do the flush.
*/
if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
-__ptep_rdp(address, ptep, 0, 0, 1);
+__ptep_rdp(address, ptep, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault


@@ -312,9 +312,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
preempt_disable();
atomic_inc(&mm->context.flush_count);
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
-__ptep_rdp(addr, ptep, 0, 0, 1);
+__ptep_rdp(addr, ptep, 1);
else
-__ptep_rdp(addr, ptep, 0, 0, 0);
+__ptep_rdp(addr, ptep, 0);
/*
* PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
* means it is still valid and active, and must not be changed according


@@ -226,6 +226,24 @@ static bool need_sha_check(u32 cur_rev)
return true;
}
static bool cpu_has_entrysign(void)
{
unsigned int fam = x86_family(bsp_cpuid_1_eax);
unsigned int model = x86_model(bsp_cpuid_1_eax);
if (fam == 0x17 || fam == 0x19)
return true;
if (fam == 0x1a) {
if (model <= 0x2f ||
(0x40 <= model && model <= 0x4f) ||
(0x60 <= model && model <= 0x6f))
return true;
}
return false;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
@@ -233,7 +251,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;
-if (x86_family(bsp_cpuid_1_eax) < 0x17)
+if (!cpu_has_entrysign())
return true;
if (!need_sha_check(cur_rev))
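cpu_has_entrysign() decides, per family/model, whether the microcode image carries a SHA-256 digest to verify. The x86_family()/x86_model() helpers decode CPUID leaf 1 EAX, folding in the extended fields that put AMD Zen parts at families 0x17-0x1a. A simplified sketch of that decoding, mirroring the kernel helpers (illustrative only):

/* CPUID.1:EAX layout: [3:0] stepping, [7:4] model, [11:8] family,
 * [19:16] extended model, [27:20] extended family. */
static unsigned int sketch_family(unsigned int sig)
{
        unsigned int fam = (sig >> 8) & 0xf;

        if (fam == 0xf)
                fam += (sig >> 20) & 0xff;      /* extended family */
        return fam;
}

static unsigned int sketch_model(unsigned int sig)
{
        unsigned int model = (sig >> 4) & 0xf;

        if (sketch_family(sig) >= 0x6)
                model += ((sig >> 16) & 0xf) << 4;      /* extended model */
        return model;
}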


@@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
return;
}
if (ata_id_is_locked(dev->id)) {
/* Security locked */
/* LOGICAL UNIT ACCESS NOT AUTHORIZED */
ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
return;
}
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"Missing result TF: reporting aborted command\n");
@@ -4831,8 +4838,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
if (do_resume) {
ret = scsi_resume_device(sdev);
-if (ret == -EWOULDBLOCK)
+if (ret == -EWOULDBLOCK) {
+scsi_device_put(sdev);
goto unlock_scan;
+}
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);


@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
struct device_node *np;
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
np = core->dev.of_node;
if (np && !of_device_is_available(np))
continue;
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)


@@ -54,7 +54,7 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
-int num_domains, i;
+int num_domains, i, ret;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
struct scmi_pm_domain *scmi_pd;
@@ -112,9 +112,18 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ret = of_genpd_add_provider_onecell(np, scmi_pd_data);
+if (ret)
+goto err_rm_genpds;
dev_set_drvdata(dev, scmi_pd_data);
-return of_genpd_add_provider_onecell(np, scmi_pd_data);
+return 0;
+err_rm_genpds:
+for (i = num_domains - 1; i >= 0; i--)
+pm_genpd_remove(domains[i]);
+return ret;
}
static void scmi_pm_domain_remove(struct scmi_device *sdev)


@@ -5296,9 +5296,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
-if (vmid)
+if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
-(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+!amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
if (ring->is_mes_queue)


@@ -1552,7 +1552,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
-uint32_t read_dpcd_retry_cnt = 3;
+uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -1598,12 +1598,13 @@ static bool retrieve_link_cap(struct dc_link *link)
status = dpcd_get_tunneling_device_data(link);
dpcd_set_source_specific_data(link);
-/* Sink may need to configure internals based on vendor, so allow some
-* time before proceeding with possibly vendor specific transactions
-*/
-msleep(post_oui_delay);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
+/*
+* Sink may need to configure internals based on vendor, so allow some
+* time before proceeding with possibly vendor specific transactions
+*/
+msleep(post_oui_delay);
status = core_link_read_dpcd(
link,
DP_DPCD_REV,


@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
nvkm_memory_unref(&fw->inst);
nvkm_falcon_fw_dtor_sigs(fw);
nvkm_firmware_dtor(&fw->fw);
kfree(fw->boot);
fw->boot = NULL;
}
static const struct nvkm_firmware_func


@@ -3140,6 +3140,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
dc->client.parent = &parent->client;
dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
put_device(companion);
+}
return 0;


@@ -912,15 +912,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
-/* If the bootloader enabled DSI it needs to be disabled
-* in order for the panel initialization commands to be
-* properly sent.
-*/
-value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-if (value & DSI_POWER_CONTROL_ENABLE)
-tegra_dsi_disable(dsi);
err = tegra_dsi_prepare(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to prepare: %d\n", err);


@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
if (err)
goto put_channel;
-if (supported)
+if (supported) {
+struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
context->memory_context = host1x_memory_context_alloc(
-host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
+host, client->base.dev, pid);
+put_pid(pid);
+}
if (IS_ERR(context->memory_context)) {
if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
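The leak being fixed above: get_task_pid() takes a reference on the struct pid, and the old code passed it inline with no matching put_pid(), leaking one reference per opened channel. The balanced shape, as a sketch with a hypothetical callee name:

struct pid *pid = get_task_pid(current, PIDTYPE_TGID);

ctx = hypothetical_alloc(dev, pid);     /* callee takes its own ref if it keeps the pid */
put_pid(pid);                           /* balance get_task_pid() regardless of result */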


@@ -163,6 +163,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
if (rc)
goto cleanup;
mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
amd_sfh_wait_for_response(privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
writel(0, privdata->mmio + AMD_P2C_MSG(0));
mp2_ops->start(privdata, info);
status = amd_sfh_wait_for_response


@@ -1517,5 +1517,7 @@
#define USB_DEVICE_ID_VU7PLUS 0x0705
-#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
-#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
+#define USB_VENDOR_ID_JIELI_SDK_DEFAULT 0x4c4a
+#define USB_DEVICE_ID_JIELI_SDK_4155 0x4155
#endif


@@ -903,7 +903,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ODROID, USB_DEVICE_ID_VU5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ODROID, USB_DEVICE_ID_VU7PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
-{ HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};
@@ -1060,6 +1059,18 @@ bool hid_ignore(struct hid_device *hdev)
strlen(elan_acpi_id[i].id)))
return true;
break;
case USB_VENDOR_ID_JIELI_SDK_DEFAULT:
/*
* Multiple USB devices with identical IDs (mic & touchscreen).
* The touch screen requires hid core processing, but the
* microphone does not. They can be distinguished by manufacturer
* and serial number.
*/
if (hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 &&
strncmp(hdev->name, "SmartlinkTechnology", 19) == 0 &&
strncmp(hdev->uniq, "20201111000001", 14) == 0)
return true;
break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&


@@ -263,6 +263,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
if (!ckdev->idev) {
dev_warn_once(ckdev->dev,
"Unexpected key matrix event\n");
return NOTIFY_OK;
}
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
"Discarded incomplete key matrix event.\n");


@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}
-error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv);
if (error)
return error;


@@ -63,6 +63,9 @@
#define BUTTON_PRESSED 0xb5
#define COMMAND_VERSION 0xa9
/* 1 Status + 1 Color + 2 X + 2 Y = 6 bytes */
#define NOTETAKER_PACKET_SIZE 6
/* in xy data packet */
#define BATTERY_NO_REPORT 0x40
#define BATTERY_LOW 0x41
@@ -303,6 +306,12 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus->data_len = usb_maxpacket(dev, pipe);
if (pegasus->data_len < NOTETAKER_PACKET_SIZE) {
dev_err(&intf->dev, "packet size is too small (%d)\n",
pegasus->data_len);
error = -EINVAL;
goto err_free_mem;
}
pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);


@@ -1519,6 +1519,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
{ "GDIX1003", 0 },
{ "GDX9110", 0 },
{ }
};


@@ -599,6 +599,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
uint8_t *datbuf = NULL, *oobbuf = NULL;
size_t datbuf_len, oobbuf_len;
int ret = 0;
u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -618,7 +619,7 @@ mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
-if (req.start + req.len > mtd->size)
+if (check_add_overflow(req.start, req.len, &end) || end > mtd->size)
return -EINVAL;
datbuf_len = min_t(size_t, req.len, mtd->erasesize);
@@ -698,6 +699,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
size_t datbuf_len, oobbuf_len;
size_t orig_len, orig_ooblen;
int ret = 0;
u64 end;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
@@ -724,7 +726,7 @@ mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
req.len &= 0xffffffff;
req.ooblen &= 0xffffffff;
-if (req.start + req.len > mtd->size) {
+if (check_add_overflow(req.start, req.len, &end) || end > mtd->size) {
ret = -EINVAL;
goto out;
}


@@ -2876,7 +2876,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
-struct dma_device *dma_dev = cdns_ctrl->dmac->device;
+struct dma_device *dma_dev;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2920,6 +2920,7 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
}
}
dma_dev = cdns_ctrl->dmac->device;
cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
cdns_ctrl->io.size,
DMA_BIDIRECTIONAL, 0);


@@ -371,8 +371,18 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
/* Register both leds */
-led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
-led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+ret = led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+if (ret) {
+dev_err(hellcreek->dev, "Failed to register sync_good LED\n");
+goto out;
+}
+ret = led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+if (ret) {
+dev_err(hellcreek->dev, "Failed to register is_gm LED\n");
+led_classdev_unregister(&hellcreek->led_sync_good);
+goto out;
+}
ret = 0;


@@ -336,6 +336,7 @@ static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
ksz_pread16(dev, port, reg, &data16);
/* Update tune Adjust */
data16 &= ~PORT_TUNE_ADJ;
data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
ksz_pwrite16(dev, port, reg, data16);


@@ -1296,7 +1296,8 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
-struct sk_buff **skb)
+struct sk_buff **skb,
+struct be_wrb_params *wrb_params)
{
struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
bool os2bmc = false;
@@ -1360,7 +1361,7 @@ done:
* to BMC, asic expects the vlan to be inline in the packet.
*/
if (os2bmc)
-*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+*skb = be_insert_vlan_in_pkt(adapter, *skb, wrb_params);
return os2bmc;
}
@@ -1387,7 +1388,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
/* if os2bmc is enabled and if the pkt is destined to bmc,
* enqueue the pkt a 2nd time with mgmt bit set.
*/
-if (be_send_pkt_to_bmc(adapter, &skb)) {
+if (be_send_pkt_to_bmc(adapter, &skb, &wrb_params)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
if (unlikely(!wrb_cnt))


@@ -323,10 +323,8 @@ err_xa:
free_irq(irq->map.virq, &irq->nh);
err_req_irq:
#ifdef CONFIG_RFS_ACCEL
-if (i && rmap && *rmap) {
-free_irq_cpu_rmap(*rmap);
-*rmap = NULL;
-}
+if (i && rmap && *rmap)
+irq_cpu_rmap_remove(*rmap, irq->map.virq);
err_irq_rmap:
#endif
if (i && pci_msix_can_alloc_dyn(dev->pdev))


@@ -601,6 +601,8 @@ int mlxsw_linecard_devlink_info_get(struct mlxsw_linecard *linecard,
err = devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
info->psid);
if (err)
goto unlock;
sprintf(buf, "%u.%u.%u", info->fw_major, info->fw_minor,
info->fw_sub_minor);


@@ -816,8 +816,10 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
-if (!rule)
-return -EINVAL;
+if (!rule) {
+err = -EINVAL;
+goto err_rule_get_stats;
+}
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&drops, &lastuse, &used_hw_stats);


@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -960,7 +961,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
-for (i = 0; cqe->len_list[i]; i++)
+for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -985,7 +986,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
-for (i = 0; cqe->len_list[i]; i++)
+for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))


@@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_channel = knav_dma_open_channel(dev,
tx_pipe->dma_chan_name, &config);
-if (IS_ERR(tx_pipe->dma_channel)) {
+if (!tx_pipe->dma_channel) {
dev_err(dev, "failed opening tx chan(%s)\n",
tx_pipe->dma_chan_name);
-ret = PTR_ERR(tx_pipe->dma_channel);
+ret = -EINVAL;
goto err;
}
@@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
return 0;
err:
-if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+if (tx_pipe->dma_channel)
knav_dma_close_channel(tx_pipe->dma_channel);
tx_pipe->dma_channel = NULL;
return ret;
@@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
netcp->dma_chan_name, &config);
-if (IS_ERR(netcp->rx_channel)) {
+if (!netcp->rx_channel) {
dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
netcp->dma_chan_name);
-ret = PTR_ERR(netcp->rx_channel);
+ret = -EINVAL;
goto fail;
}


@@ -2349,17 +2349,11 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
-if (ctrl->ctrl.tagset)
-nvme_remove_io_tag_set(&ctrl->ctrl);
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
-nvme_unquiesce_admin_queue(&ctrl->ctrl);
-nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
put_device(ctrl->dev);
@@ -3248,13 +3242,20 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+cancel_work_sync(&ctrl->ioerr_work);
+if (ctrl->ctrl.tagset)
+nvme_remove_io_tag_set(&ctrl->ctrl);
+nvme_unquiesce_admin_queue(&ctrl->ctrl);
+nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void


@@ -686,7 +686,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
return;
}
nvme_add_ns_head_cdev(head);
-kblockd_schedule_work(&head->partition_scan_work);
+queue_work(nvme_wq, &head->partition_scan_work);
}
mutex_lock(&head->lock);


@@ -532,6 +532,11 @@ static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip)
return ret;
}
static void cs42l43_fwnode_put(void *data)
{
fwnode_handle_put(data);
}
static int cs42l43_pin_probe(struct platform_device *pdev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
@@ -563,10 +568,20 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
priv->gpio_chip.ngpio = CS42L43_NUM_GPIOS;
if (is_of_node(fwnode)) {
-fwnode = fwnode_get_named_child_node(fwnode, "pinctrl");
+struct fwnode_handle *child;
-if (fwnode && !fwnode->dev)
-fwnode->dev = priv->dev;
+child = fwnode_get_named_child_node(fwnode, "pinctrl");
+if (child) {
+ret = devm_add_action_or_reset(&pdev->dev,
+cs42l43_fwnode_put, child);
+if (ret) {
+fwnode_handle_put(child);
+return ret;
+}
+if (!child->dev)
+child->dev = priv->dev;
+fwnode = child;
+}
}
priv->gpio_chip.fwnode = fwnode;


@@ -392,6 +392,7 @@ static int s32_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
gpio_pin->pin_id = offset;
gpio_pin->config = config;
INIT_LIST_HEAD(&gpio_pin->list);
spin_lock_irqsave(&ipctl->gpio_configs_lock, flags);
list_add(&gpio_pin->list, &ipctl->gpio_configs);
@@ -943,7 +944,7 @@ int s32_pinctrl_probe(struct platform_device *pdev,
spin_lock_init(&ipctl->gpio_configs_lock);
s32_pinctrl_desc =
-devm_kmalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL);
+devm_kzalloc(&pdev->dev, sizeof(*s32_pinctrl_desc), GFP_KERNEL);
if (!s32_pinctrl_desc)
return -ENOMEM;


@@ -106,11 +106,11 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_read_config_dword(pdev, 0xD0, &mmio_base);
if (ret)
-return ret;
+return pcibios_err_to_errno(ret);
ret = pci_read_config_dword(pdev, 0xFC, &pcu_base);
if (ret)
-return ret;
+return pcibios_err_to_errno(ret);
pcu_base &= GENMASK(10, 0);
base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12;


@@ -512,7 +512,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
return 0;
}
-static int imx_gpc_remove(struct platform_device *pdev)
+static void imx_gpc_remove(struct platform_device *pdev)
{
struct device_node *pgc_node;
int ret;
@@ -522,7 +522,7 @@ static int imx_gpc_remove(struct platform_device *pdev)
/* bail out if DT too old and doesn't provide the necessary info */
if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
!pgc_node)
-return 0;
+return;
/*
* If the old DT binding is used the toplevel driver needs to
@@ -532,16 +532,22 @@ static int imx_gpc_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
-if (ret)
-return ret;
+if (ret) {
+dev_err(&pdev->dev, "Failed to remove PU power domain (%pe)\n",
+ERR_PTR(ret));
+return;
+}
imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]);
ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base);
-if (ret)
-return ret;
+if (ret) {
+dev_err(&pdev->dev, "Failed to remove ARM power domain (%pe)\n",
+ERR_PTR(ret));
+return;
+}
}
-return 0;
+of_node_put(pgc_node);
}
static struct platform_driver imx_gpc_driver = {
@@ -550,6 +556,6 @@ static struct platform_driver imx_gpc_driver = {
.of_match_table = imx_gpc_dt_ids,
},
.probe = imx_gpc_probe,
-.remove = imx_gpc_remove,
+.remove_new = imx_gpc_remove,
};
builtin_platform_driver(imx_gpc_driver)


@@ -700,7 +700,6 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
grp->sweep_req_pend_num--;
ctcmpc_send_sweep_resp(ch);
kfree(mpcginfo);
-return;
}


@@ -602,8 +602,9 @@ int scsi_host_busy(struct Scsi_Host *shost)
{
int cnt = 0;
-blk_mq_tagset_busy_iter(&shost->tag_set,
-scsi_host_check_in_flight, &cnt);
+if (shost->tag_set.ops)
+blk_mq_tagset_busy_iter(&shost->tag_set,
+scsi_host_check_in_flight, &cnt);
return cnt;
}
EXPORT_SYMBOL(scsi_host_busy);


@@ -2212,9 +2212,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
write_lock_irqsave(&sfp->rq_list_lock, iflags);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
-sg_finish_rem_req(srp);
list_del(&srp->entry);
+write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+sg_finish_rem_req(srp);
+/*
+* sg_rq_end_io() uses srp->parentfp. Hence, only clear
+* srp->parentfp after blk_mq_free_request() has been called.
+*/
srp->parentfp = NULL;
+write_lock_irqsave(&sfp->rq_list_lock, iflags);
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);


@@ -402,7 +402,7 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
* @name: slave channel name
* @config: dma configuration parameters
*
-* Returns pointer to appropriate DMA channel on success or error.
+* Return: Pointer to appropriate DMA channel on success or NULL on error.
*/
void *knav_dma_open_channel(struct device *dev, const char *name,
struct knav_dma_cfg *config)
@@ -414,13 +414,13 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!kdev) {
pr_err("keystone-navigator-dma driver not registered\n");
-return (void *)-EINVAL;
+return NULL;
}
chan_num = of_channel_match_helper(dev->of_node, name, &instance);
if (chan_num < 0) {
dev_err(kdev->dev, "No DMA instance with name %s\n", name);
-return (void *)-EINVAL;
+return NULL;
}
dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
@@ -431,7 +431,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (config->direction != DMA_MEM_TO_DEV &&
config->direction != DMA_DEV_TO_MEM) {
dev_err(kdev->dev, "bad direction\n");
-return (void *)-EINVAL;
+return NULL;
}
/* Look for correct dma instance */
@@ -443,7 +443,7 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
}
if (!dma) {
dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
-return (void *)-EINVAL;
+return NULL;
}
/* Look for correct dma channel from dma instance */
@@ -463,14 +463,14 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
if (!chan) {
dev_err(kdev->dev, "channel %d is not in DMA %s\n",
chan_num, instance);
-return (void *)-EINVAL;
+return NULL;
}
if (atomic_read(&chan->ref_count) >= 1) {
if (!check_config(chan, config)) {
dev_err(kdev->dev, "channel %d config miss-match\n",
chan_num);
-return (void *)-EINVAL;
+return NULL;
}
}
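This is the other half of the netcp fix earlier in this release: knav_dma_open_channel() now returns NULL on every failure path instead of an ERR_PTR()-encoded errno, so callers must use a plain NULL test. A hedged sketch of the updated calling convention (the channel name and config values are made up):

struct knav_dma_cfg config = { .direction = DMA_MEM_TO_DEV };
void *chan;

chan = knav_dma_open_channel(dev, "nettx0", &config);   /* hypothetical name */
if (!chan)
        return -EINVAL;         /* the callee has already logged the reason */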


@@ -893,6 +893,9 @@ static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
if (!tl_hba->sh)
return -ENODEV;
return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}


@@ -80,9 +80,15 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
{
struct hv_uio_private_data *pdata = info->priv;
struct hv_device *dev = pdata->device;
+struct vmbus_channel *primary, *sc;
-dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
virt_mb();
+primary = dev->channel;
+primary->inbound.ring_buffer->interrupt_mask = !irq_state;
+mutex_lock(&vmbus_connection.channel_mutex);
+list_for_each_entry(sc, &primary->sc_list, sc_list)
+sc->inbound.ring_buffer->interrupt_mask = !irq_state;
+mutex_unlock(&vmbus_connection.channel_mutex);
return 0;
}
@@ -93,11 +99,18 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
static void hv_uio_channel_cb(void *context)
{
struct vmbus_channel *chan = context;
-struct hv_device *hv_dev = chan->device_obj;
-struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
+struct hv_device *hv_dev;
+struct hv_uio_private_data *pdata;
virt_mb();
+/*
+* The callback may come from a subchannel, in which case look
+* for the hv device in the primary channel
+*/
+hv_dev = chan->primary_channel ?
+chan->primary_channel->device_obj : chan->device_obj;
+pdata = hv_get_drvdata(hv_dev);
uio_event_notify(&pdata->info);
}


@@ -411,7 +411,10 @@ static int exfat_read_boot_sector(struct super_block *sb)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
-sb_min_blocksize(sb, 512);
+if (!sb_min_blocksize(sb, 512)) {
+exfat_err(sb, "unable to set blocksize");
+return -EINVAL;
+}
/* read boot sector */
sbi->boot_bh = sb_bread(sb, 0);


@@ -23,20 +23,18 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;
-static void *page_array_alloc(struct inode *inode, int nr)
+static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
{
-struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (likely(size <= sbi->page_array_slab_size))
return f2fs_kmem_cache_alloc(sbi->page_array_slab,
-GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
+GFP_F2FS_ZERO, false, sbi);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}
-static void page_array_free(struct inode *inode, void *pages, int nr)
+static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
{
-struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int size = sizeof(struct page *) * nr;
if (!pages)
@@ -145,13 +143,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;
-cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
-page_array_free(cc->inode, cc->rpages, cc->cluster_size);
+page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -211,13 +209,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
dic->rbuf, &dic->rlen);
if (ret != LZO_E_OK) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"lzo decompress failed, ret:%d", ret);
return -EIO;
}
if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"lzo invalid rlen:%zu, expected:%lu",
dic->rlen, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -291,13 +289,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
dic->clen, dic->rlen);
if (ret < 0) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"lz4 decompress failed, ret:%d", ret);
return -EIO;
}
if (ret != PAGE_SIZE << dic->log_cluster_size) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"lz4 invalid ret:%d, expected:%lu",
ret, PAGE_SIZE << dic->log_cluster_size);
return -EIO;
@@ -425,7 +423,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"%s zstd_init_dstream failed", __func__);
vfree(workspace);
return -EIO;
@@ -461,14 +459,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
if (zstd_is_error(ret)) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"%s zstd_decompress_stream failed, ret: %d",
__func__, zstd_get_error_code(ret));
return -EIO;
}
if (dic->rlen != outbuf.pos) {
-f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
+f2fs_err_ratelimited(dic->sbi,
"%s ZSTD invalid rlen:%zu, expected:%lu",
__func__, dic->rlen,
PAGE_SIZE << dic->log_cluster_size);
@@ -614,6 +612,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
static int f2fs_compress_pages(struct compress_ctx *cc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -634,7 +633,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
cc->valid_nr_cpages = cc->nr_cpages;
-cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
+cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -709,7 +708,7 @@ out_free_cpages:
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
-page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -727,7 +726,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{
-struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+struct f2fs_sb_info *sbi = dic->sbi;
struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -800,7 +799,7 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
-struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+struct f2fs_sb_info *sbi = dic->sbi;
dec_page_count(sbi, F2FS_RD_DATA);
@@ -1302,7 +1301,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
-cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+cic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
@@ -1395,13 +1394,13 @@ unlock_continue:
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
-page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc, false);
return 0;
out_destroy_crypt:
-page_array_free(cc->inode, cic->rpages, cc->cluster_size);
+page_array_free(sbi, cic->rpages, cc->cluster_size);
for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1419,7 +1418,7 @@ out_free:
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
-page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+page_array_free(sbi, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
return -EAGAIN;
}
@@ -1449,7 +1448,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}
-page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
+page_array_free(sbi, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}
@@ -1580,14 +1579,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
bool pre_alloc)
{
-const struct f2fs_compress_ops *cops =
-f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
int i;
-if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return 0;
-dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
+dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
if (!dic->tpages)
return -ENOMEM;
@@ -1617,10 +1615,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
bool bypass_destroy_callback, bool pre_alloc)
{
-const struct f2fs_compress_ops *cops =
-f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
-if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
return;
if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
@@ -1647,7 +1644,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);
-dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
+dic->rpages = page_array_alloc(sbi, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1655,6 +1652,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode;
dic->sbi = sbi;
dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size;
@@ -1668,7 +1667,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;
-dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
+dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
if (!dic->cpages) {
ret = -ENOMEM;
goto out_free;
@@ -1698,6 +1697,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
bool bypass_destroy_callback)
{
int i;
/* use sbi in dic to avoid UAF of dic->inode */
struct f2fs_sb_info *sbi = dic->sbi;
f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
@@ -1709,7 +1710,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
-page_array_free(dic->inode, dic->tpages, dic->cluster_size);
+page_array_free(sbi, dic->tpages, dic->cluster_size);
}
if (dic->cpages) {
@@ -1718,10 +1719,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
-page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
+page_array_free(sbi, dic->cpages, dic->nr_cpages);
}
-page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
+page_array_free(sbi, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}
@@ -1740,8 +1741,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
f2fs_free_dic(dic, false);
} else {
INIT_WORK(&dic->free_work, f2fs_late_free_dic);
-queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
-&dic->free_work);
+queue_work(dic->sbi->post_read_wq, &dic->free_work);
}
}
}
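The common thread in the hunks above: a decompress_io_ctx can outlive its inode (the final free may run from a workqueue well after the page I/O completes), so every F2FS_I_SB(dic->inode) on the alloc/free paths is replaced with a dic->sbi pointer captured at allocation time. A generic sketch of the idiom, with all names hypothetical:

/* Snapshot teardown-critical pointers when the async context is
 * created, so the deferred free path never dereferences an object
 * that may already be gone. */
struct async_ctx {
        struct inode *inode;            /* may be stale when freed */
        struct my_sb_info *sbi;         /* valid for the fs lifetime */
        struct work_struct free_work;
};

static struct async_ctx *async_ctx_alloc(struct inode *inode,
                                         struct my_sb_info *sbi)
{
        struct async_ctx *c = kzalloc(sizeof(*c), GFP_NOFS);

        if (!c)
                return NULL;
        c->inode = inode;
        c->sbi = sbi;   /* captured while the inode is still pinned */
        return c;
}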


@@ -1493,6 +1493,7 @@ struct compress_io_ctx {
struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
pgoff_t cluster_idx; /* cluster index number */
unsigned int cluster_size; /* page count in cluster */
unsigned int log_cluster_size; /* log of cluster size */
@@ -1533,6 +1534,7 @@ struct decompress_io_ctx {
bool failed; /* IO error occurred before decompression? */
bool need_verity; /* need fs-verity verification after decompression? */
unsigned char compress_algorithm; /* backup algorithm type */
void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */


@@ -16,6 +16,7 @@ static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);
static void close_cached_dir_locked(struct cached_fid *cfid);
struct cached_dir_dentry {
struct list_head entry;
@@ -362,7 +363,7 @@ out:
* lease. Release one here, and the second below.
*/
cfid->has_lease = false;
-close_cached_dir(cfid);
+close_cached_dir_locked(cfid);
}
spin_unlock(&cfids->cfid_list_lock);
@@ -448,18 +449,52 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
spin_lock(&cfid->cfids->cfid_list_lock);
if (cfid->has_lease) {
cfid->has_lease = false;
-close_cached_dir(cfid);
+close_cached_dir_locked(cfid);
}
spin_unlock(&cfid->cfids->cfid_list_lock);
close_cached_dir(cfid);
}
/**
* close_cached_dir - drop a reference of a cached dir
*
* The release function will be called with cfid_list_lock held to remove the
* cached dirs from the list before any other thread can take another @cfid
* ref. Must not be called with cfid_list_lock held; use
* close_cached_dir_locked() instead.
*
* @cfid: cached dir
*/
void close_cached_dir(struct cached_fid *cfid)
{
lockdep_assert_not_held(&cfid->cfids->cfid_list_lock);
kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
}
/**
* close_cached_dir_locked - put a reference of a cached dir with
* cfid_list_lock held
*
* Calling close_cached_dir() with cfid_list_lock held has the potential effect
* of causing a deadlock if the invariant of refcount >= 2 is false.
*
* This function is used in paths that hold cfid_list_lock and expect at least
* two references. If that invariant is violated, it WARNs and returns without
* dropping a reference; the final put must still go through
* close_cached_dir().
*
* @cfid: cached dir
*/
static void close_cached_dir_locked(struct cached_fid *cfid)
{
lockdep_assert_held(&cfid->cfids->cfid_list_lock);
if (WARN_ON(kref_read(&cfid->refcount) < 2))
return;
kref_put(&cfid->refcount, smb2_close_cached_fid);
}
/*
* Called from cifs_kill_sb when we unmount a share
*/
@@ -692,7 +727,7 @@ static void cfids_invalidation_worker(struct work_struct *work)
list_for_each_entry_safe(cfid, q, &entry, entry) {
list_del(&cfid->entry);
/* Drop the ref-count acquired in invalidate_all_cached_dirs */
-kref_put(&cfid->refcount, smb2_close_cached_fid);
+close_cached_dir(cfid);
}
}
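The split between the two helpers follows from kref_put_lock() semantics: the refcount is dropped first and, only on the 1 -> 0 transition, the given spinlock is taken before release() runs; release() is then expected to drop the lock. Calling it while already holding that lock can therefore self-deadlock on the final put. A generic sketch of the contract with hypothetical obj/obj_release names, not the smb2 code:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_list_lock);

struct obj {
        struct kref ref;
        struct list_head entry;
};

static void obj_release(struct kref *ref)
{
        struct obj *o = container_of(ref, struct obj, ref);

        list_del(&o->entry);            /* obj_list_lock is held here */
        spin_unlock(&obj_list_lock);    /* release() must drop the lock */
        kfree(o);
}

static void obj_put(struct obj *o)
{
        /* would deadlock if the caller already held obj_list_lock */
        kref_put_lock(&o->ref, obj_release, &obj_list_lock);
}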


@@ -133,7 +133,7 @@ module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(enable_gcm_256, bool, 0644);
-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
+MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");


@@ -1726,6 +1726,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->password = NULL;
kfree_sensitive(ctx->password2);
ctx->password2 = NULL;
kfree(ctx->source);
ctx->source = NULL;
kfree(fc->source);
fc->source = NULL;
return -EINVAL;
}


@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ARRAY_SIZE_H
#define _LINUX_ARRAY_SIZE_H
#include <linux/compiler.h>
/**
* ARRAY_SIZE - get the number of elements in array @arr
* @arr: array to be sized
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#endif /* _LINUX_ARRAY_SIZE_H */
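The macro itself is unchanged; it moves into a minimal header so files like the qede driver above can use it without pulling in all of <linux/kernel.h>. The __must_be_array() term is a compile-time guard that evaluates to 0 for true arrays and breaks the build when given a pointer. A small illustrative use, not from the patch:

#include <linux/array_size.h>
#include <linux/printk.h>
#include <linux/types.h>

static const u16 vals[] = { 2, 4, 6, 8, 16, 24, 32 };

static void dump_vals(void)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(vals); i++)  /* 7 iterations */
                pr_debug("val %u\n", vals[i]);
        /* ARRAY_SIZE(&vals[0]) would fail to build: pointer, not array */
}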


@@ -566,6 +566,7 @@ struct ata_bmdma_prd {
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7)
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \


@@ -13,6 +13,7 @@
#include <linux/stdarg.h>
#include <linux/align.h>
#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
@@ -50,12 +51,6 @@
#define READ 0
#define WRITE 1
-/**
-* ARRAY_SIZE - get the number of elements in array @arr
-* @arr: array to be sized
-*/
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \


@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */


@@ -474,6 +474,12 @@ tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
static inline void
tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
{
atomic64_set(&resync_async->req, 0);
}
static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{


@@ -462,7 +462,8 @@ static inline int xfrm_af2proto(unsigned int family)
static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
-if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
+if ((x->sel.family != AF_UNSPEC) ||
+(ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
(ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
return &x->inner_mode;
else


@@ -460,10 +460,6 @@ again:
* BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
* trampoline again, and retry register.
*/
-/* reset fops->func and fops->trampoline for re-register */
-tr->fops->func = NULL;
-tr->fops->trampoline = 0;
/* reset im->image memory attr for arch_prepare_bpf_trampoline */
set_memory_nx((long)im->image, 1);
set_memory_rw((long)im->image, 1);


@@ -1132,7 +1132,7 @@ static int __crash_shrink_memory(struct resource *old_res,
old_res->start = 0;
old_res->end = 0;
} else {
-crashk_res.end = ram_res->start - 1;
+old_res->end = ram_res->start - 1;
}
crash_free_reserved_phys_range(ram_res->start, ram_res->end);


@@ -1413,10 +1413,11 @@ static int __try_to_del_timer_sync(struct timer_list *timer, bool shutdown)
base = lock_timer_base(timer, &flags);
-if (base->running_timer != timer)
+if (base->running_timer != timer) {
ret = detach_if_pending(timer, base, true);
-if (shutdown)
-timer->function = NULL;
+if (shutdown)
+timer->function = NULL;
+}
raw_spin_unlock_irqrestore(&base->lock, flags);


@@ -5370,6 +5370,17 @@ static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long
}
}
static void reset_direct(struct ftrace_ops *ops, unsigned long addr)
{
struct ftrace_hash *hash = ops->func_hash->filter_hash;
remove_direct_functions_hash(hash, addr);
/* cleanup for possible another register call */
ops->func = NULL;
ops->trampoline = 0;
}
/**
* register_ftrace_direct - Call a custom trampoline directly
* for multiple functions registered in @ops
@@ -5465,6 +5476,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
ops->direct_call = addr;
err = register_ftrace_function_nolock(ops);
if (err)
reset_direct(ops, addr);
out_unlock:
mutex_unlock(&direct_mutex);
@@ -5497,7 +5510,6 @@ EXPORT_SYMBOL_GPL(register_ftrace_direct);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
bool free_filters)
{
-struct ftrace_hash *hash = ops->func_hash->filter_hash;
int err;
if (check_direct_multi(ops))
@@ -5507,13 +5519,9 @@ int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
mutex_lock(&direct_mutex);
err = unregister_ftrace_function(ops);
-remove_direct_functions_hash(hash, addr);
+reset_direct(ops, addr);
mutex_unlock(&direct_mutex);
-/* cleanup for possible another register call */
-ops->func = NULL;
-ops->trampoline = 0;
if (free_filters)
ftrace_free_filter(ops);
return err;


@@ -62,6 +62,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
#define TP_FCT tracepoint_string(__func__)
#define MA_ROOT_PARENT 1
/*
@@ -2990,7 +2992,7 @@ static inline int mas_rebalance(struct ma_state *mas,
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
-trace_ma_op(__func__, mas);
+trace_ma_op(TP_FCT, mas);
/*
* Rebalancing occurs if a node is insufficient. Data is rebalanced
@@ -3365,7 +3367,7 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
-trace_ma_op(__func__, mas);
+trace_ma_op(TP_FCT, mas);
mas->depth = mas_mt_height(mas);
/* Allocation failures will happen early. */
mas_node_count(mas, 1 + mas->depth * 2);
@@ -3598,7 +3600,7 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
return false;
}
-trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
+trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry);
return true;
}
@@ -3845,7 +3847,7 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
* of data may happen.
*/
mas = wr_mas->mas;
-trace_ma_op(__func__, mas);
+trace_ma_op(TP_FCT, mas);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
@@ -3996,7 +3998,7 @@ done:
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
-trace_ma_write(__func__, mas, 0, wr_mas->entry);
+trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
mas_update_gap(mas);
return true;
}
@@ -4042,7 +4044,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
return false;
}
-trace_ma_write(__func__, mas, 0, wr_mas->entry);
+trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
/*
* Only update gap when the new entry is empty or there is an empty
* entry in the original two ranges.
@@ -4178,7 +4180,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
-trace_ma_write(__func__, mas, new_end, wr_mas->entry);
+trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry);
return true;
}
@@ -4192,7 +4194,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
struct maple_big_node b_node;
-trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
@@ -5395,7 +5397,7 @@ void *mas_store(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
-trace_ma_write(__func__, mas, 0, entry);
+trace_ma_write(TP_FCT, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
@@ -5433,7 +5435,7 @@ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
MA_WR_STATE(wr_mas, mas, entry);
mas_wr_store_setup(&wr_mas);
-trace_ma_write(__func__, mas, 0, entry);
+trace_ma_write(TP_FCT, mas, 0, entry);
retry:
mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, gfp)))
@@ -5457,7 +5459,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
MA_WR_STATE(wr_mas, mas, entry);
mas_wr_store_setup(&wr_mas);
-trace_ma_write(__func__, mas, 0, entry);
+trace_ma_write(TP_FCT, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
mas_destroy(mas);
@@ -6245,7 +6247,7 @@ void *mtree_load(struct maple_tree *mt, unsigned long index)
MA_STATE(mas, mt, index, index);
void *entry;
-trace_ma_read(__func__, &mas);
+trace_ma_read(TP_FCT, &mas);
rcu_read_lock();
retry:
entry = mas_start(&mas);
@@ -6288,7 +6290,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
MA_STATE(mas, mt, index, last);
MA_WR_STATE(wr_mas, &mas, entry);
-trace_ma_write(__func__, &mas, 0, entry);
+trace_ma_write(TP_FCT, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
@@ -6470,7 +6472,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
void *entry = NULL;
MA_STATE(mas, mt, index, index);
-trace_ma_op(__func__, &mas);
+trace_ma_op(TP_FCT, &mas);
mtree_lock(mt);
entry = mas_erase(&mas);
@@ -6536,7 +6538,7 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
unsigned long copy = *index;
#endif
-trace_ma_read(__func__, &mas);
+trace_ma_read(TP_FCT, &mas);
if ((*index) > max)
return NULL;


@@ -64,10 +64,20 @@ static void check_element(mempool_t *pool, void *element)
} else if (pool->free == mempool_free_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
-void *addr = kmap_atomic((struct page *)element);
-__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
-kunmap_atomic(addr);
+#ifdef CONFIG_HIGHMEM
+for (int i = 0; i < (1 << order); i++) {
+struct page *page = (struct page *)element;
+void *addr = kmap_local_page(page + i);
+__check_element(pool, addr, PAGE_SIZE);
+kunmap_local(addr);
+}
+#else
+void *addr = page_address((struct page *)element);
+__check_element(pool, addr, PAGE_SIZE << order);
+#endif
}
}
@@ -89,10 +99,20 @@ static void poison_element(mempool_t *pool, void *element)
} else if (pool->alloc == mempool_alloc_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
-void *addr = kmap_atomic((struct page *)element);
-__poison_element(addr, 1UL << (PAGE_SHIFT + order));
-kunmap_atomic(addr);
+#ifdef CONFIG_HIGHMEM
+for (int i = 0; i < (1 << order); i++) {
+struct page *page = (struct page *)element;
+void *addr = kmap_local_page(page + i);
+__poison_element(addr, PAGE_SIZE);
+kunmap_local(addr);
+}
+#else
+void *addr = page_address((struct page *)element);
+__poison_element(addr, PAGE_SIZE << order);
+#endif
}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
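The bug being fixed: kmap_atomic()/kmap_local_page() map exactly one page, yet mempool_alloc_pages() hands out order-N blocks of 1 << N pages, so the old code poisoned PAGE_SIZE << order bytes through a single-page mapping and ran off its end on HIGHMEM configurations. The fix walks the block one page at a time; an abstract sketch of that loop, assuming a hypothetical byte-wise visitor:

#include <linux/highmem.h>

/* Touch an order-N page block one PAGE_SIZE window at a time, since a
 * kmap_local_page() mapping only covers a single page. */
static void visit_pages(struct page *page, int order,
                        void (*visit)(void *addr, size_t len))
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                void *addr = kmap_local_page(page + i);

                visit(addr, PAGE_SIZE);
                kunmap_local(addr);
        }
}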


@@ -126,8 +126,7 @@ struct shmem_options {
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
-#define SHMEM_SEEN_NOSWAP 16
-#define SHMEM_SEEN_QUOTA 32
+#define SHMEM_SEEN_QUOTA 16
};
#ifdef CONFIG_TMPFS
@@ -4004,7 +4003,6 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
"Turning off swap in unprivileged tmpfs mounts unsupported");
}
ctx->noswap = true;
-ctx->seen |= SHMEM_SEEN_NOSWAP;
break;
case Opt_quota:
if (fc->user_ns != &init_user_ns)
@@ -4154,14 +4152,15 @@ static int shmem_reconfigure(struct fs_context *fc)
err = "Current inum too high to switch to 32-bit inums";
goto out;
}
-if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) {
+/*
+* "noswap" doesn't use fsparam_flag_no, i.e. there's no "swap"
+* counterpart for (re-)enabling swap.
+*/
+if (ctx->noswap && !sbinfo->noswap) {
err = "Cannot disable swap on remount";
goto out;
}
-if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) {
-err = "Cannot enable swap on remount if it was disabled on first mount";
-goto out;
-}
if (ctx->seen & SHMEM_SEEN_QUOTA &&
!sb_any_quota_loaded(fc->root->d_sb)) {


@@ -702,13 +702,15 @@ void devl_rate_nodes_destroy(struct devlink *devlink)
if (!devlink_rate->parent)
continue;
-refcount_dec(&devlink_rate->parent->refcnt);
if (devlink_rate_is_leaf(devlink_rate))
ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
else if (devlink_rate_is_node(devlink_rate))
ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
NULL, NULL);
refcount_dec(&devlink_rate->parent->refcnt);
devlink_rate->parent = NULL;
}
list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
if (devlink_rate_is_node(devlink_rate)) {

View File

@@ -111,8 +111,10 @@ static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
: htons(ETH_P_IP);
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
XFRM_MODE_SKB_CB(skb)->protocol);
__be16 type = inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
: htons(ETH_P_IP);
return skb_eth_gso_segment(skb, features, type);
}

View File

@@ -145,8 +145,10 @@ static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
struct sk_buff *skb,
netdev_features_t features)
{
__be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
: htons(ETH_P_IPV6);
const struct xfrm_mode *inner_mode = xfrm_ip2inner_mode(x,
XFRM_MODE_SKB_CB(skb)->protocol);
__be16 type = inner_mode->family == AF_INET ? htons(ETH_P_IP)
: htons(ETH_P_IPV6);
return skb_eth_gso_segment(skb, features, type);
}
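The IPv4 and IPv6 variants get the same fix: the GSO ethertype is now derived from the packet's own inner protocol instead of the state-wide x->inner_mode, which goes wrong for interfamily tunnels (4-in-6, 6-in-4) where an individual skb's inner family differs from the state's. The shared pattern, condensed (the helper name is hypothetical):

/* Pick the GSO ethertype from the packet, not from the state. */
static __be16 tunnel_gso_type(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct xfrm_mode *inner_mode =
		xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);

	return inner_mode->family == AF_INET6 ? htons(ETH_P_IPV6)
					      : htons(ETH_P_IP);
}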

View File

@@ -839,8 +839,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
opts->suboptions = 0;
/* Force later mptcp_write_options(), but do not use any actual
* option space.
*/
if (unlikely(__mptcp_check_fallback(msk) && !mptcp_check_infinite_map(skb)))
return false;
return true;
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
@@ -1041,6 +1044,31 @@ static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una)
msk->snd_una = new_snd_una;
}
static void rwin_update(struct mptcp_sock *msk, struct sock *ssk,
struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct tcp_sock *tp = tcp_sk(ssk);
u64 mptcp_rcv_wnd;
/* Avoid touching extra cachelines if TCP is going to accept this
* skb without filling the TCP-level window even with a possibly
* outdated mptcp-level rwin.
*/
if (!skb->len || skb->len < tcp_receive_window(tp))
return;
mptcp_rcv_wnd = atomic64_read(&msk->rcv_wnd_sent);
if (!after64(mptcp_rcv_wnd, subflow->rcv_wnd_sent))
return;
/* Some other subflow grew the mptcp-level rwin since rcv_wup,
* resync.
*/
tp->rcv_wnd += mptcp_rcv_wnd - subflow->rcv_wnd_sent;
subflow->rcv_wnd_sent = mptcp_rcv_wnd;
}
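The resync arithmetic in rwin_update() is easiest to follow with concrete, made-up numbers:

/* Example: this subflow last advertised the MPTCP-level window edge
 * rcv_wnd_sent = 1000, but another subflow has since advanced the
 * shared edge (msk->rcv_wnd_sent) to 1600. Then:
 *
 *	tp->rcv_wnd += 1600 - 1000;	// widen the TCP window by 600
 *	subflow->rcv_wnd_sent = 1600;	// remember the new edge
 *
 * so this subflow no longer refuses data that the MPTCP level is in
 * fact willing to accept.
 */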
static void ack_update_msk(struct mptcp_sock *msk,
struct sock *ssk,
struct mptcp_options_received *mp_opt)
@@ -1207,6 +1235,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
*/
if (mp_opt.use_ack)
ack_update_msk(msk, sk, &mp_opt);
rwin_update(msk, sk, skb);
/* Zero-data-length packets are dropped by the caller and not
* propagated to the MPTCP layer, so the skb extension does not
@@ -1293,6 +1322,10 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
if (rcv_wnd_new != rcv_wnd_old) {
raise_win:
/* The msk-level rcv wnd is ahead of the tcp-level one;
* sync the latter.
*/
rcv_wnd_new = rcv_wnd_old;
win = rcv_wnd_old - ack_seq;
tp->rcv_wnd = min_t(u64, win, U32_MAX);
new_win = tp->rcv_wnd;
@@ -1316,6 +1349,21 @@ raise_win:
update_wspace:
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
subflow->rcv_wnd_sent = rcv_wnd_new;
}
static void mptcp_track_rwin(struct tcp_sock *tp)
{
const struct sock *ssk = (const struct sock *)tp;
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk;
if (!ssk)
return;
subflow = mptcp_subflow_ctx(ssk);
msk = mptcp_sk(subflow->conn);
WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
@@ -1610,6 +1658,10 @@ mp_rst:
opts->reset_transient,
opts->reset_reason);
return;
} else if (unlikely(!opts->suboptions)) {
/* Fallback to TCP */
mptcp_track_rwin(tp);
return;
}
if (OPTION_MPTCP_PRIO & opts->suboptions) {

View File

@@ -29,6 +29,7 @@ struct mptcp_pm_add_entry {
u8 retrans_times;
struct timer_list add_timer;
struct mptcp_sock *sock;
struct rcu_head rcu;
};
struct pm_nl_pernet {
@@ -344,22 +345,27 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
struct timer_list *add_timer = NULL;
bool stop_timer = false;
rcu_read_lock();
spin_lock_bh(&msk->pm.lock);
entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
if (entry && (!check_id || entry->addr.id == addr->id)) {
entry->retrans_times = ADD_ADDR_RETRANS_MAX;
add_timer = &entry->add_timer;
stop_timer = true;
}
if (!check_id && entry)
list_del(&entry->list);
spin_unlock_bh(&msk->pm.lock);
/* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */
if (add_timer)
sk_stop_timer_sync(sk, add_timer);
/* Note: entry might have been removed by another thread.
* We hold rcu_read_lock() to ensure it is not freed under us.
*/
if (stop_timer)
sk_stop_timer_sync(sk, &entry->add_timer);
rcu_read_unlock();
return entry;
}
@@ -415,7 +421,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
list_for_each_entry_safe(entry, tmp, &free_list, list) {
sk_stop_timer_sync(sk, &entry->add_timer);
kfree(entry);
kfree_rcu(entry, rcu);
}
}
@@ -1573,7 +1579,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
entry = mptcp_pm_del_add_timer(msk, addr, false);
if (entry) {
kfree(entry);
kfree_rcu(entry, rcu);
return true;
}
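The pm_netlink change is the standard RCU deferred-free recipe: embed a struct rcu_head in the object, perform lookups under rcu_read_lock(), and replace kfree() with kfree_rcu() so the memory outlives any reader still inside its critical section. A generic sketch of the pattern (structure and names are illustrative, not the MPTCP ones):

struct foo {
	struct list_head list;
	struct timer_list timer;
	struct rcu_head rcu;	/* reserved for the deferred free */
};

static void foo_release(struct foo *f)
{
	/* A reader under rcu_read_lock() may still hold a pointer to f,
	 * as mptcp_pm_del_add_timer() does while stopping the timer, so
	 * the actual free is deferred past the grace period.
	 */
	kfree_rcu(f, rcu);
}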

View File

@@ -57,11 +57,13 @@ static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
{
unsigned short family = READ_ONCE(sk->sk_family);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if (sk->sk_prot == &tcpv6_prot)
if (family == AF_INET6)
return &inet6_stream_ops;
#endif
WARN_ON_ONCE(sk->sk_prot != &tcp_prot);
WARN_ON_ONCE(family != AF_INET);
return &inet_stream_ops;
}
@@ -902,6 +904,13 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_state != TCP_ESTABLISHED)
return false;
/* The caller may not be holding the msk socket lock, but
* in the fallback case only the current subflow is touching
* the OoO queue.
*/
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
return false;
spin_lock_bh(&msk->fallback_lock);
if (!msk->allow_subflows) {
spin_unlock_bh(&msk->fallback_lock);
@@ -959,14 +968,19 @@ static void mptcp_reset_rtx_timer(struct sock *sk)
bool mptcp_schedule_work(struct sock *sk)
{
if (inet_sk_state_load(sk) != TCP_CLOSE &&
schedule_work(&mptcp_sk(sk)->work)) {
/* each subflow already holds a reference to the sk, and the
* workqueue is invoked by a subflow, so sk can't go away here.
*/
sock_hold(sk);
if (inet_sk_state_load(sk) == TCP_CLOSE)
return false;
/* Get a reference on this socket, mptcp_worker() will release it.
* As mptcp_worker() might complete before us, we cannot avoid
* a sock_hold()/sock_put() if schedule_work() returns false.
*/
sock_hold(sk);
if (schedule_work(&mptcp_sk(sk)->work))
return true;
}
sock_put(sk);
return false;
}
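The rewritten mptcp_schedule_work() adopts the take-the-reference-first idiom: acquire the hold before schedule_work(), so the worker can never run, finish, and drop the last reference while the scheduling path is still in flight. Schematically:

sock_hold(sk);			/* reference owned by the worker */
if (!schedule_work(&msk->work))
	sock_put(sk);		/* work already queued elsewhere: undo */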
@@ -2578,7 +2592,8 @@ static void __mptcp_close_subflow(struct sock *sk)
if (ssk_state != TCP_CLOSE &&
(ssk_state != TCP_CLOSE_WAIT ||
inet_sk_state_load(sk) != TCP_ESTABLISHED))
inet_sk_state_load(sk) != TCP_ESTABLISHED ||
__mptcp_check_fallback(msk)))
continue;
/* 'subflow_data_ready' will re-sched once rx queue is empty */
@@ -2814,7 +2829,11 @@ static void mptcp_worker(struct work_struct *work)
__mptcp_close_subflow(sk);
if (mptcp_close_tout_expired(sk)) {
struct mptcp_subflow_context *subflow, *tmp;
mptcp_do_fastclose(sk);
mptcp_for_each_subflow_safe(msk, subflow, tmp)
__mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0);
mptcp_close_wake_up(sk);
}
@@ -3242,7 +3261,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
/* msk->subflow is still intact, the following will not free the first
* subflow
*/
mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
mptcp_do_fastclose(sk);
mptcp_destroy_common(msk);
/* The first subflow is already in TCP_CLOSE status, the following
* can't overlap with a fallback anymore
@@ -3424,7 +3444,7 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
void mptcp_destroy_common(struct mptcp_sock *msk)
{
struct mptcp_subflow_context *subflow, *tmp;
struct sock *sk = (struct sock *)msk;
@@ -3433,7 +3453,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
/* join list will be eventually flushed (with rst) at sock lock release time */
mptcp_for_each_subflow_safe(msk, subflow, tmp)
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
mptcp_data_lock(sk);
@@ -3458,7 +3478,7 @@ static void mptcp_destroy(struct sock *sk)
/* allow the following to close even the initial subflow */
msk->free_first = 1;
mptcp_destroy_common(msk, 0);
mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}

View File

@@ -478,6 +478,7 @@ struct mptcp_subflow_context {
u64 remote_key;
u64 idsn;
u64 map_seq;
u64 rcv_wnd_sent;
u32 snd_isn;
u32 token;
u32 rel_write_seq;
@@ -870,7 +871,7 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
local_bh_enable();
}
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
void mptcp_destroy_common(struct mptcp_sock *msk);
#define MPTCP_TOKEN_MAX_RETRIES 4

View File

@@ -2095,6 +2095,10 @@ void __init mptcp_subflow_init(void)
tcp_prot_override = tcp_prot;
tcp_prot_override.release_cb = tcp_release_cb_override;
tcp_prot_override.diag_destroy = tcp_abort_override;
#ifdef CONFIG_BPF_SYSCALL
/* Disable sockmap processing for subflows */
tcp_prot_override.psock_update_sk_prot = NULL;
#endif
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
@@ -2132,6 +2136,10 @@ void __init mptcp_subflow_init(void)
tcpv6_prot_override = tcpv6_prot;
tcpv6_prot_override.release_cb = tcp_release_cb_override;
tcpv6_prot_override.diag_destroy = tcp_abort_override;
#ifdef CONFIG_BPF_SYSCALL
/* Disable sockmap processing for subflows */
tcpv6_prot_override.psock_update_sk_prot = NULL;
#endif
#endif
mptcp_diag_subflow_init(&subflow_ulp_ops);
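Both address families use the same copy-and-patch trick on struct proto: start from the stock TCP ops and null out the one hook that should not apply. BPF sockmap installs itself through psock_update_sk_prot, so clearing it makes sockmap's update path bail out for subflow sockets. A condensed sketch (the init function name is hypothetical):

static struct proto subflow_prot_override;

static void subflow_prot_override_init(void)
{
	subflow_prot_override = tcp_prot;	/* inherit all stock ops */
#ifdef CONFIG_BPF_SYSCALL
	/* sockmap probes this hook; leaving it NULL rejects the socket */
	subflow_prot_override.psock_update_sk_prot = NULL;
#endif
}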

View File

@@ -597,69 +597,6 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
const struct nlattr *a)
{
struct nshhdr *nh;
size_t length;
int err;
u8 flags;
u8 ttl;
int i;
struct ovs_key_nsh key;
struct ovs_key_nsh mask;
err = nsh_key_from_nlattr(a, &key, &mask);
if (err)
return err;
/* Make sure the NSH base header is there */
if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
return -ENOMEM;
nh = nsh_hdr(skb);
length = nsh_hdr_len(nh);
/* Make sure the whole NSH header is there */
err = skb_ensure_writable(skb, skb_network_offset(skb) +
length);
if (unlikely(err))
return err;
nh = nsh_hdr(skb);
skb_postpull_rcsum(skb, nh, length);
flags = nsh_get_flags(nh);
flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
flow_key->nsh.base.flags = flags;
ttl = nsh_get_ttl(nh);
ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
flow_key->nsh.base.ttl = ttl;
nsh_set_flags_and_ttl(nh, flags, ttl);
nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
mask.base.path_hdr);
flow_key->nsh.base.path_hdr = nh->path_hdr;
switch (nh->mdtype) {
case NSH_M_TYPE1:
for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
nh->md1.context[i] =
OVS_MASKED(nh->md1.context[i], key.context[i],
mask.context[i]);
}
memcpy(flow_key->nsh.context, nh->md1.context,
sizeof(nh->md1.context));
break;
case NSH_M_TYPE2:
memset(flow_key->nsh.context, 0,
sizeof(flow_key->nsh.context));
break;
default:
return -EINVAL;
}
skb_postpush_rcsum(skb, nh, length);
return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
@@ -1143,10 +1080,6 @@ static int execute_masked_set_action(struct sk_buff *skb,
get_mask(a, struct ovs_key_ethernet *));
break;
case OVS_KEY_ATTR_NSH:
err = set_nsh(skb, flow_key, a);
break;
case OVS_KEY_ATTR_IPV4:
err = set_ipv4(skb, flow_key, nla_data(a),
get_mask(a, struct ovs_key_ipv4 *));
@@ -1183,6 +1116,7 @@ static int execute_masked_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_CT_LABELS:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
case OVS_KEY_ATTR_NSH:
err = -EINVAL;
break;
}

View File

@@ -1292,6 +1292,11 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
return 0;
}
/*
* Constructs NSH header 'nh' from attributes of OVS_ACTION_ATTR_PUSH_NSH,
* where 'nh' points to a memory block of 'size' bytes. It's assumed that
* attributes were previously validated with validate_push_nsh().
*/
int nsh_hdr_from_nlattr(const struct nlattr *attr,
struct nshhdr *nh, size_t size)
{
@@ -1301,8 +1306,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
u8 ttl = 0;
int mdlen = 0;
/* validate_nsh has checked this, so we needn't do a duplicate check here
*/
if (size < NSH_BASE_HDR_LEN)
return -ENOBUFS;
@@ -1346,46 +1349,6 @@ int nsh_hdr_from_nlattr(const struct nlattr *attr,
return 0;
}
int nsh_key_from_nlattr(const struct nlattr *attr,
struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
{
struct nlattr *a;
int rem;
/* validate_nsh has checked this, so we needn't do a duplicate check here
*/
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
switch (type) {
case OVS_NSH_KEY_ATTR_BASE: {
const struct ovs_nsh_key_base *base = nla_data(a);
const struct ovs_nsh_key_base *base_mask = base + 1;
nsh->base = *base;
nsh_mask->base = *base_mask;
break;
}
case OVS_NSH_KEY_ATTR_MD1: {
const struct ovs_nsh_key_md1 *md1 = nla_data(a);
const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;
memcpy(nsh->context, md1->context, sizeof(*md1));
memcpy(nsh_mask->context, md1_mask->context,
sizeof(*md1_mask));
break;
}
case OVS_NSH_KEY_ATTR_MD2:
/* Not supported yet */
return -ENOTSUPP;
default:
return -EINVAL;
}
}
return 0;
}
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
struct sw_flow_match *match, bool is_mask,
bool is_push_nsh, bool log)
@@ -2825,17 +2788,13 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
return err;
}
static bool validate_nsh(const struct nlattr *attr, bool is_mask,
bool is_push_nsh, bool log)
static bool validate_push_nsh(const struct nlattr *attr, bool log)
{
struct sw_flow_match match;
struct sw_flow_key key;
int ret = 0;
ovs_match_init(&match, &key, true, NULL);
ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
is_push_nsh, log);
return !ret;
return !nsh_key_put_from_nlattr(attr, &match, false, true, log);
}
/* Return false if there are any non-masked bits set.
@@ -2983,13 +2942,6 @@ static int validate_set(const struct nlattr *a,
break;
case OVS_KEY_ATTR_NSH:
if (eth_type != htons(ETH_P_NSH))
return -EINVAL;
if (!validate_nsh(nla_data(a), masked, false, log))
return -EINVAL;
break;
default:
return -EINVAL;
}
@@ -3399,7 +3351,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
return -EINVAL;
}
mac_proto = MAC_PROTO_NONE;
if (!validate_nsh(nla_data(a), false, true, true))
if (!validate_push_nsh(nla_data(a), log))
return -EINVAL;
break;

View File

@@ -65,8 +65,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
void ovs_nla_free_flow_actions(struct sw_flow_actions *);
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
int nsh_key_from_nlattr(const struct nlattr *attr, struct ovs_key_nsh *nsh,
struct ovs_key_nsh *nsh_mask);
int nsh_hdr_from_nlattr(const struct nlattr *attr, struct nshhdr *nh,
size_t size);

View File

@@ -727,8 +727,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
tls_offload_rx_resync_async_request_cancel(resync_async);
return false;
}
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request

View File

@@ -1550,18 +1550,40 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
timeout = schedule_timeout(timeout);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
vsock_remove_connected(vsk);
goto out_wait;
} else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
err = -ETIMEDOUT;
/* Connection established. Whatever happens to the socket once we
* release it, that's not connect()'s concern. No need to go
* into signal and timeout handling. Call it a day.
*
* Note that allowing to "reset" an already established socket
* here is racy and insecure.
*/
if (sk->sk_state == TCP_ESTABLISHED)
break;
/* If the connection was _not_ established and a signal arrived or
* the timeout expired, we want the socket's state reset. User space
* may want to retry.
*
* sk_state != TCP_ESTABLISHED implies that the socket is not on
* vsock_connected_table. We keep the binding and the transport
* assigned.
*/
if (signal_pending(current) || timeout == 0) {
err = timeout == 0 ? -ETIMEDOUT : sock_intr_errno(timeout);
/* Listener might have already responded with
* VIRTIO_VSOCK_OP_RESPONSE. Its handling expects our
* sk_state == TCP_SYN_SENT, which we break here.
* In that case a VIRTIO_VSOCK_OP_RST will follow.
*/
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
/* Try to cancel VIRTIO_VSOCK_OP_REQUEST skb sent out by
* transport->connect().
*/
vsock_transport_cancel_pkt(vsk);
goto out_wait;
}
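One subtlety in the error selection above: sock_intr_errno() yields -ERESTARTSYS only when the wait was unbounded (MAX_SCHEDULE_TIMEOUT) and -EINTR once a finite timeout has started ticking, so connect() is transparently restartable only if no timeout was requested. The helper is essentially:

/* include/net/sock.h */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}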

View File

@@ -4208,6 +4208,9 @@ EXPORT_SYMBOL(regulatory_pre_cac_allowed);
static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev;
wiphy_lock(&rdev->wiphy);
/* If we finished CAC or received radar, we should end any
* CAC running on the same channels.
* the check !cfg80211_chandef_dfs_usable covers 2 cases:
@@ -4231,6 +4234,8 @@ static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev)
if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef))
rdev_end_cac(rdev, wdev->netdev);
}
wiphy_unlock(&rdev->wiphy);
}
void regulatory_propagate_dfs_state(struct wiphy *wiphy,

View File

@@ -766,8 +766,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
/* Exclusive direct xmit for tunnel mode, as
* some filtering or matching rules may apply
* in transport mode.
* Locally generated packets also require
* the normal XFRM path for L2 header setup,
* as the hardware needs the L2 header to match
* for encryption, so skip direct output as well.
*/
if (x->props.mode == XFRM_MODE_TUNNEL)
if (x->props.mode == XFRM_MODE_TUNNEL && !skb->sk)
return xfrm_dev_direct_output(sk, x, skb);
return xfrm_output_resume(sk, skb, 0);

View File

@@ -12,6 +12,7 @@
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <locale.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
@@ -1015,6 +1016,8 @@ int main(int ac, char **av)
signal(SIGINT, sig_handler);
setlocale(LC_ALL, "");
if (ac > 1 && strcmp(av[1], "-s") == 0) {
silent = 1;
/* Silence conf_read() until the real callback is set up */

View File

@@ -7,6 +7,7 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <locale.h>
#include <string.h>
#include <strings.h>
#include <stdlib.h>
@@ -1569,6 +1570,8 @@ int main(int ac, char **av)
int lines, columns;
char *mode;
setlocale(LC_ALL, "");
if (ac > 1 && strcmp(av[1], "-s") == 0) {
/* Silence conf_read() until the real callback is set up */
conf_set_message_callback(NULL);
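Both kconfig frontends gain the same one-liner. setlocale(LC_ALL, "") switches the process from the default "C" locale to the user's environment locale before any curses initialization, which is what lets multibyte (UTF-8) prompts and line drawing render correctly. A minimal standalone illustration:

#include <locale.h>
#include <stdio.h>

int main(void)
{
	/* Adopt LANG/LC_* from the environment; without this the C
	 * library stays in the "C" locale and multibyte-aware code
	 * (such as ncursesw) misrenders UTF-8 text.
	 */
	setlocale(LC_ALL, "");
	printf("héllo, wörld\n");
	return 0;
}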

View File

@@ -930,7 +930,7 @@ static int parse_term_uac2_clock_source(struct mixer_build *state,
{
struct uac_clock_source_descriptor *d = p1;
term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
term->type = UAC2_CLOCK_SOURCE << 16; /* virtual type */
term->id = id;
term->name = d->iClockSource;
return 0;

View File

@@ -1,4 +1,4 @@
#!/bin/sh
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Test various bareudp tunnel configurations.

View File

@@ -3741,7 +3741,7 @@ endpoint_tests()
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
speed=slow \
test_linkfail=128 speed=slow \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
@@ -3768,7 +3768,7 @@ endpoint_tests()
pm_nl_set_limits $ns2 0 3
pm_nl_add_endpoint $ns2 10.0.1.2 id 1 dev ns2eth1 flags subflow
pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
test_linkfail=4 speed=5 \
test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
@@ -3845,7 +3845,7 @@ endpoint_tests()
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns1 10.0.1.1 id 42 flags signal
test_linkfail=4 speed=5 \
test_linkfail=128 speed=5 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!
@@ -3917,7 +3917,7 @@ endpoint_tests()
# broadcast IP: no packet for this address will be received on ns1
pm_nl_add_endpoint $ns1 224.0.0.1 id 2 flags signal
pm_nl_add_endpoint $ns2 10.0.3.2 id 3 flags subflow
test_linkfail=4 speed=20 \
test_linkfail=128 speed=20 \
run_tests $ns1 $ns2 10.0.1.1 &
local tests_pid=$!

View File

@@ -1725,7 +1725,7 @@ static void show_usage(void)
"-n, --notrace\t\tIf latency is detected, do not print out the content of\n"
"\t\t\tthe trace file to standard output\n\n"
"-t, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n"
"-e, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n"
"-r, --random\t\tArbitrarily sleep a certain amount of time, default\n"
"\t\t\t%ld ms, before reading the trace file. The\n"