Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
@@ -18,6 +18,7 @@ Required properties:
 	"allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
 	"allwinner,sun4i-a10-axi-clk" - for the AXI clock
 	"allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
 	"allwinner,sun4i-a10-gates-clk" - for generic gates on all compatible SoCs
 	"allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
 	"allwinner,sun4i-a10-ahb-clk" - for the AHB clock
 	"allwinner,sun5i-a13-ahb-clk" - for the AHB clock on A13
@@ -43,6 +44,7 @@ Required properties:
 	"allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
 	"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
 	"allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
 	"allwinner,sun8i-h3-apb0-gates-clk" - for the APB0 gates on H3
 	"allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80
 	"allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
 	"allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80
@@ -265,6 +265,13 @@ aio-nr can grow to.
 
 ==============================================================
 
+mount-max:
+
+This denotes the maximum number of mounts that may exist
+in a mount namespace.
+
+==============================================================
+
 
 2. /proc/sys/fs/binfmt_misc
 ----------------------------------------------------------
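Since section 1 of this file documents /proc/sys/fs, the new limit is exposed as /proc/sys/fs/mount-max (default 100000, set in the fs/namespace.c hunk further down). A minimal userspace sketch for reading the current value, assuming procfs is mounted at /proc:

#include <stdio.h>

int main(void)
{
	unsigned int max;
	FILE *f = fopen("/proc/sys/fs/mount-max", "r");

	if (!f) {
		perror("open /proc/sys/fs/mount-max");
		return 1;
	}
	if (fscanf(f, "%u", &max) == 1)
		printf("mount-max = %u\n", max);
	fclose(f);
	return 0;
}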
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 64
+SUBLEVEL = 66
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
@@ -84,7 +84,7 @@ struct pt_regs {
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
 	__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
 				  2, "timer_sys_ck", NULL, false);
 
-	if (of_have_populated_dt())
-		clocksource_probe();
+	clocksource_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
 	__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
 				  2, "timer_sys_ck", NULL, false);
+
+	clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
 {
 	__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
 				  1, "timer_sys_ck", "ti,timer-alwon", true);
+
+	clocksource_probe();
 }
 #endif
@@ -14,12 +14,22 @@ static int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 #ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
 {
-	struct pt_regs *regs;
+	struct pt_regs *regs = passed_regs;
 	int cpu = smp_processor_id();
 
-	regs = task_pt_regs(current);
+	/*
+	 * If we are passed registers, use those.  Otherwise get the
+	 * regs from the last interrupt, which should be correct, as
+	 * we are in an interrupt.  But if the regs are not there,
+	 * pull them from the top of the stack.  They are probably
+	 * wrong, but we need something to keep from crashing again.
+	 */
+	if (!regs)
+		regs = get_irq_regs();
+	if (!regs)
+		regs = task_pt_regs(current);
 
 	if (!cpu_online(cpu))
 		return;
@@ -206,7 +206,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
 	else if ((prog_req.fr1 && prog_req.frdefault) ||
 		 (prog_req.single && !prog_req.frdefault))
 		/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
-		state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+		state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
 					  cpu_has_mips_r2_r6) ?
 					  FP_FR1 : FP_FR0;
 	else if (prog_req.fr1)
@@ -244,9 +244,6 @@ static int compute_signal(int tt)
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
 	int reg;
-	struct thread_info *ti = task_thread_info(p);
-	unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-	struct pt_regs *regs = (struct pt_regs *)ksp - 1;
 #if (KGDB_GDB_REG_SIZE == 32)
 	u32 *ptr = (u32 *)gdb_regs;
 #else
@@ -254,25 +251,46 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 #endif
 
 	for (reg = 0; reg < 16; reg++)
-		*(ptr++) = regs->regs[reg];
+		*(ptr++) = 0;
 
 	/* S0 - S7 */
-	for (reg = 16; reg < 24; reg++)
-		*(ptr++) = regs->regs[reg];
+	*(ptr++) = p->thread.reg16;
+	*(ptr++) = p->thread.reg17;
+	*(ptr++) = p->thread.reg18;
+	*(ptr++) = p->thread.reg19;
+	*(ptr++) = p->thread.reg20;
+	*(ptr++) = p->thread.reg21;
+	*(ptr++) = p->thread.reg22;
+	*(ptr++) = p->thread.reg23;
 
 	for (reg = 24; reg < 28; reg++)
 		*(ptr++) = 0;
 
 	/* GP, SP, FP, RA */
-	for (reg = 28; reg < 32; reg++)
-		*(ptr++) = regs->regs[reg];
+	*(ptr++) = (long)p;
+	*(ptr++) = p->thread.reg29;
+	*(ptr++) = p->thread.reg30;
+	*(ptr++) = p->thread.reg31;
 
-	*(ptr++) = regs->cp0_status;
-	*(ptr++) = regs->lo;
-	*(ptr++) = regs->hi;
-	*(ptr++) = regs->cp0_badvaddr;
-	*(ptr++) = regs->cp0_cause;
-	*(ptr++) = regs->cp0_epc;
+	*(ptr++) = p->thread.cp0_status;
+
+	/* lo, hi */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * BadVAddr, Cause
+	 * Ideally these would come from the last exception frame up the stack
+	 * but that requires unwinding, otherwise we can't know much for sure.
+	 */
+	*(ptr++) = 0;
+	*(ptr++) = 0;
+
+	/*
+	 * PC
+	 * use return address (RA), i.e. the moment after return from resume()
+	 */
+	*(ptr++) = p->thread.reg31;
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
@@ -668,6 +668,14 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 	return pte_pfn(pte);
 }
 
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+	pte_t pte = __pte(pmd_val(pmd));
+
+	return pte_write(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
@@ -683,13 +691,6 @@ static inline unsigned long pmd_young(pmd_t pmd)
 	return pte_young(pte);
 }
 
-static inline unsigned long pmd_write(pmd_t pmd)
-{
-	pte_t pte = __pte(pmd_val(pmd));
-
-	return pte_write(pte);
-}
-
 static inline unsigned long pmd_trans_huge(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
@@ -1493,7 +1493,7 @@ bool kern_addr_valid(unsigned long addr)
 	if ((long)addr < 0L) {
 		unsigned long pa = __pa(addr);
 
-		if ((addr >> max_phys_bits) != 0UL)
+		if ((pa >> max_phys_bits) != 0UL)
 			return false;
 
 		return pfn_valid(pa >> PAGE_SHIFT);
@@ -977,6 +977,18 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	/*
+	 * When resuming from suspend-to-ram, this function can be indirectly
+	 * called from early CPU startup code while the CPU is in real mode,
+	 * which would fail miserably.  Make sure the stack pointer is a
+	 * virtual address.
+	 *
+	 * This check isn't as accurate as virt_addr_valid(), but it should be
+	 * good enough for this purpose, and it's fast.
+	 */
+	if (unlikely((long)__builtin_frame_address(0) >= 0))
+		return;
+
 	if (unlikely(ftrace_graph_is_dead()))
 		return;
@@ -343,11 +343,11 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
 	WARN_ON(!clockevent_state_oneshot(evt));
 
 	single.timeout_abs_ns = get_abs_timeout(delta);
-	single.flags = VCPU_SSHOTTMR_future;
+	/* Get an event anyway, even if the timeout is already expired */
+	single.flags = 0;
 
 	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
-
-	BUG_ON(ret != 0 && ret != -ETIME);
+	BUG_ON(ret != 0);
 
 	return ret;
 }
@@ -488,6 +488,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				  tcrypt_complete, &result);
 
+	iv_len = crypto_aead_ivsize(tfm);
+
 	for (i = 0, j = 0; i < tcount; i++) {
 		if (template[i].np)
 			continue;
@@ -508,7 +510,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
 		memcpy(input, template[i].input, template[i].ilen);
 		memcpy(assoc, template[i].assoc, template[i].alen);
-		iv_len = crypto_aead_ivsize(tfm);
 		if (template[i].iv)
 			memcpy(iv, template[i].iv, iv_len);
 		else
@@ -617,7 +618,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 		j++;
 
 		if (template[i].iv)
-			memcpy(iv, template[i].iv, MAX_IVLEN);
+			memcpy(iv, template[i].iv, iv_len);
 		else
 			memset(iv, 0, MAX_IVLEN);
 
@@ -98,6 +98,8 @@ static void __init sunxi_simple_gates_init(struct device_node *node)
 	sunxi_simple_gates_setup(node, NULL, 0);
 }
 
 CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk",
 	       sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
 	       sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
@@ -2258,7 +2258,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
 	if (pi->caps_stable_p_state) {
 		stable_p_state_sclk = (max_limits->sclk * 75) / 100;
 
-		for (i = table->count - 1; i >= 0; i++) {
+		for (i = table->count - 1; i >= 0; i--) {
 			if (stable_p_state_sclk >= table->entries[i].clk) {
 				stable_p_state_sclk = table->entries[i].clk;
 				break;
@@ -685,6 +685,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
 		},
 	},
+	{
+		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+		},
+	},
 	{ }
 };
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		if (best_dist_disk < 0) {
 			if (is_badblock(rdev, this_sector, sectors,
 					&first_bad, &bad_sectors)) {
-				if (first_bad < this_sector)
+				if (first_bad <= this_sector)
 					/* Cannot use this */
 					continue;
 				best_good_sectors = first_bad - this_sector;
@@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 		if (!g) {
 			netif_info(lio, tx_err, lio->netdev,
 				   "Transmit scatter gather: glist null!\n");
-			goto lio_xmit_failed;
+			goto lio_xmit_dma_failed;
 		}
 
 		cmdsetup.s.gather = 1;
@@ -2894,7 +2894,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
 	if (status == IQ_SEND_FAILED)
-		goto lio_xmit_failed;
+		goto lio_xmit_dma_failed;
 
 	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
@@ -2908,12 +2908,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	return NETDEV_TX_OK;
 
+lio_xmit_dma_failed:
+	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+			 ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
 	stats->tx_dropped++;
 	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
 		   iq_no, stats->tx_dropped);
-	dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-			 ndata.datasize, DMA_TO_DEVICE);
 	recv_buffer_free(skb);
 	return NETDEV_TX_OK;
 }
@@ -1110,6 +1110,7 @@ static int macvlan_port_create(struct net_device *dev)
 static void macvlan_port_destroy(struct net_device *dev)
 {
 	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+	struct sk_buff *skb;
 
 	dev->priv_flags &= ~IFF_MACVLAN_PORT;
 	netdev_rx_handler_unregister(dev);
@@ -1118,7 +1119,15 @@ static void macvlan_port_destroy(struct net_device *dev)
 	 * but we need to cancel it and purge left skbs if any.
 	 */
 	cancel_work_sync(&port->bc_work);
-	__skb_queue_purge(&port->bc_queue);
+
+	while ((skb = __skb_dequeue(&port->bc_queue))) {
+		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+		if (src)
+			dev_put(src->dev);
+
+		kfree_skb(skb);
+	}
 
 	kfree_rcu(port, rcu);
 }
@@ -1436,8 +1436,6 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 		skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
 		skb_queue_tail(&dp83640->rx_queue, skb);
 		schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
-	} else {
-		netif_rx_ni(skb);
 	}
 
 	return true;
@@ -538,7 +538,7 @@ void phy_stop_machine(struct phy_device *phydev)
 	cancel_delayed_work_sync(&phydev->state_queue);
 
 	mutex_lock(&phydev->lock);
-	if (phydev->state > PHY_UP)
+	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
 		phydev->state = PHY_UP;
 	mutex_unlock(&phydev->lock);
 }
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
 	spin_lock_bh(&local->baplock);
 
 	res = hfa384x_setup_bap(dev, BAP0, rid, 0);
-	if (!res)
-		res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+	if (res)
+		goto unlock;
+
+	res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+	if (res)
+		goto unlock;
 
 	if (le16_to_cpu(rec.len) == 0) {
 		/* RID not available */
 		res = -ENODATA;
+		goto unlock;
 	}
 
 	rlen = (le16_to_cpu(rec.len) - 1) * 2;
-	if (!res && exact_len && rlen != len) {
+	if (exact_len && rlen != len) {
 		printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
 		       "rid=0x%04x, len=%d (expected %d)\n",
 		       dev->name, rid, rlen, len);
 		res = -ENODATA;
 	}
 
-	if (!res)
-		res = hfa384x_from_bap(dev, BAP0, buf, len);
+	res = hfa384x_from_bap(dev, BAP0, buf, len);
 
+unlock:
 	spin_unlock_bh(&local->baplock);
 	mutex_unlock(&local->rid_bap_mtx);
 
@@ -1519,6 +1519,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 		ret = regulator_enable(rdev->supply);
 		if (ret < 0) {
 			_regulator_put(rdev->supply);
+			rdev->supply = NULL;
 			return ret;
 		}
 	}
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
 	struct tty_struct *tty = tty_port_tty_get(&port->port);
 	int i, ret;
 
-	read_mem32((u32 *) &size, addr, 4);
+	size = __le32_to_cpu(readl(addr));
 	/*  DBG1( "%d bytes port: %d", size, index); */
 
 	if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
@@ -361,7 +361,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 	/* allocate a bunch of read buffers and queue them all at once. */
 	for (i = 0; i < midi->qlen && err == 0; i++) {
 		struct usb_request *req =
-			midi_alloc_ep_req(midi->out_ep, midi->buflen);
+			midi_alloc_ep_req(midi->out_ep,
+				max_t(unsigned, midi->buflen,
+					bulk_out_desc.wMaxPacketSize));
 		if (req == NULL)
 			return -ENOMEM;
@@ -562,8 +562,9 @@ static long vfio_pci_ioctl(void *device_data,
 
 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
 		struct vfio_irq_set hdr;
+		size_t size;
 		u8 *data = NULL;
-		int ret = 0;
+		int max, ret = 0;
 
 		minsz = offsetofend(struct vfio_irq_set, count);
 
@@ -571,23 +572,31 @@ static long vfio_pci_ioctl(void *device_data,
 			return -EFAULT;
 
 		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+		    hdr.count >= (U32_MAX - hdr.start) ||
 		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
 				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
 			return -EINVAL;
 
-		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
-			size_t size;
-			int max = vfio_pci_get_irq_count(vdev, hdr.index);
+		max = vfio_pci_get_irq_count(vdev, hdr.index);
+		if (hdr.start >= max || hdr.start + hdr.count > max)
+			return -EINVAL;
 
-			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
-				size = sizeof(uint8_t);
-			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
-				size = sizeof(int32_t);
-			else
-				return -EINVAL;
+		switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+		case VFIO_IRQ_SET_DATA_NONE:
+			size = 0;
+			break;
+		case VFIO_IRQ_SET_DATA_BOOL:
+			size = sizeof(uint8_t);
+			break;
+		case VFIO_IRQ_SET_DATA_EVENTFD:
+			size = sizeof(int32_t);
+			break;
+		default:
+			return -EINVAL;
+		}
 
-			if (hdr.argsz - minsz < hdr.count * size ||
-			    hdr.start >= max || hdr.start + hdr.count > max)
+		if (size) {
+			if (hdr.argsz - minsz < hdr.count * size)
 				return -EINVAL;
 
 			data = memdup_user((void __user *)(arg + minsz),
@@ -255,7 +255,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
 	if (!is_irq_none(vdev))
 		return -EINVAL;
 
-	vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+	vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
 	if (!vdev->ctx)
 		return -ENOMEM;
 
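The kzalloc-to-kcalloc switch above matters because nvec * sizeof(struct vfio_pci_irq_ctx) can wrap around for a large nvec, yielding an undersized allocation; kcalloc performs the same zeroed allocation but fails on overflow instead. A userspace analogue of the distinction (hypothetical helper, for illustration only):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical analogue of kzalloc(n * size) vs kcalloc(n, size):
 * reject the multiplication overflow instead of silently wrapping. */
static void *zalloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* calloc zeroes, like kcalloc */
}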
@@ -233,6 +233,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
 	return error;
 }
 
+static int
+__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+		    void *end, const char *function, unsigned int line)
+{
+	struct ext4_xattr_entry *entry = IFIRST(header);
+	int error = -EFSCORRUPTED;
+
+	if (((void *) header >= end) ||
+	    (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+		goto errout;
+	error = ext4_xattr_check_names(entry, end, entry);
+errout:
+	if (error)
+		__ext4_error_inode(inode, function, line, 0,
+				   "corrupted in-inode xattr");
+	return error;
+}
+
+#define xattr_check_inode(inode, header, end) \
+	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+
 static inline int
 ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
 {
@@ -344,7 +365,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
 	header = IHDR(inode, raw_inode);
 	entry = IFIRST(header);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(entry, end, entry);
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -475,7 +496,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	raw_inode = ext4_raw_inode(&iloc);
 	header = IHDR(inode, raw_inode);
 	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
-	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
+	error = xattr_check_inode(inode, header, end);
 	if (error)
 		goto cleanup;
 	error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -1017,8 +1038,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
 	is->s.here = is->s.first;
 	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
-		error = ext4_xattr_check_names(IFIRST(header), is->s.end,
-					       IFIRST(header));
+		error = xattr_check_inode(inode, header, is->s.end);
 		if (error)
 			return error;
 		/* Find the named attribute. */
@@ -1319,6 +1339,10 @@ retry:
 	last = entry;
 	total_ino = sizeof(struct ext4_xattr_ibody_header);
 
+	error = xattr_check_inode(inode, header, end);
+	if (error)
+		goto cleanup;
+
 	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
 	if (free >= isize_diff) {
 		entry = IFIRST(header);
@@ -918,6 +918,79 @@ static loff_t max_file_size(unsigned bits)
 	return result;
 }
 
+static inline bool sanity_check_area_boundary(struct super_block *sb,
+					struct f2fs_super_block *raw_super)
+{
+	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
+	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
+	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
+	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
+	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
+	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
+	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
+	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
+	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+	u32 segment_count = le32_to_cpu(raw_super->segment_count);
+	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+
+	if (segment0_blkaddr != cp_blkaddr) {
+		f2fs_msg(sb, KERN_INFO,
+			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
+			segment0_blkaddr, cp_blkaddr);
+		return true;
+	}
+
+	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
+							sit_blkaddr) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
+			cp_blkaddr, sit_blkaddr,
+			segment_count_ckpt << log_blocks_per_seg);
+		return true;
+	}
+
+	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
+							nat_blkaddr) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
+			sit_blkaddr, nat_blkaddr,
+			segment_count_sit << log_blocks_per_seg);
+		return true;
+	}
+
+	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
+							ssa_blkaddr) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
+			nat_blkaddr, ssa_blkaddr,
+			segment_count_nat << log_blocks_per_seg);
+		return true;
+	}
+
+	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
+							main_blkaddr) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
+			ssa_blkaddr, main_blkaddr,
+			segment_count_ssa << log_blocks_per_seg);
+		return true;
+	}
+
+	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
+		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+		f2fs_msg(sb, KERN_INFO,
+			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+			main_blkaddr,
+			segment0_blkaddr + (segment_count << log_blocks_per_seg),
+			segment_count_main << log_blocks_per_seg);
+		return true;
+	}
+
+	return false;
+}
+
 static int sanity_check_raw_super(struct super_block *sb,
 			struct f2fs_super_block *raw_super)
 {
@@ -973,6 +1046,23 @@ static int sanity_check_raw_super(struct super_block *sb,
 			le32_to_cpu(raw_super->log_sectorsize));
 		return 1;
 	}
+
+	/* check reserved ino info */
+	if (le32_to_cpu(raw_super->node_ino) != 1 ||
+		le32_to_cpu(raw_super->meta_ino) != 2 ||
+		le32_to_cpu(raw_super->root_ino) != 3) {
+		f2fs_msg(sb, KERN_INFO,
+			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
+			le32_to_cpu(raw_super->node_ino),
+			le32_to_cpu(raw_super->meta_ino),
+			le32_to_cpu(raw_super->root_ino));
+		return 1;
+	}
+
+	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+	if (sanity_check_area_boundary(sb, raw_super))
+		return 1;
+
 	return 0;
 }
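The boundary checks above all enforce one invariant: each on-disk metadata area must end exactly where the next one begins, i.e. start + (segment_count << log_blocks_per_seg) == next_start. A standalone illustration with made-up layout values (not taken from any real superblock):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical restatement of the f2fs layout invariant checked above:
 * an area starting at 'start' with 'segs' segments of 2^log_blocks blocks
 * each must end exactly at 'next_start'. */
static int contiguous(uint32_t start, uint32_t segs, uint32_t log_blocks,
		      uint32_t next_start)
{
	return start + (segs << log_blocks) == next_start;
}

int main(void)
{
	/* e.g. a CP area at block 512 with 2 segments of 512 blocks each */
	printf("%d\n", contiguous(512, 2, 9, 1536)); /* 1: SIT may start at 1536 */
	printf("%d\n", contiguous(512, 2, 9, 2048)); /* 0: gap -> reject the mount */
	return 0;
}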
@@ -760,7 +760,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
 	int error;
 
 	error = get_leaf_nr(dip, index, &leaf_no);
-	if (!error)
+	if (!IS_ERR_VALUE(error))
 		error = get_leaf(dip, leaf_no, bh_out);
 
 	return error;
@@ -976,7 +976,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 
 	index = name->hash >> (32 - dip->i_depth);
 	error = get_leaf_nr(dip, index, &leaf_no);
-	if (error)
+	if (IS_ERR_VALUE(error))
 		return error;
 
 	/* Get the old leaf block */
@@ -13,6 +13,8 @@ struct mnt_namespace {
 	u64 seq;	/* Sequence number to prevent loops */
 	wait_queue_head_t poll;
 	u64 event;
+	unsigned int		mounts; /* # of mounts in the namespace */
+	unsigned int		pending_mounts;
 };
 
 struct mnt_pcp {
@@ -27,6 +27,9 @@
 #include "pnode.h"
 #include "internal.h"
 
+/* Maximum number of mounts in a mount namespace */
+unsigned int sysctl_mount_max __read_mostly = 100000;
+
 static unsigned int m_hash_mask __read_mostly;
 static unsigned int m_hash_shift __read_mostly;
 static unsigned int mp_hash_mask __read_mostly;
@@ -926,6 +929,9 @@ static void commit_tree(struct mount *mnt)
 
 	list_splice(&head, n->list.prev);
 
+	n->mounts += n->pending_mounts;
+	n->pending_mounts = 0;
+
 	__attach_mnt(mnt, parent);
 	touch_mnt_namespace(n);
 }
@@ -1465,11 +1471,16 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
 	propagate_umount(&tmp_list);
 
 	while (!list_empty(&tmp_list)) {
+		struct mnt_namespace *ns;
 		bool disconnect;
 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
 		list_del_init(&p->mnt_expire);
 		list_del_init(&p->mnt_list);
-		__touch_mnt_namespace(p->mnt_ns);
+		ns = p->mnt_ns;
+		if (ns) {
+			ns->mounts--;
+			__touch_mnt_namespace(ns);
+		}
 		p->mnt_ns = NULL;
 		if (how & UMOUNT_SYNC)
 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
@@ -1870,6 +1881,28 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 	return 0;
 }
 
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
+{
+	unsigned int max = READ_ONCE(sysctl_mount_max);
+	unsigned int mounts = 0, old, pending, sum;
+	struct mount *p;
+
+	for (p = mnt; p; p = next_mnt(p, mnt))
+		mounts++;
+
+	old = ns->mounts;
+	pending = ns->pending_mounts;
+	sum = old + pending;
+	if ((old > sum) ||
+	    (pending > sum) ||
+	    (max < sum) ||
+	    (mounts > (max - sum)))
+		return -ENOSPC;
+
+	ns->pending_mounts = pending + mounts;
+	return 0;
+}
+
 /*
  *  @source_mnt : mount tree to be attached
  *  @nd         : place the mount tree @source_mnt is attached
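The admission test in count_mounts() above is written to survive unsigned wrap-around: it rejects the request when old + pending has already overflowed, when the namespace is already over the limit, or when the new mounts would not fit in the remaining headroom. A standalone restatement of the same predicate (hypothetical helper name, for illustration only):

#include <stdio.h>

/* Hypothetical restatement of the overflow-safe check in count_mounts():
 * admit 'mounts' new entries only if old + pending + mounts <= max,
 * with every comparison safe against unsigned wrap-around. */
static int would_exceed(unsigned int old, unsigned int pending,
			unsigned int mounts, unsigned int max)
{
	unsigned int sum = old + pending;

	return (old > sum) ||		/* old + pending wrapped */
	       (pending > sum) ||
	       (max < sum) ||		/* already over the limit */
	       (mounts > (max - sum));	/* not enough headroom left */
}

int main(void)
{
	/* against the default limit of 100000 from the hunk above */
	printf("%d\n", would_exceed(99990, 0, 20, 100000)); /* 1: rejected */
	printf("%d\n", would_exceed(99980, 0, 20, 100000)); /* 0: admitted */
	return 0;
}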
@@ -1939,6 +1972,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 			struct path *parent_path)
 {
 	HLIST_HEAD(tree_list);
+	struct mnt_namespace *ns = dest_mnt->mnt_ns;
 	struct mountpoint *smp;
 	struct mount *child, *p;
 	struct hlist_node *n;
@@ -1951,6 +1985,13 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	if (IS_ERR(smp))
 		return PTR_ERR(smp);
 
+	/* Is there space to add these mounts to the mount namespace? */
+	if (!parent_path) {
+		err = count_mounts(ns, source_mnt);
+		if (err)
+			goto out;
+	}
+
 	if (IS_MNT_SHARED(dest_mnt)) {
 		err = invent_group_ids(source_mnt, true);
 		if (err)
@@ -1990,11 +2031,14 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 out_cleanup_ids:
 	while (!hlist_empty(&tree_list)) {
 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+		child->mnt_parent->mnt_ns->pending_mounts = 0;
 		umount_tree(child, UMOUNT_SYNC);
 	}
 	unlock_mount_hash();
 	cleanup_group_ids(source_mnt, NULL);
 out:
+	ns->pending_mounts = 0;
+
 	read_seqlock_excl(&mount_lock);
 	put_mountpoint(smp);
 	read_sequnlock_excl(&mount_lock);
@@ -2830,6 +2874,8 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 	init_waitqueue_head(&new_ns->poll);
 	new_ns->event = 0;
 	new_ns->user_ns = get_user_ns(user_ns);
+	new_ns->mounts = 0;
+	new_ns->pending_mounts = 0;
 	return new_ns;
 }
@@ -2879,6 +2925,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
+		new_ns->mounts++;
 		if (new_fs) {
 			if (&p->mnt == new_fs->root.mnt) {
 				new_fs->root.mnt = mntget(&q->mnt);
@@ -2917,6 +2964,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
 		struct mount *mnt = real_mount(m);
 		mnt->mnt_ns = new_ns;
 		new_ns->root = mnt;
+		new_ns->mounts++;
 		list_add(&mnt->mnt_list, &new_ns->list);
 	} else {
 		mntput(m);
@@ -656,6 +656,37 @@ static __be32 map_new_errors(u32 vers, __be32 nfserr)
 	return nfserr;
 }
 
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page.  The xdr code has taken
+ * advantage of this assumption to be a sloppy about bounds checking in
+ * some cases.  Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+				struct svc_procedure *proc)
+{
+	/*
+	 * The ACL code has more careful bounds-checking and is not
+	 * susceptible to this problem:
+	 */
+	if (rqstp->rq_prog != NFS_PROGRAM)
+		return false;
+	/*
+	 * Ditto NFSv4 (which can in theory have argument and reply both
+	 * more than a page):
+	 */
+	if (rqstp->rq_vers >= 4)
+		return false;
+	/* The reply will be small, we're OK: */
+	if (proc->pc_xdrressize > 0 &&
+	    proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+		return false;
+
+	return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
 int
 nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 {
@@ -668,6 +699,11 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
 				rqstp->rq_vers, rqstp->rq_proc);
 	proc = rqstp->rq_procinfo;
 
+	if (nfs_request_too_big(rqstp, proc)) {
+		dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+		*statp = rpc_garbage_args;
+		return 1;
+	}
 	/*
 	 * Give the xdr decoder a chance to change this if it wants
 	 * (necessary in the NFSv4.0 compound case)
@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m)
 		read_sequnlock_excl(&mount_lock);
 	}
 	hlist_add_head(&child->mnt_hash, list);
-	return 0;
+	return count_mounts(m->mnt_ns, child);
 }
 
 /*
@@ -55,4 +55,5 @@ void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
 struct mount *copy_tree(struct mount *, struct dentry *, int);
 bool is_path_reachable(struct mount *, struct dentry *,
 			 const struct path *root);
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
 #endif /* _LINUX_PNODE_H */
@@ -96,4 +96,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
 
 extern dev_t name_to_dev_t(const char *name);
 
+extern unsigned int sysctl_mount_max;
+
 #endif /* _LINUX_MOUNT_H */
@@ -34,7 +34,7 @@
 #define RTF_PREF(pref)	((pref) << 27)
 #define RTF_PREF_MASK	0x18000000
 
-#define RTF_PCPU	0x40000000
+#define RTF_PCPU	0x40000000	/* read-only: can not be set by user */
 #define RTF_LOCAL	0x80000000
 
@@ -8390,6 +8390,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 	return 0;
 }
 
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+			     struct perf_event_context *ctx)
+{
+	struct perf_event_context *gctx;
+
+again:
+	rcu_read_lock();
+	gctx = READ_ONCE(group_leader->ctx);
+	if (!atomic_inc_not_zero(&gctx->refcount)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+	if (group_leader->ctx != gctx) {
+		mutex_unlock(&ctx->mutex);
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+		goto again;
+	}
+
+	return gctx;
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -8630,8 +8661,26 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	if (move_group) {
-		gctx = group_leader->ctx;
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
+		gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
+		/*
+		 * Check if we raced against another sys_perf_event_open() call
+		 * moving the software group underneath us.
+		 */
+		if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+			/*
+			 * If someone moved the group out from under us, check
+			 * if this new event wound up on the same ctx, if so
+			 * its the regular !move_group case, otherwise fail.
+			 */
+			if (gctx != ctx) {
+				err = -EINVAL;
+				goto err_locked;
+			} else {
+				perf_event_ctx_unlock(group_leader, gctx);
+				move_group = 0;
+			}
+		}
 	} else {
 		mutex_lock(&ctx->mutex);
 	}
@@ -8726,7 +8775,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	perf_unpin_context(ctx);
 
 	if (move_group)
-		mutex_unlock(&gctx->mutex);
+		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
 
 	if (task) {
@@ -8754,7 +8803,7 @@ SYSCALL_DEFINE5(perf_event_open,
 
 err_locked:
 	if (move_group)
-		mutex_unlock(&gctx->mutex);
+		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
 /* err_file: */
 	fput(event_file);
@@ -65,6 +65,7 @@
 #include <linux/sched/sysctl.h>
 #include <linux/kexec.h>
 #include <linux/bpf.h>
+#include <linux/mount.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1853,6 +1854,14 @@ static struct ctl_table fs_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 	},
+	{
+		.procname	= "mount-max",
+		.data		= &sysctl_mount_max,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+	},
 	{ }
 };
@@ -2101,6 +2101,10 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 		trace_9p_protocol_dump(clnt, req->rc);
 		goto free_and_error;
 	}
+	if (rsize < count) {
+		pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+		count = rsize;
+	}
 
 	p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
 
@@ -859,7 +859,8 @@ static void neigh_probe(struct neighbour *neigh)
 	if (skb)
 		skb = skb_clone(skb, GFP_ATOMIC);
 	write_unlock(&neigh->lock);
-	neigh->ops->solicit(neigh, skb);
+	if (neigh->ops->solicit)
+		neigh->ops->solicit(neigh, skb);
 	atomic_inc(&neigh->probes);
 	kfree_skb(skb);
 }
@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
 		struct netdev_queue *txq;
+		unsigned int q_index;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			kfree_skb(skb);
 			continue;
 		}
 
-		txq = skb_get_tx_queue(dev, skb);
-
 		local_irq_save(flags);
+		/* check if skb->queue_mapping is still valid */
+		q_index = skb_get_queue_mapping(skb);
+		if (unlikely(q_index >= dev->real_num_tx_queues)) {
+			q_index = q_index % dev->real_num_tx_queues;
+			skb_set_queue_mapping(skb, q_index);
+		}
+		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
@@ -154,17 +154,18 @@ void ping_hash(struct sock *sk)
 void ping_unhash(struct sock *sk)
 {
 	struct inet_sock *isk = inet_sk(sk);
+
 	pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+	write_lock_bh(&ping_table.lock);
 	if (sk_hashed(sk)) {
-		write_lock_bh(&ping_table.lock);
 		hlist_nulls_del(&sk->sk_nulls_node);
 		sk_nulls_node_init(&sk->sk_nulls_node);
 		sock_put(sk);
 		isk->inet_num = 0;
 		isk->inet_sport = 0;
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-		write_unlock_bh(&ping_table.lock);
 	}
+	write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
@@ -2571,7 +2571,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
 	skb_reset_network_header(skb);
 
 	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-	ip_hdr(skb)->protocol = IPPROTO_ICMP;
+	ip_hdr(skb)->protocol = IPPROTO_UDP;
 	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
 	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
@@ -2260,6 +2260,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
+	tcp_saved_syn_free(tp);
 
 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
 
@@ -1049,7 +1049,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct net *net = t->net;
 	struct net_device_stats *stats = &t->dev->stats;
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ipv6hdr *ipv6h;
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
@@ -1061,26 +1061,28 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 
 	/* NBMA tunnel */
 	if (ipv6_addr_any(&t->parms.raddr)) {
-		struct in6_addr *addr6;
-		struct neighbour *neigh;
-		int addr_type;
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			struct in6_addr *addr6;
+			struct neighbour *neigh;
+			int addr_type;
 
-		if (!skb_dst(skb))
-			goto tx_err_link_failure;
+			if (!skb_dst(skb))
+				goto tx_err_link_failure;
 
-		neigh = dst_neigh_lookup(skb_dst(skb),
-					 &ipv6_hdr(skb)->daddr);
-		if (!neigh)
-			goto tx_err_link_failure;
+			neigh = dst_neigh_lookup(skb_dst(skb),
+						 &ipv6_hdr(skb)->daddr);
+			if (!neigh)
+				goto tx_err_link_failure;
 
-		addr6 = (struct in6_addr *)&neigh->primary_key;
-		addr_type = ipv6_addr_type(addr6);
+			addr6 = (struct in6_addr *)&neigh->primary_key;
+			addr_type = ipv6_addr_type(addr6);
 
-		if (addr_type == IPV6_ADDR_ANY)
-			addr6 = &ipv6_hdr(skb)->daddr;
+			if (addr_type == IPV6_ADDR_ANY)
+				addr6 = &ipv6_hdr(skb)->daddr;
 
-		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
-		neigh_release(neigh);
+			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+			neigh_release(neigh);
+		}
 	} else if (!(t->parms.flags &
 		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
 		/* enable the cache only only if the routing decision does
@@ -774,7 +774,8 @@ failure:
  *	Delete a VIF entry
  */
 
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+		       struct list_head *head)
 {
 	struct mif_device *v;
 	struct net_device *dev;
@@ -820,7 +821,7 @@ static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 					     dev->ifindex, &in6_dev->cnf);
 	}
 
-	if (v->flags & MIFF_REGISTER)
+	if ((v->flags & MIFF_REGISTER) && !notify)
 		unregister_netdevice_queue(dev, head);
 
 	dev_put(dev);
@@ -1330,7 +1331,6 @@ static int ip6mr_device_event(struct notifier_block *this,
 	struct mr6_table *mrt;
 	struct mif_device *v;
 	int ct;
-	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
@@ -1339,10 +1339,9 @@ static int ip6mr_device_event(struct notifier_block *this,
 		v = &mrt->vif6_table[0];
 		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
 			if (v->dev == dev)
-				mif6_delete(mrt, ct, &list);
+				mif6_delete(mrt, ct, 1, NULL);
 		}
 	}
-	unregister_netdevice_many(&list);
 
 	return NOTIFY_DONE;
 }
@@ -1551,7 +1550,7 @@ static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 	for (i = 0; i < mrt->maxvif; i++) {
 		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
 			continue;
-		mif6_delete(mrt, i, &list);
+		mif6_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1704,7 +1703,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
 			return -EFAULT;
 		rtnl_lock();
-		ret = mif6_delete(mrt, mifi, NULL);
+		ret = mif6_delete(mrt, mifi, 0, NULL);
 		rtnl_unlock();
 		return ret;
@@ -1145,8 +1145,7 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb)
-			amount = skb_tail_pointer(skb) -
-				skb_transport_header(skb);
+			amount = skb->len;
 		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
@@ -1759,6 +1759,10 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 	int addr_type;
 	int err = -EINVAL;
 
+	/* RTF_PCPU is an internal flag; can not be set by userspace */
+	if (cfg->fc_flags & RTF_PCPU)
+		goto out;
+
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
 		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
@@ -278,7 +278,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 }
 EXPORT_SYMBOL_GPL(l2tp_session_find);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref)
 {
 	int hash;
 	struct l2tp_session *session;
@@ -288,6 +289,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
 		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
 			if (++count > nth) {
+				l2tp_session_inc_refcount(session);
+				if (do_ref && session->ref)
+					session->ref(session);
 				read_unlock_bh(&tunnel->hlist_lock);
 				return session;
 			}
@@ -298,7 +302,7 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
@@ -243,7 +243,8 @@ out:
 struct l2tp_session *l2tp_session_find(struct net *net,
 				       struct l2tp_tunnel *tunnel,
 				       u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+					  bool do_ref);
 struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
 	}
 
 	/* Show the tunnel or session context */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		l2tp_dfs_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
@@ -827,7 +827,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 			goto out;
 		}
 
-		session = l2tp_session_find_nth(tunnel, si);
+		session = l2tp_session_get_nth(tunnel, si, false);
 		if (session == NULL) {
 			ti++;
 			tunnel = NULL;
@@ -837,8 +837,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
 		if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
-					 session, L2TP_CMD_SESSION_GET) < 0)
+					 session, L2TP_CMD_SESSION_GET) < 0) {
+			l2tp_session_dec_refcount(session);
 			break;
+		}
+		l2tp_session_dec_refcount(session);
 
 		si++;
 	}
@@ -467,6 +467,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 static void pppol2tp_session_destruct(struct sock *sk)
 {
 	struct l2tp_session *session = sk->sk_user_data;
+
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_queue_purge(&sk->sk_write_queue);
+
 	if (session) {
 		sk->sk_user_data = NULL;
 		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -505,9 +509,6 @@ static int pppol2tp_release(struct socket *sock)
 		l2tp_session_queue_purge(session);
 		sock_put(sk);
 	}
-	skb_queue_purge(&sk->sk_receive_queue);
-	skb_queue_purge(&sk->sk_write_queue);
-
 	release_sock(sk);
 
 	/* This will delete the session context via
@@ -1574,7 +1575,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
 
 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
 {
-	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+	pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
 	pd->session_idx++;
 
 	if (pd->session == NULL) {
@@ -1701,10 +1702,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
 
 	/* Show the tunnel or session context.
 	 */
-	if (pd->session == NULL)
+	if (!pd->session) {
 		pppol2tp_seq_tunnel_show(m, pd->tunnel);
-	else
+	} else {
 		pppol2tp_seq_session_show(m, pd->session);
+		if (pd->session->deref)
+			pd->session->deref(pd->session);
+		l2tp_session_dec_refcount(pd->session);
+	}
 
 out:
 	return 0;
@@ -1863,4 +1868,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
@@ -3626,6 +3626,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
+		if (val > INT_MAX)
+			return -EINVAL;
 		po->tp_reserve = val;
 		return 0;
 	}
@@ -4150,6 +4152,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
 		if (unlikely(rb->frames_per_block == 0))
 			goto out;
+		if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+			goto out;
 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
 					req->tp_frame_nr))
 			goto out;
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
@@ -36,14 +36,15 @@ static DEFINE_SPINLOCK(mirred_list_lock);
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
-	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+	struct net_device *dev;
 
 	/* We could be called either in a RCU callback or with RTNL lock held. */
 	spin_lock_bh(&mirred_list_lock);
 	list_del(&m->tcfm_list);
-	spin_unlock_bh(&mirred_list_lock);
+	dev = rcu_dereference_protected(m->tcfm_dev, 1);
 	if (dev)
 		dev_put(dev);
+	spin_unlock_bh(&mirred_list_lock);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -6394,6 +6394,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	if (sock->state != SS_UNCONNECTED)
 		goto out;
 
+	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+		goto out;
+
 	/* If backlog is zero, disable listening. */
 	if (!backlog) {
 		if (sctp_sstate(sk, CLOSED))
@@ -381,6 +381,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
 	dev = dev_get_by_name(net, driver_name);
 	if (!dev)
 		return -ENODEV;
+	if (tipc_mtu_bad(dev, 0)) {
+		dev_put(dev);
+		return -EINVAL;
+	}
 
 	/* Associate TIPC bearer with L2 bearer */
 	rcu_assign_pointer(b->media_ptr, dev);
@@ -570,14 +574,19 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 	if (!b_ptr)
 		return NOTIFY_DONE;
 
-	b_ptr->mtu = dev->mtu;
-
 	switch (evt) {
 	case NETDEV_CHANGE:
 		if (netif_carrier_ok(dev))
 			break;
 	case NETDEV_GOING_DOWN:
+		tipc_reset_bearer(net, b_ptr);
+		break;
 	case NETDEV_CHANGEMTU:
+		if (tipc_mtu_bad(dev, 0)) {
+			bearer_disable(net, b_ptr);
+			break;
+		}
+		b_ptr->mtu = dev->mtu;
 		tipc_reset_bearer(net, b_ptr);
 		break;
 	case NETDEV_CHANGEADDR:
@@ -39,6 +39,7 @@
 
 #include "netlink.h"
 #include "core.h"
+#include "msg.h"
 #include <net/genetlink.h>
 
 #define MAX_MEDIA	3
@@ -61,6 +62,9 @@
 #define TIPC_MEDIA_TYPE_IB	2
 #define TIPC_MEDIA_TYPE_UDP	3
 
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU	(MAX_H_SIZE + INT_H_SIZE)
+
 /**
  * struct tipc_node_map - set of node identifiers
  * @count: # of nodes in set
@@ -226,4 +230,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
 		      struct sk_buff_head *xmitq);
 void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
			 struct sk_buff_head *xmitq);
 
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+	if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+		return false;
+	netdev_warn(dev, "MTU too low for tipc bearer\n");
+	return true;
+}
+
 #endif /* _TIPC_BEARER_H */
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
 	if (err)
 		goto out_nametbl;
 
+	INIT_LIST_HEAD(&tn->dist_queue);
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
@@ -103,6 +103,9 @@ struct tipc_net {
 	spinlock_t nametbl_lock;
 	struct name_table *nametbl;
 
+	/* Name dist queue */
+	struct list_head dist_queue;
+
 	/* Topology subscription server */
 	struct tipc_server *topsrv;
 	atomic_t subscription_count;
@@ -40,11 +40,6 @@

int sysctl_tipc_named_timeout __read_mostly = 2000;

/**
* struct tipc_dist_queue - queue holding deferred name table updates
*/
static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);

struct distr_queue_item {
struct distr_item i;
u32 dtype;
@@ -67,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)

/**
* named_prepare_buf - allocate & initialize a publication message
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
@@ -171,9 +168,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
ITEM_SIZE;
uint msg_rem = msg_dsz;
u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;

list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */

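The named_distribute() change fixes the batch-size arithmetic: the old formula spent the entire link MTU on distr_item entries even though, as the comment above named_prepare_buf() says, the buffer also carries an INT_H_SIZE header, so a full batch overshot the MTU. Worked numbers, assuming ITEM_SIZE is 20 (sizeof(struct distr_item)) and INT_H_SIZE is 40:

#include <stdio.h>

#define ITEM_SIZE  20	/* assumed sizeof(struct distr_item) */
#define INT_H_SIZE 40	/* assumed internal header size      */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int old_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE;
	unsigned int new_dsz = ((mtu - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;

	/* old math ignored the header, so header + payload could exceed
	 * the MTU; the new math keeps the whole buffer within it */
	printf("old payload %u, with header %u > mtu\n", old_dsz,
	       old_dsz + INT_H_SIZE);	/* 1500, 1540 */
	printf("new payload %u, with header %u <= mtu\n", new_dsz,
	       new_dsz + INT_H_SIZE);	/* 1460, 1500 */
	return 0;
}
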
@@ -340,9 +337,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
* tipc_named_add_backlog - add a failed name table update to the backlog
*
*/
static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
u32 type, u32 node)
{
struct distr_queue_item *e;
struct tipc_net *tn = net_generic(net, tipc_net_id);
unsigned long now = get_jiffies_64();

e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -352,7 +351,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
e->node = node;
e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
memcpy(e, i, sizeof(*i));
list_add_tail(&e->next, &tipc_dist_queue);
list_add_tail(&e->next, &tn->dist_queue);
}

/**
@@ -362,10 +361,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
void tipc_named_process_backlog(struct net *net)
{
struct distr_queue_item *e, *tmp;
struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr[16];
unsigned long now = get_jiffies_64();

list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
if (time_after(e->expires, now)) {
if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
continue;
@@ -405,7 +405,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
node = msg_orignode(msg);
while (count--) {
if (!tipc_update_nametbl(net, item, node, mtype))
tipc_named_add_backlog(item, mtype, node);
tipc_named_add_backlog(net, item, mtype, node);
item++;
}
kfree_skb(skb);

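The backlog machinery above retries failed name-table updates until they either apply or pass their deadline. A self-contained userspace analogue of the same queue-with-expiry pattern, with a plain counter in place of jiffies and a stand-in predicate for tipc_update_nametbl():

#include <stdbool.h>
#include <stdlib.h>

struct backlog_item {
	struct backlog_item *next;
	int payload;
	unsigned long expires;
};

static struct backlog_item *queue;

static void add_backlog(int payload, unsigned long now, unsigned long timeout)
{
	struct backlog_item *e = calloc(1, sizeof(*e));

	if (!e)
		return;		/* allocation failed: nothing to do, drop */
	e->payload = payload;
	e->expires = now + timeout;
	e->next = queue;
	queue = e;
}

static bool try_update(int payload) { return payload % 2 == 0; } /* stand-in */

static void process_backlog(unsigned long now)
{
	struct backlog_item **pp = &queue, *e;

	while ((e = *pp)) {
		/* keep an entry only if it is unexpired and still failing */
		if (now < e->expires && !try_update(e->payload)) {
			pp = &e->next;
			continue;
		}
		*pp = e->next;	/* applied or expired: unlink and free */
		free(e);
	}
}

int main(void)
{
	add_backlog(1, 0, 100);	/* odd payload: keeps failing       */
	add_backlog(2, 0, 100);	/* even payload: applies first try  */
	process_backlog(50);	/* drops 2 (applied), keeps 1       */
	process_backlog(200);	/* deadline passed: drops 1 expired */
	return 0;
}
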
@@ -728,7 +728,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case SELF_LOST_CONTACT_EVT:
state = SELF_DOWN_PEER_LEAVING;
state = SELF_DOWN_PEER_DOWN;
break;
case SELF_ESTABL_CONTACT_EVT:
case PEER_LOST_CONTACT_EVT:
@@ -747,7 +747,7 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
state = SELF_UP_PEER_UP;
break;
case PEER_LOST_CONTACT_EVT:
state = SELF_LEAVING_PEER_DOWN;
state = SELF_DOWN_PEER_DOWN;
break;
case SELF_LOST_CONTACT_EVT:
case PEER_ESTABL_CONTACT_EVT:

@@ -777,9 +777,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
int conn_cong;
@@ -792,7 +794,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)

if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
tipc_sk_respond(sk, skb, TIPC_OK);
if (tipc_msg_reverse(onode, &skb, TIPC_OK))
__skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
@@ -1647,7 +1650,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
*
* Returns true if message was added to socket receive queue, otherwise false
*/
static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1657,7 +1661,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
int usr = msg_user(hdr);

if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
tipc_sk_proto_rcv(tsk, skb);
tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}

@@ -1700,7 +1704,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
return true;

reject:
tipc_sk_respond(sk, skb, err);
if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
__skb_queue_tail(xmitq, skb);
return false;
}

@@ -1716,9 +1721,24 @@ reject:
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
struct sk_buff_head xmitq;
u32 dnode, selector;

if (likely(filter_rcv(sk, skb)))
__skb_queue_head_init(&xmitq);

if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
return 0;
}

if (skb_queue_empty(&xmitq))
return 0;

/* Send response/rejected message */
skb = __skb_dequeue(&xmitq);
dnode = msg_destnode(buf_msg(skb));
selector = msg_origport(buf_msg(skb));
tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}

@@ -1732,12 +1752,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
u32 dport)
u32 dport, struct sk_buff_head *xmitq)
{
unsigned long time_limit = jiffies + 2;
struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
struct sk_buff *skb;
unsigned long time_limit = jiffies + 2;
u32 onode;

while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1749,20 +1770,22 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,

/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
filter_rcv(sk, skb);
filter_rcv(sk, skb, xmitq);
continue;
}

/* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
if (sk->sk_backlog.len)
if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim)))
continue;

/* Overload => reject message back to sender */
tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
onode = tipc_own_addr(sock_net(sk));
if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
__skb_queue_tail(xmitq, skb);
break;
}
}
@@ -1775,12 +1798,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;

__skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
@@ -1788,9 +1813,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
tipc_sk_enqueue(inputq, sk, dport);
tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
/* Send pending response/rejected messages, if any */
while ((skb = __skb_dequeue(&xmitq))) {
dnode = msg_destnode(buf_msg(skb));
tipc_node_xmit_skb(net, skb, dnode, dport);
}
sock_put(sk);
continue;
}

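The common thread in these socket.c hunks is that nothing is transmitted while the socket lock (or the slock in tipc_sk_rcv()) is held: filter_rcv() and tipc_sk_proto_rcv() now park responses and rejects on a caller-supplied xmitq, and the callers drain that queue only after unlocking. A userspace sketch of the collect-then-send discipline, with a pthread mutex and a toy message type as stand-ins for the kernel's locks and sk_buffs:

#include <pthread.h>
#include <stdio.h>

struct msg { struct msg *next; int dest; };

static void xmit(struct msg *m) { printf("send to %d\n", m->dest); }

static void rcv_locked(struct msg *m, struct msg **xmitq)
{
	/* decide to reject; do NOT send here, just park it on xmitq */
	m->next = *xmitq;
	*xmitq = m;
}

static void sock_rcv(pthread_mutex_t *lock, struct msg *m)
{
	struct msg *xmitq = NULL;

	pthread_mutex_lock(lock);
	rcv_locked(m, &xmitq);		/* analogue of filter_rcv(..., &xmitq) */
	pthread_mutex_unlock(lock);

	while (xmitq) {			/* analogue of the post-unlock drain */
		struct msg *next = xmitq->next;
		xmit(xmitq);
		xmitq = next;
	}
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct msg m = { .next = NULL, .dest = 42 };

	sock_rcv(&lock, &m);	/* prints "send to 42" after unlocking */
	return 0;
}
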
@@ -52,7 +52,7 @@
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118

#define UDP_MIN_HEADROOM 28
#define UDP_MIN_HEADROOM 48

static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
@@ -376,6 +376,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
sizeof(struct udphdr))) {
err = -EINVAL;
goto err;
}
b->mtu = dev->mtu - sizeof(struct iphdr)
- sizeof(struct udphdr);
#if IS_ENABLED(CONFIG_IPV6)

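Two worked numbers behind this hunk, hedged where the diff does not show the reasoning: the old 28-byte headroom equals an IPv4 header (20) plus a UDP header (8), and 48 presumably extends the reservation to cover the 40-byte IPv6 header plus UDP; the bearer MTU arithmetic is visible directly in the hunk:

#include <stdio.h>

int main(void)
{
	int dev_mtu = 1500;

	printf("old floor: %d (iphdr 20 + udphdr 8)\n", 20 + 8);	/* 28 */
	printf("new floor: %d (ipv6hdr 40 + udphdr 8)\n", 40 + 8);	/* 48 */
	/* the b->mtu computation from the hunk, for a standard Ethernet MTU */
	printf("ipv4 bearer mtu: %d\n", dev_mtu - 20 - 8);		/* 1472 */
	return 0;
}
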
@@ -28,19 +28,16 @@
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
int max_count = 5 * HZ;
int warn_count = 5 * HZ;

if (atomic_read(lockp) < 0) {
pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
if (max_count == 0) {
pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
break;
}
if (warn_count-- == 0)
pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
max_count--;
}
}

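The rewritten helper no longer abandons the wait after 5*HZ ticks: max_count and the timeout break are gone, and warn_count only rate-limits a diagnostic while the loop keeps waiting for the use count to drain. A C11-atomics userspace analogue of the new loop shape, with a counter in place of jiffies and no real sleep:

#include <stdatomic.h>
#include <stdio.h>

static void use_lock_sync(atomic_int *lockp)
{
	int warn_count = 5000;	/* stands in for 5 * HZ */

	while (atomic_load(lockp) > 0) {
		if (warn_count-- == 0)	/* warn once, but never give up */
			fprintf(stderr, "still waiting [%d left]\n",
				atomic_load(lockp));
		/* the kernel sleeps one tick here via
		 * schedule_timeout_uninterruptible(1); a real userspace
		 * port would yield or nanosleep instead of spinning */
	}
}

int main(void)
{
	atomic_int lock = 0;	/* nothing held: the sync returns at once */

	use_lock_sync(&lock);
	return 0;
}
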
@@ -42,7 +42,7 @@ struct snd_fw_async_midi_port {

struct snd_rawmidi_substream *substream;
snd_fw_async_midi_port_fill fill;
unsigned int consume_bytes;
int consume_bytes;
};

int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,

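Making consume_bytes signed matters because a fill callback that reports an error with a negative return value would, in an unsigned field, wrap to a huge byte count and defeat any "< 0" check. A minimal illustration of the wrap, not tied to the driver's actual call path:

#include <stdio.h>

struct port_mock {
	unsigned int consumed_unsigned;
	int consumed_signed;
};

int main(void)
{
	struct port_mock p;
	int err = -5;	/* an -EIO-style error code from a fill() callback */

	p.consumed_unsigned = err;	/* wraps to 4294967291 */
	p.consumed_signed = err;

	printf("unsigned view: %u (error lost)\n", p.consumed_unsigned);
	printf("signed view:   %d (error preserved)\n", p.consumed_signed);
	return 0;
}
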