mirror of
https://github.com/hardkernel/linux.git
synced 2026-03-24 19:40:21 +09:00
Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
Conflicts:
fs/f2fs/super.c
Pick changes from LTS commit 76517ed2a7
("f2fs: sanity check checkpoint segno and blkoff"),
over changes from AOSP commit 0f127e451b
("FROMLIST: f2fs: sanity check checkpoint segno and blkoff")
Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
This commit is contained in:
2
Makefile
2
Makefile
@@ -1,6 +1,6 @@
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 80
|
||||
SUBLEVEL = 82
|
||||
EXTRAVERSION =
|
||||
NAME = Blurry Fish Butt
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pca0_pins>;
|
||||
interrupt-parent = <&gpio0>;
|
||||
interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
|
||||
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
@@ -101,7 +101,7 @@
|
||||
compatible = "nxp,pca9555";
|
||||
pinctrl-names = "default";
|
||||
interrupt-parent = <&gpio0>;
|
||||
interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
|
||||
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
|
||||
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
|
||||
|
||||
#define ftrace_return_address(n) return_address(n)
|
||||
|
||||
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
|
||||
|
||||
static inline bool arch_syscall_match_sym_name(const char *sym,
|
||||
const char *name)
|
||||
{
|
||||
if (!strcmp(sym, "sys_mmap2"))
|
||||
sym = "sys_mmap_pgoff";
|
||||
else if (!strcmp(sym, "sys_statfs64_wrapper"))
|
||||
sym = "sys_statfs64";
|
||||
else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
|
||||
sym = "sys_fstatfs64";
|
||||
else if (!strcmp(sym, "sys_arm_fadvise64_64"))
|
||||
sym = "sys_fadvise64_64";
|
||||
|
||||
/* Ignore case since sym may start with "SyS" instead of "sys" */
|
||||
return !strcasecmp(sym, name);
|
||||
}
|
||||
|
||||
#endif /* ifndef __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_ARM_FTRACE */
|
||||
|
||||
@@ -1636,12 +1636,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
|
||||
|
||||
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
|
||||
{
|
||||
if (!kvm->arch.pgd)
|
||||
return 0;
|
||||
trace_kvm_age_hva(start, end);
|
||||
return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
|
||||
}
|
||||
|
||||
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
|
||||
{
|
||||
if (!kvm->arch.pgd)
|
||||
return 0;
|
||||
trace_kvm_test_age_hva(hva);
|
||||
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
|
||||
}
|
||||
|
||||
@@ -12,3 +12,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
|
||||
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
|
||||
|
||||
GCOV_PROFILE := n
|
||||
KASAN_SANITIZE := n
|
||||
UBSAN_SANITIZE := n
|
||||
|
||||
@@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
|
||||
insn_count = bpf_jit_insn(jit, fp, i);
|
||||
if (insn_count < 0)
|
||||
return -1;
|
||||
jit->addrs[i + 1] = jit->prg; /* Next instruction address */
|
||||
/* Next instruction address */
|
||||
jit->addrs[i + insn_count] = jit->prg;
|
||||
}
|
||||
bpf_jit_epilogue(jit);
|
||||
|
||||
|
||||
@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
|
||||
void __tsb_context_switch(unsigned long pgd_pa,
|
||||
struct tsb_config *tsb_base,
|
||||
struct tsb_config *tsb_huge,
|
||||
unsigned long tsb_descr_pa);
|
||||
unsigned long tsb_descr_pa,
|
||||
unsigned long secondary_ctx);
|
||||
|
||||
static inline void tsb_context_switch(struct mm_struct *mm)
|
||||
static inline void tsb_context_switch_ctx(struct mm_struct *mm,
|
||||
unsigned long ctx)
|
||||
{
|
||||
__tsb_context_switch(__pa(mm->pgd),
|
||||
&mm->context.tsb_block[0],
|
||||
@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
|
||||
#else
|
||||
NULL
|
||||
#endif
|
||||
, __pa(&mm->context.tsb_descr[0]));
|
||||
, __pa(&mm->context.tsb_descr[0]),
|
||||
ctx);
|
||||
}
|
||||
|
||||
#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
|
||||
|
||||
void tsb_grow(struct mm_struct *mm,
|
||||
unsigned long tsb_index,
|
||||
unsigned long mm_rss);
|
||||
@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
|
||||
* cpu0 to update it's TSB because at that point the cpu_vm_mask
|
||||
* only had cpu1 set in it.
|
||||
*/
|
||||
load_secondary_context(mm);
|
||||
tsb_context_switch(mm);
|
||||
tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
|
||||
|
||||
/* Any time a processor runs a context on an address space
|
||||
* for the first time, we must flush that context out of the
|
||||
|
||||
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
|
||||
void init_cur_cpu_trap(struct thread_info *);
|
||||
void setup_tba(void);
|
||||
extern int ncpus_probed;
|
||||
extern u64 cpu_mondo_counter[NR_CPUS];
|
||||
|
||||
unsigned long real_hard_smp_processor_id(void);
|
||||
|
||||
|
||||
@@ -617,22 +617,48 @@ retry:
|
||||
}
|
||||
}
|
||||
|
||||
/* Multi-cpu list version. */
|
||||
#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
|
||||
#define MONDO_USEC_WAIT_MIN 2
|
||||
#define MONDO_USEC_WAIT_MAX 100
|
||||
#define MONDO_RETRY_LIMIT 500000
|
||||
|
||||
/* Multi-cpu list version.
|
||||
*
|
||||
* Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
|
||||
* Sometimes not all cpus receive the mondo, requiring us to re-send
|
||||
* the mondo until all cpus have received, or cpus are truly stuck
|
||||
* unable to receive mondo, and we timeout.
|
||||
* Occasionally a target cpu strand is borrowed briefly by hypervisor to
|
||||
* perform guest service, such as PCIe error handling. Consider the
|
||||
* service time, 1 second overall wait is reasonable for 1 cpu.
|
||||
* Here two in-between mondo check wait time are defined: 2 usec for
|
||||
* single cpu quick turn around and up to 100usec for large cpu count.
|
||||
* Deliver mondo to large number of cpus could take longer, we adjusts
|
||||
* the retry count as long as target cpus are making forward progress.
|
||||
*/
|
||||
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
|
||||
{
|
||||
int retries, this_cpu, prev_sent, i, saw_cpu_error;
|
||||
int this_cpu, tot_cpus, prev_sent, i, rem;
|
||||
int usec_wait, retries, tot_retries;
|
||||
u16 first_cpu = 0xffff;
|
||||
unsigned long xc_rcvd = 0;
|
||||
unsigned long status;
|
||||
int ecpuerror_id = 0;
|
||||
int enocpu_id = 0;
|
||||
u16 *cpu_list;
|
||||
u16 cpu;
|
||||
|
||||
this_cpu = smp_processor_id();
|
||||
|
||||
cpu_list = __va(tb->cpu_list_pa);
|
||||
|
||||
saw_cpu_error = 0;
|
||||
retries = 0;
|
||||
usec_wait = cnt * MONDO_USEC_WAIT_MIN;
|
||||
if (usec_wait > MONDO_USEC_WAIT_MAX)
|
||||
usec_wait = MONDO_USEC_WAIT_MAX;
|
||||
retries = tot_retries = 0;
|
||||
tot_cpus = cnt;
|
||||
prev_sent = 0;
|
||||
|
||||
do {
|
||||
int forward_progress, n_sent;
|
||||
int n_sent, mondo_delivered, target_cpu_busy;
|
||||
|
||||
status = sun4v_cpu_mondo_send(cnt,
|
||||
tb->cpu_list_pa,
|
||||
@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
|
||||
|
||||
/* HV_EOK means all cpus received the xcall, we're done. */
|
||||
if (likely(status == HV_EOK))
|
||||
break;
|
||||
goto xcall_done;
|
||||
|
||||
/* If not these non-fatal errors, panic */
|
||||
if (unlikely((status != HV_EWOULDBLOCK) &&
|
||||
(status != HV_ECPUERROR) &&
|
||||
(status != HV_ENOCPU)))
|
||||
goto fatal_errors;
|
||||
|
||||
/* First, see if we made any forward progress.
|
||||
*
|
||||
* Go through the cpu_list, count the target cpus that have
|
||||
* received our mondo (n_sent), and those that did not (rem).
|
||||
* Re-pack cpu_list with the cpus remain to be retried in the
|
||||
* front - this simplifies tracking the truly stalled cpus.
|
||||
*
|
||||
* The hypervisor indicates successful sends by setting
|
||||
* cpu list entries to the value 0xffff.
|
||||
*
|
||||
* EWOULDBLOCK means some target cpus did not receive the
|
||||
* mondo and retry usually helps.
|
||||
*
|
||||
* ECPUERROR means at least one target cpu is in error state,
|
||||
* it's usually safe to skip the faulty cpu and retry.
|
||||
*
|
||||
* ENOCPU means one of the target cpu doesn't belong to the
|
||||
* domain, perhaps offlined which is unexpected, but not
|
||||
* fatal and it's okay to skip the offlined cpu.
|
||||
*/
|
||||
rem = 0;
|
||||
n_sent = 0;
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (likely(cpu_list[i] == 0xffff))
|
||||
cpu = cpu_list[i];
|
||||
if (likely(cpu == 0xffff)) {
|
||||
n_sent++;
|
||||
} else if ((status == HV_ECPUERROR) &&
|
||||
(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
|
||||
ecpuerror_id = cpu + 1;
|
||||
} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
|
||||
enocpu_id = cpu + 1;
|
||||
} else {
|
||||
cpu_list[rem++] = cpu;
|
||||
}
|
||||
}
|
||||
|
||||
forward_progress = 0;
|
||||
if (n_sent > prev_sent)
|
||||
forward_progress = 1;
|
||||
/* No cpu remained, we're done. */
|
||||
if (rem == 0)
|
||||
break;
|
||||
|
||||
/* Otherwise, update the cpu count for retry. */
|
||||
cnt = rem;
|
||||
|
||||
/* Record the overall number of mondos received by the
|
||||
* first of the remaining cpus.
|
||||
*/
|
||||
if (first_cpu != cpu_list[0]) {
|
||||
first_cpu = cpu_list[0];
|
||||
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
|
||||
}
|
||||
|
||||
/* Was any mondo delivered successfully? */
|
||||
mondo_delivered = (n_sent > prev_sent);
|
||||
prev_sent = n_sent;
|
||||
|
||||
/* If we get a HV_ECPUERROR, then one or more of the cpus
|
||||
* in the list are in error state. Use the cpu_state()
|
||||
* hypervisor call to find out which cpus are in error state.
|
||||
/* or, was any target cpu busy processing other mondos? */
|
||||
target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
|
||||
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
|
||||
|
||||
/* Retry count is for no progress. If we're making progress,
|
||||
* reset the retry count.
|
||||
*/
|
||||
if (unlikely(status == HV_ECPUERROR)) {
|
||||
for (i = 0; i < cnt; i++) {
|
||||
long err;
|
||||
u16 cpu;
|
||||
|
||||
cpu = cpu_list[i];
|
||||
if (cpu == 0xffff)
|
||||
continue;
|
||||
|
||||
err = sun4v_cpu_state(cpu);
|
||||
if (err == HV_CPU_STATE_ERROR) {
|
||||
saw_cpu_error = (cpu + 1);
|
||||
cpu_list[i] = 0xffff;
|
||||
}
|
||||
}
|
||||
} else if (unlikely(status != HV_EWOULDBLOCK))
|
||||
goto fatal_mondo_error;
|
||||
|
||||
/* Don't bother rewriting the CPU list, just leave the
|
||||
* 0xffff and non-0xffff entries in there and the
|
||||
* hypervisor will do the right thing.
|
||||
*
|
||||
* Only advance timeout state if we didn't make any
|
||||
* forward progress.
|
||||
*/
|
||||
if (unlikely(!forward_progress)) {
|
||||
if (unlikely(++retries > 10000))
|
||||
goto fatal_mondo_timeout;
|
||||
|
||||
/* Delay a little bit to let other cpus catch up
|
||||
* on their cpu mondo queue work.
|
||||
*/
|
||||
udelay(2 * cnt);
|
||||
if (likely(mondo_delivered || target_cpu_busy)) {
|
||||
tot_retries += retries;
|
||||
retries = 0;
|
||||
} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
|
||||
goto fatal_mondo_timeout;
|
||||
}
|
||||
|
||||
/* Delay a little bit to let other cpus catch up on
|
||||
* their cpu mondo queue work.
|
||||
*/
|
||||
if (!mondo_delivered)
|
||||
udelay(usec_wait);
|
||||
|
||||
retries++;
|
||||
} while (1);
|
||||
|
||||
if (unlikely(saw_cpu_error))
|
||||
goto fatal_mondo_cpu_error;
|
||||
|
||||
xcall_done:
|
||||
if (unlikely(ecpuerror_id > 0)) {
|
||||
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
|
||||
this_cpu, ecpuerror_id - 1);
|
||||
} else if (unlikely(enocpu_id > 0)) {
|
||||
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
|
||||
this_cpu, enocpu_id - 1);
|
||||
}
|
||||
return;
|
||||
|
||||
fatal_mondo_cpu_error:
|
||||
printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
|
||||
"(including %d) were in error state\n",
|
||||
this_cpu, saw_cpu_error - 1);
|
||||
return;
|
||||
fatal_errors:
|
||||
/* fatal errors include bad alignment, etc */
|
||||
pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
|
||||
this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
|
||||
panic("Unexpected SUN4V mondo error %lu\n", status);
|
||||
|
||||
fatal_mondo_timeout:
|
||||
printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
|
||||
" progress after %d retries.\n",
|
||||
this_cpu, retries);
|
||||
goto dump_cpu_list_and_out;
|
||||
|
||||
fatal_mondo_error:
|
||||
printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
|
||||
this_cpu, status);
|
||||
printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
|
||||
"mondo_block_pa(%lx)\n",
|
||||
this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
|
||||
|
||||
dump_cpu_list_and_out:
|
||||
printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
|
||||
for (i = 0; i < cnt; i++)
|
||||
printk("%u ", cpu_list[i]);
|
||||
printk("]\n");
|
||||
/* some cpus being non-responsive to the cpu mondo */
|
||||
pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
|
||||
this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
|
||||
panic("SUN4V mondo timeout panic\n");
|
||||
}
|
||||
|
||||
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
|
||||
|
||||
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
|
||||
ldxa [%g0] ASI_SCRATCHPAD, %g4
|
||||
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
|
||||
|
||||
/* Get smp_processor_id() into %g3 */
|
||||
sethi %hi(trap_block), %g5
|
||||
or %g5, %lo(trap_block), %g5
|
||||
sub %g4, %g5, %g3
|
||||
srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
|
||||
|
||||
/* Increment cpu_mondo_counter[smp_processor_id()] */
|
||||
sethi %hi(cpu_mondo_counter), %g5
|
||||
or %g5, %lo(cpu_mondo_counter), %g5
|
||||
sllx %g3, 3, %g3
|
||||
add %g5, %g3, %g5
|
||||
ldx [%g5], %g3
|
||||
add %g3, 1, %g3
|
||||
stx %g3, [%g5]
|
||||
|
||||
/* Get CPU mondo queue base phys address into %g7. */
|
||||
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
|
||||
|
||||
|
||||
@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs)
|
||||
}
|
||||
}
|
||||
|
||||
u64 cpu_mondo_counter[NR_CPUS] = {0};
|
||||
struct trap_per_cpu trap_block[NR_CPUS];
|
||||
EXPORT_SYMBOL(trap_block);
|
||||
|
||||
|
||||
@@ -375,6 +375,7 @@ tsb_flush:
|
||||
* %o1: TSB base config pointer
|
||||
* %o2: TSB huge config pointer, or NULL if none
|
||||
* %o3: Hypervisor TSB descriptor physical address
|
||||
* %o4: Secondary context to load, if non-zero
|
||||
*
|
||||
* We have to run this whole thing with interrupts
|
||||
* disabled so that the current cpu doesn't change
|
||||
@@ -387,6 +388,17 @@ __tsb_context_switch:
|
||||
rdpr %pstate, %g1
|
||||
wrpr %g1, PSTATE_IE, %pstate
|
||||
|
||||
brz,pn %o4, 1f
|
||||
mov SECONDARY_CONTEXT, %o5
|
||||
|
||||
661: stxa %o4, [%o5] ASI_DMMU
|
||||
.section .sun4v_1insn_patch, "ax"
|
||||
.word 661b
|
||||
stxa %o4, [%o5] ASI_MMU
|
||||
.previous
|
||||
flush %g6
|
||||
|
||||
1:
|
||||
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
|
||||
|
||||
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
|
||||
|
||||
@@ -35,6 +35,5 @@ void restore_processor_state(void)
|
||||
{
|
||||
struct mm_struct *mm = current->active_mm;
|
||||
|
||||
load_secondary_context(mm);
|
||||
tsb_context_switch(mm);
|
||||
tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include "ctype.h"
|
||||
#include "string.h"
|
||||
|
||||
int memcmp(const void *s1, const void *s2, size_t len)
|
||||
{
|
||||
|
||||
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
|
||||
#define memset(d,c,l) __builtin_memset(d,c,l)
|
||||
#define memcmp __builtin_memcmp
|
||||
|
||||
extern int strcmp(const char *str1, const char *str2);
|
||||
extern int strncmp(const char *cs, const char *ct, size_t count);
|
||||
extern size_t strlen(const char *s);
|
||||
extern char *strstr(const char *s1, const char *s2);
|
||||
extern size_t strnlen(const char *s, size_t maxlen);
|
||||
extern unsigned int atou(const char *s);
|
||||
extern unsigned long long simple_strtoull(const char *cp, char **endp,
|
||||
unsigned int base);
|
||||
|
||||
#endif /* BOOT_STRING_H */
|
||||
|
||||
@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
|
||||
if (hlist_unhashed(&n.link))
|
||||
break;
|
||||
|
||||
rcu_irq_exit();
|
||||
|
||||
if (!n.halted) {
|
||||
local_irq_enable();
|
||||
schedule();
|
||||
@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
|
||||
/*
|
||||
* We cannot reschedule. So halt.
|
||||
*/
|
||||
rcu_irq_exit();
|
||||
native_safe_halt();
|
||||
local_irq_disable();
|
||||
rcu_irq_enter();
|
||||
}
|
||||
|
||||
rcu_irq_enter();
|
||||
}
|
||||
if (!n.halted)
|
||||
finish_wait(&n.wq, &wait);
|
||||
|
||||
@@ -2832,10 +2832,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
|
||||
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
|
||||
{
|
||||
if (!sata_pmp_attached(ap)) {
|
||||
if (likely(devno < ata_link_max_devices(&ap->link)))
|
||||
if (likely(devno >= 0 &&
|
||||
devno < ata_link_max_devices(&ap->link)))
|
||||
return &ap->link.device[devno];
|
||||
} else {
|
||||
if (likely(devno < ap->nr_pmp_links))
|
||||
if (likely(devno >= 0 &&
|
||||
devno < ap->nr_pmp_links))
|
||||
return &ap->pmp_link[devno].device[0];
|
||||
}
|
||||
|
||||
|
||||
@@ -641,11 +641,12 @@ static int virtblk_probe(struct virtio_device *vdev)
|
||||
if (err)
|
||||
goto out_put_disk;
|
||||
|
||||
q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
|
||||
q = blk_mq_init_queue(&vblk->tag_set);
|
||||
if (IS_ERR(q)) {
|
||||
err = -ENOMEM;
|
||||
goto out_free_tags;
|
||||
}
|
||||
vblk->disk->queue = q;
|
||||
|
||||
q->queuedata = vblk;
|
||||
|
||||
|
||||
@@ -296,7 +296,7 @@ static int rcar_du_probe(struct platform_device *pdev)
|
||||
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
|
||||
if (IS_ERR(rcdu->mmio))
|
||||
ret = PTR_ERR(rcdu->mmio);
|
||||
return PTR_ERR(rcdu->mmio);
|
||||
|
||||
/* DRM/KMS objects */
|
||||
ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
|
||||
|
||||
@@ -338,7 +338,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
|
||||
info->fbops = &virtio_gpufb_ops;
|
||||
info->pixmap.flags = FB_PIXMAP_SYSTEM;
|
||||
|
||||
info->screen_base = obj->vmap;
|
||||
info->screen_buffer = obj->vmap;
|
||||
info->screen_size = obj->gem_base.size;
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
|
||||
drm_fb_helper_fill_var(info, &vfbdev->helper,
|
||||
|
||||
@@ -1581,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
|
||||
struct isert_conn *isert_conn,
|
||||
u32 xfer_len)
|
||||
{
|
||||
struct ib_device *ib_dev = isert_conn->cm_id->device;
|
||||
struct ib_device *ib_dev = isert_conn->device->ib_device;
|
||||
struct iscsi_hdr *hdr;
|
||||
u64 rx_dma;
|
||||
int rx_buflen;
|
||||
|
||||
@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
||||
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
|
||||
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
|
||||
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
|
||||
memcpy(msg, &msg_tmp, sizeof(*msg));
|
||||
|
||||
/* No need to update the read positions, because this was a peek */
|
||||
/* If the caller specifically want to peek, return */
|
||||
if (peekonly) {
|
||||
memcpy(msg, &msg_tmp, sizeof(*msg));
|
||||
goto peekout;
|
||||
}
|
||||
|
||||
@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
||||
space_rem = bus->m_dwSizeGetRing - curr_grp;
|
||||
|
||||
if (space_rem < sizeof(*msg)) {
|
||||
/* msg wraps around the ring */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
|
||||
memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
|
||||
sizeof(*msg) - space_rem);
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
|
||||
space_rem, buf_size);
|
||||
|
||||
} else if (space_rem == sizeof(*msg)) {
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
|
||||
} else {
|
||||
/* Additional data wraps around the ring */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf) {
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
|
||||
sizeof(*msg), space_rem - sizeof(*msg));
|
||||
@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
||||
|
||||
} else {
|
||||
/* No wrapping */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
|
||||
buf_size);
|
||||
}
|
||||
/* Convert from little endian to CPU */
|
||||
msg->size = le16_to_cpu((__force __le16)msg->size);
|
||||
msg->command = le32_to_cpu((__force __le32)msg->command);
|
||||
msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
|
||||
|
||||
/* Update the read positions, adjusting the ring */
|
||||
saa7164_writel(bus->m_dwGetReadPos, new_grp);
|
||||
|
||||
@@ -1709,27 +1709,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
|
||||
|
||||
switch (cmd) {
|
||||
case VPFE_CMD_S_CCDC_RAW_PARAMS:
|
||||
ret = -EINVAL;
|
||||
v4l2_warn(&vpfe_dev->v4l2_dev,
|
||||
"VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
|
||||
if (ccdc_dev->hw_ops.set_params) {
|
||||
ret = ccdc_dev->hw_ops.set_params(param);
|
||||
if (ret) {
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
"Error setting parameters in CCDC\n");
|
||||
goto unlock_out;
|
||||
}
|
||||
ret = vpfe_get_ccdc_image_format(vpfe_dev,
|
||||
&vpfe_dev->fmt);
|
||||
if (ret < 0) {
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
"Invalid image format at CCDC\n");
|
||||
goto unlock_out;
|
||||
}
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
|
||||
}
|
||||
"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
|
||||
break;
|
||||
default:
|
||||
ret = -ENOTTY;
|
||||
|
||||
@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
|
||||
return 0;
|
||||
|
||||
case LIRC_GET_REC_RESOLUTION:
|
||||
val = dev->rx_resolution;
|
||||
val = dev->rx_resolution / 1000;
|
||||
break;
|
||||
|
||||
case LIRC_SET_WIDEBAND_RECEIVER:
|
||||
|
||||
@@ -608,7 +608,7 @@ static void nb8800_mac_config(struct net_device *dev)
|
||||
mac_mode |= HALF_DUPLEX;
|
||||
|
||||
if (gigabit) {
|
||||
if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
|
||||
if (phy_interface_is_rgmii(dev->phydev))
|
||||
mac_mode |= RGMII_MODE;
|
||||
|
||||
mac_mode |= GMAC_MODE;
|
||||
@@ -1295,11 +1295,10 @@ static int nb8800_tangox_init(struct net_device *dev)
|
||||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
pad_mode = PAD_MODE_RGMII;
|
||||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
|
||||
pad_mode = PAD_MODE_RGMII;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
@@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp)
|
||||
tg3_mem_rx_release(tp);
|
||||
tg3_mem_tx_release(tp);
|
||||
|
||||
/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
|
||||
tg3_full_lock(tp, 0);
|
||||
if (tp->hw_stats) {
|
||||
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
|
||||
tp->hw_stats, tp->stats_mapping);
|
||||
tp->hw_stats = NULL;
|
||||
}
|
||||
tg3_full_unlock(tp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -630,6 +630,10 @@ static void dump_command(struct mlx5_core_dev *dev,
|
||||
pr_debug("\n");
|
||||
}
|
||||
|
||||
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
|
||||
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
|
||||
struct mlx5_cmd_msg *msg);
|
||||
|
||||
static void cmd_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
|
||||
@@ -638,16 +642,27 @@ static void cmd_work_handler(struct work_struct *work)
|
||||
struct mlx5_cmd_layout *lay;
|
||||
struct semaphore *sem;
|
||||
unsigned long flags;
|
||||
int alloc_ret;
|
||||
|
||||
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
|
||||
down(sem);
|
||||
if (!ent->page_queue) {
|
||||
ent->idx = alloc_ent(cmd);
|
||||
if (ent->idx < 0) {
|
||||
alloc_ret = alloc_ent(cmd);
|
||||
if (alloc_ret < 0) {
|
||||
if (ent->callback) {
|
||||
ent->callback(-EAGAIN, ent->context);
|
||||
mlx5_free_cmd_msg(dev, ent->out);
|
||||
free_msg(dev, ent->in);
|
||||
free_cmd(ent);
|
||||
} else {
|
||||
ent->ret = -EAGAIN;
|
||||
complete(&ent->done);
|
||||
}
|
||||
mlx5_core_err(dev, "failed to allocate command entry\n");
|
||||
up(sem);
|
||||
return;
|
||||
}
|
||||
ent->idx = alloc_ret;
|
||||
} else {
|
||||
ent->idx = cmd->max_reg_cmds;
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
|
||||
@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
|
||||
.rpadir_value = 2 << 16,
|
||||
.no_trimd = 1,
|
||||
.no_ade = 1,
|
||||
.hw_crc = 1,
|
||||
.tsu = 1,
|
||||
.select_mii = 1,
|
||||
.shift_rd0 = 1,
|
||||
|
||||
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
|
||||
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
|
||||
{
|
||||
struct usb_device *dev = mcs->usbdev;
|
||||
int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
|
||||
MCS_RD_RTYPE, 0, reg, val, 2,
|
||||
msecs_to_jiffies(MCS_CTRL_TIMEOUT));
|
||||
void *dmabuf;
|
||||
int ret;
|
||||
|
||||
dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
|
||||
if (!dmabuf)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
|
||||
MCS_RD_RTYPE, 0, reg, dmabuf, 2,
|
||||
msecs_to_jiffies(MCS_CTRL_TIMEOUT));
|
||||
|
||||
memcpy(val, dmabuf, sizeof(__u16));
|
||||
kfree(dmabuf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
#define MII_DP83867_MICR 0x12
|
||||
#define MII_DP83867_ISR 0x13
|
||||
#define DP83867_CTRL 0x1f
|
||||
#define DP83867_CFG3 0x1e
|
||||
|
||||
/* Extended Registers */
|
||||
#define DP83867_RGMIICTL 0x0032
|
||||
@@ -89,6 +90,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
|
||||
micr_status |=
|
||||
(MII_DP83867_MICR_AN_ERR_INT_EN |
|
||||
MII_DP83867_MICR_SPEED_CHNG_INT_EN |
|
||||
MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
|
||||
MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
|
||||
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
|
||||
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
|
||||
|
||||
@@ -184,6 +187,13 @@ static int dp83867_config_init(struct phy_device *phydev)
|
||||
DP83867_DEVADDR, phydev->addr, delay);
|
||||
}
|
||||
|
||||
/* Enable Interrupt output INT_OE in CFG3 register */
|
||||
if (phy_interrupt_is_valid(phydev)) {
|
||||
val = phy_read(phydev, DP83867_CFG3);
|
||||
val |= BIT(7);
|
||||
phy_write(phydev, DP83867_CFG3, val);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -541,6 +541,9 @@ void phy_stop_machine(struct phy_device *phydev)
|
||||
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
|
||||
phydev->state = PHY_UP;
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
/* Now we can run the state machine synchronously */
|
||||
phy_state_machine(&phydev->state_queue.work);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -918,6 +921,15 @@ void phy_state_machine(struct work_struct *work)
|
||||
if (old_link != phydev->link)
|
||||
phydev->state = PHY_CHANGELINK;
|
||||
}
|
||||
/*
|
||||
* Failsafe: check that nobody set phydev->link=0 between two
|
||||
* poll cycles, otherwise we won't leave RUNNING state as long
|
||||
* as link remains down.
|
||||
*/
|
||||
if (!phydev->link && phydev->state == PHY_RUNNING) {
|
||||
phydev->state = PHY_CHANGELINK;
|
||||
dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
|
||||
}
|
||||
break;
|
||||
case PHY_CHANGELINK:
|
||||
err = phy_read_status(phydev);
|
||||
|
||||
@@ -1368,6 +1368,8 @@ static int phy_remove(struct device *dev)
|
||||
{
|
||||
struct phy_device *phydev = to_phy_device(dev);
|
||||
|
||||
cancel_delayed_work_sync(&phydev->state_queue);
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
phydev->state = PHY_DOWN;
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
@@ -201,6 +201,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
|
||||
unsigned long remaining_credit;
|
||||
struct timer_list credit_timeout;
|
||||
u64 credit_window_start;
|
||||
bool rate_limited;
|
||||
|
||||
/* Statistics */
|
||||
struct xenvif_stats stats;
|
||||
|
||||
@@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
|
||||
|
||||
if (work_done < budget) {
|
||||
napi_complete(napi);
|
||||
xenvif_napi_schedule_or_enable_events(queue);
|
||||
/* If the queue is rate-limited, it shall be
|
||||
* rescheduled in the timer callback.
|
||||
*/
|
||||
if (likely(!queue->rate_limited))
|
||||
xenvif_napi_schedule_or_enable_events(queue);
|
||||
}
|
||||
|
||||
return work_done;
|
||||
|
||||
@@ -687,6 +687,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
|
||||
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
|
||||
|
||||
queue->remaining_credit = min(max_credit, max_burst);
|
||||
queue->rate_limited = false;
|
||||
}
|
||||
|
||||
void xenvif_tx_credit_callback(unsigned long data)
|
||||
@@ -1184,8 +1185,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
|
||||
msecs_to_jiffies(queue->credit_usec / 1000);
|
||||
|
||||
/* Timer could already be pending in rare cases. */
|
||||
if (timer_pending(&queue->credit_timeout))
|
||||
if (timer_pending(&queue->credit_timeout)) {
|
||||
queue->rate_limited = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Passed the point where we can replenish credit? */
|
||||
if (time_after_eq64(now, next_credit)) {
|
||||
@@ -1200,6 +1203,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
|
||||
mod_timer(&queue->credit_timeout,
|
||||
next_credit);
|
||||
queue->credit_window_start = next_credit;
|
||||
queue->rate_limited = true;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
ssize_t rval = 0;
|
||||
|
||||
if (ha->optrom_state != QLA_SREADING)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&ha->optrom_mutex);
|
||||
|
||||
if (ha->optrom_state != QLA_SREADING)
|
||||
goto out;
|
||||
|
||||
rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
|
||||
ha->optrom_region_size);
|
||||
|
||||
out:
|
||||
mutex_unlock(&ha->optrom_mutex);
|
||||
|
||||
return rval;
|
||||
@@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
|
||||
struct device, kobj)));
|
||||
struct qla_hw_data *ha = vha->hw;
|
||||
|
||||
if (ha->optrom_state != QLA_SWRITING)
|
||||
mutex_lock(&ha->optrom_mutex);
|
||||
|
||||
if (ha->optrom_state != QLA_SWRITING) {
|
||||
mutex_unlock(&ha->optrom_mutex);
|
||||
return -EINVAL;
|
||||
if (off > ha->optrom_region_size)
|
||||
}
|
||||
if (off > ha->optrom_region_size) {
|
||||
mutex_unlock(&ha->optrom_mutex);
|
||||
return -ERANGE;
|
||||
}
|
||||
if (off + count > ha->optrom_region_size)
|
||||
count = ha->optrom_region_size - off;
|
||||
|
||||
mutex_lock(&ha->optrom_mutex);
|
||||
memcpy(&ha->optrom_buffer[off], buf, count);
|
||||
mutex_unlock(&ha->optrom_mutex);
|
||||
|
||||
|
||||
@@ -3965,6 +3965,8 @@ int iscsi_target_tx_thread(void *arg)
|
||||
{
|
||||
int ret = 0;
|
||||
struct iscsi_conn *conn = arg;
|
||||
bool conn_freed = false;
|
||||
|
||||
/*
|
||||
* Allow ourselves to be interrupted by SIGINT so that a
|
||||
* connection recovery / failure event can be triggered externally.
|
||||
@@ -3990,12 +3992,14 @@ get_immediate:
|
||||
goto transport_err;
|
||||
|
||||
ret = iscsit_handle_response_queue(conn);
|
||||
if (ret == 1)
|
||||
if (ret == 1) {
|
||||
goto get_immediate;
|
||||
else if (ret == -ECONNRESET)
|
||||
} else if (ret == -ECONNRESET) {
|
||||
conn_freed = true;
|
||||
goto out;
|
||||
else if (ret < 0)
|
||||
} else if (ret < 0) {
|
||||
goto transport_err;
|
||||
}
|
||||
}
|
||||
|
||||
transport_err:
|
||||
@@ -4005,8 +4009,13 @@ transport_err:
|
||||
* responsible for cleaning up the early connection failure.
|
||||
*/
|
||||
if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
|
||||
iscsit_take_action_for_connection_exit(conn);
|
||||
iscsit_take_action_for_connection_exit(conn, &conn_freed);
|
||||
out:
|
||||
if (!conn_freed) {
|
||||
while (!kthread_should_stop()) {
|
||||
msleep(100);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4105,6 +4114,7 @@ int iscsi_target_rx_thread(void *arg)
|
||||
u32 checksum = 0, digest = 0;
|
||||
struct iscsi_conn *conn = arg;
|
||||
struct kvec iov;
|
||||
bool conn_freed = false;
|
||||
/*
|
||||
* Allow ourselves to be interrupted by SIGINT so that a
|
||||
* connection recovery / failure event can be triggered externally.
|
||||
@@ -4116,7 +4126,7 @@ int iscsi_target_rx_thread(void *arg)
|
||||
*/
|
||||
rc = wait_for_completion_interruptible(&conn->rx_login_comp);
|
||||
if (rc < 0 || iscsi_target_check_conn_state(conn))
|
||||
return 0;
|
||||
goto out;
|
||||
|
||||
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
|
||||
struct completion comp;
|
||||
@@ -4201,7 +4211,13 @@ int iscsi_target_rx_thread(void *arg)
|
||||
transport_err:
|
||||
if (!signal_pending(current))
|
||||
atomic_set(&conn->transport_failed, 1);
|
||||
iscsit_take_action_for_connection_exit(conn);
|
||||
iscsit_take_action_for_connection_exit(conn, &conn_freed);
|
||||
out:
|
||||
if (!conn_freed) {
|
||||
while (!kthread_should_stop()) {
|
||||
msleep(100);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -4575,8 +4591,11 @@ static void iscsit_logout_post_handler_closesession(
|
||||
* always sleep waiting for RX/TX thread shutdown to complete
|
||||
* within iscsit_close_connection().
|
||||
*/
|
||||
if (conn->conn_transport->transport_type == ISCSI_TCP)
|
||||
if (conn->conn_transport->transport_type == ISCSI_TCP) {
|
||||
sleep = cmpxchg(&conn->tx_thread_active, true, false);
|
||||
if (!sleep)
|
||||
return;
|
||||
}
|
||||
|
||||
atomic_set(&conn->conn_logout_remove, 0);
|
||||
complete(&conn->conn_logout_comp);
|
||||
@@ -4592,8 +4611,11 @@ static void iscsit_logout_post_handler_samecid(
|
||||
{
|
||||
int sleep = 1;
|
||||
|
||||
if (conn->conn_transport->transport_type == ISCSI_TCP)
|
||||
if (conn->conn_transport->transport_type == ISCSI_TCP) {
|
||||
sleep = cmpxchg(&conn->tx_thread_active, true, false);
|
||||
if (!sleep)
|
||||
return;
|
||||
}
|
||||
|
||||
atomic_set(&conn->conn_logout_remove, 0);
|
||||
complete(&conn->conn_logout_comp);
|
||||
|
||||
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
|
||||
}
|
||||
}
|
||||
|
||||
void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
|
||||
void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
|
||||
{
|
||||
*conn_freed = false;
|
||||
|
||||
spin_lock_bh(&conn->state_lock);
|
||||
if (atomic_read(&conn->connection_exit)) {
|
||||
spin_unlock_bh(&conn->state_lock);
|
||||
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
|
||||
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
|
||||
spin_unlock_bh(&conn->state_lock);
|
||||
iscsit_close_connection(conn);
|
||||
*conn_freed = true;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
|
||||
spin_unlock_bh(&conn->state_lock);
|
||||
|
||||
iscsit_handle_connection_cleanup(conn);
|
||||
*conn_freed = true;
|
||||
}
|
||||
|
||||
@@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
|
||||
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
|
||||
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
|
||||
extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
|
||||
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
|
||||
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
|
||||
|
||||
#endif /*** ISCSI_TARGET_ERL0_H ***/
|
||||
|
||||
@@ -1436,5 +1436,9 @@ int iscsi_target_login_thread(void *arg)
|
||||
break;
|
||||
}
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
msleep(100);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -489,14 +489,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)
|
||||
|
||||
static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
|
||||
|
||||
static bool iscsi_target_sk_state_check(struct sock *sk)
|
||||
static bool __iscsi_target_sk_check_close(struct sock *sk)
|
||||
{
|
||||
if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
|
||||
pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
|
||||
pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
|
||||
"returning FALSE\n");
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
|
||||
{
|
||||
bool state = false;
|
||||
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
state = (__iscsi_target_sk_check_close(sk) ||
|
||||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
return state;
|
||||
}
|
||||
|
||||
static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
|
||||
{
|
||||
bool state = false;
|
||||
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
state = test_bit(flag, &conn->login_flags);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
return state;
|
||||
}
|
||||
|
||||
static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
|
||||
{
|
||||
bool state = false;
|
||||
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
state = (__iscsi_target_sk_check_close(sk) ||
|
||||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
|
||||
if (!state)
|
||||
clear_bit(flag, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
return state;
|
||||
}
|
||||
|
||||
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
|
||||
@@ -536,6 +582,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
|
||||
|
||||
pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
|
||||
conn, current->comm, current->pid);
|
||||
/*
|
||||
* If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
|
||||
* before initial PDU processing in iscsi_target_start_negotiation()
|
||||
* has completed, go ahead and retry until it's cleared.
|
||||
*
|
||||
* Otherwise if the TCP connection drops while this is occuring,
|
||||
* iscsi_target_start_negotiation() will detect the failure, call
|
||||
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
|
||||
* remaining iscsi connection resources from iscsi_np process context.
|
||||
*/
|
||||
if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
|
||||
schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&tpg->tpg_state_lock);
|
||||
state = (tpg->tpg_state == TPG_STATE_ACTIVE);
|
||||
@@ -543,26 +603,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
|
||||
|
||||
if (!state) {
|
||||
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
iscsi_target_login_drop(conn, login);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
return;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
state = iscsi_target_sk_state_check(sk);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
if (!state) {
|
||||
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
iscsi_target_login_drop(conn, login);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
return;
|
||||
}
|
||||
if (iscsi_target_sk_check_close(conn)) {
|
||||
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
conn->login_kworker = current;
|
||||
@@ -580,34 +626,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
|
||||
flush_signals(current);
|
||||
conn->login_kworker = NULL;
|
||||
|
||||
if (rc < 0) {
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
iscsi_target_login_drop(conn, login);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
return;
|
||||
}
|
||||
if (rc < 0)
|
||||
goto err;
|
||||
|
||||
pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
|
||||
conn, current->comm, current->pid);
|
||||
|
||||
rc = iscsi_target_do_login(conn, login);
|
||||
if (rc < 0) {
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
iscsi_target_login_drop(conn, login);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
goto err;
|
||||
} else if (!rc) {
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
|
||||
goto err;
|
||||
} else if (rc == 1) {
|
||||
iscsi_target_nego_release(conn);
|
||||
iscsi_post_login_handler(np, conn, zero_tsih);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
}
|
||||
return;
|
||||
|
||||
err:
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
iscsi_target_login_drop(conn, login);
|
||||
iscsit_deaccess_np(np, tpg, tpg_np);
|
||||
}
|
||||
|
||||
static void iscsi_target_do_cleanup(struct work_struct *work)
|
||||
@@ -655,31 +696,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
|
||||
orig_state_change(sk);
|
||||
return;
|
||||
}
|
||||
state = __iscsi_target_sk_check_close(sk);
|
||||
pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
|
||||
|
||||
if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
|
||||
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
|
||||
" conn: %p\n", conn);
|
||||
if (state)
|
||||
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
orig_state_change(sk);
|
||||
return;
|
||||
}
|
||||
if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
|
||||
if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
|
||||
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
|
||||
conn);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
orig_state_change(sk);
|
||||
return;
|
||||
}
|
||||
|
||||
state = iscsi_target_sk_state_check(sk);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
|
||||
|
||||
if (!state) {
|
||||
/*
|
||||
* If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
|
||||
* but only queue conn->login_work -> iscsi_target_do_login_rx()
|
||||
* processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
|
||||
*
|
||||
* When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
|
||||
* will detect the dropped TCP connection from delayed workqueue context.
|
||||
*
|
||||
* If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
|
||||
* iscsi_target_start_negotiation() is running, iscsi_target_do_login()
|
||||
* via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
|
||||
* via iscsi_target_sk_check_and_clear() is responsible for detecting the
|
||||
* dropped TCP connection in iscsi_np process context, and cleaning up
|
||||
* the remaining iscsi connection resources.
|
||||
*/
|
||||
if (state) {
|
||||
pr_debug("iscsi_target_sk_state_change got failed state\n");
|
||||
schedule_delayed_work(&conn->login_cleanup_work, 0);
|
||||
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
|
||||
state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
orig_state_change(sk);
|
||||
|
||||
if (!state)
|
||||
schedule_delayed_work(&conn->login_work, 0);
|
||||
return;
|
||||
}
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
orig_state_change(sk);
|
||||
}
|
||||
|
||||
@@ -944,6 +1008,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
|
||||
if (iscsi_target_handle_csg_one(conn, login) < 0)
|
||||
return -1;
|
||||
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
|
||||
/*
|
||||
* Check to make sure the TCP connection has not
|
||||
* dropped asynchronously while session reinstatement
|
||||
* was occuring in this kthread context, before
|
||||
* transitioning to full feature phase operation.
|
||||
*/
|
||||
if (iscsi_target_sk_check_close(conn))
|
||||
return -1;
|
||||
|
||||
login->tsih = conn->sess->tsih;
|
||||
login->login_complete = 1;
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
@@ -970,21 +1043,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
|
||||
break;
|
||||
}
|
||||
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
bool state;
|
||||
|
||||
read_lock_bh(&sk->sk_callback_lock);
|
||||
state = iscsi_target_sk_state_check(sk);
|
||||
read_unlock_bh(&sk->sk_callback_lock);
|
||||
|
||||
if (!state) {
|
||||
pr_debug("iscsi_target_do_login() failed state for"
|
||||
" conn: %p\n", conn);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1248,16 +1306,28 @@ int iscsi_target_start_negotiation(
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = iscsi_target_do_login(conn, login);
|
||||
if (!ret) {
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
if (conn->sock) {
|
||||
struct sock *sk = conn->sock->sk;
|
||||
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
} else if (ret < 0) {
|
||||
write_lock_bh(&sk->sk_callback_lock);
|
||||
set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
|
||||
set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
|
||||
write_unlock_bh(&sk->sk_callback_lock);
|
||||
}
|
||||
/*
|
||||
* If iscsi_target_do_login returns zero to signal more PDU
|
||||
* exchanges are required to complete the login, go ahead and
|
||||
* clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
|
||||
* is still active.
|
||||
*
|
||||
* Otherwise if TCP connection dropped asynchronously, go ahead
|
||||
* and perform connection cleanup now.
|
||||
*/
|
||||
ret = iscsi_target_do_login(conn, login);
|
||||
if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
|
||||
ret = -1;
|
||||
|
||||
if (ret < 0) {
|
||||
cancel_delayed_work_sync(&conn->login_work);
|
||||
cancel_delayed_work_sync(&conn->login_cleanup_work);
|
||||
iscsi_target_restore_sock_callbacks(conn);
|
||||
|
||||
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
|
||||
pr_err("Source se_lun->lun_se_dev does not exist\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (lun->lun_shutdown) {
|
||||
pr_err("Unable to create mappedlun symlink because"
|
||||
" lun->lun_shutdown=true\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
se_tpg = lun->lun_tpg;
|
||||
|
||||
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
|
||||
|
||||
@@ -673,6 +673,8 @@ void core_tpg_remove_lun(
|
||||
*/
|
||||
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
|
||||
|
||||
lun->lun_shutdown = true;
|
||||
|
||||
core_clear_lun_from_tpg(lun, tpg);
|
||||
/*
|
||||
* Wait for any active I/O references to percpu se_lun->lun_ref to
|
||||
@@ -694,6 +696,8 @@ void core_tpg_remove_lun(
|
||||
}
|
||||
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
|
||||
hlist_del_rcu(&lun->link);
|
||||
|
||||
lun->lun_shutdown = false;
|
||||
mutex_unlock(&tpg->tpg_lun_mutex);
|
||||
|
||||
percpu_ref_exit(&lun->lun_ref);
|
||||
|
||||
@@ -500,6 +500,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
|
||||
lastoff = page_offset(page);
|
||||
bh = head = page_buffers(page);
|
||||
do {
|
||||
if (lastoff + bh->b_size <= startoff)
|
||||
goto next;
|
||||
if (buffer_uptodate(bh) ||
|
||||
buffer_unwritten(bh)) {
|
||||
if (whence == SEEK_DATA)
|
||||
@@ -514,6 +516,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
|
||||
unlock_page(page);
|
||||
goto out;
|
||||
}
|
||||
next:
|
||||
lastoff += bh->b_size;
|
||||
bh = bh->b_this_page;
|
||||
} while (bh != head);
|
||||
|
||||
@@ -1926,7 +1926,8 @@ retry:
|
||||
n_desc_blocks = o_desc_blocks +
|
||||
le16_to_cpu(es->s_reserved_gdt_blocks);
|
||||
n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
|
||||
n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
|
||||
n_blocks_count = (ext4_fsblk_t)n_group *
|
||||
EXT4_BLOCKS_PER_GROUP(sb);
|
||||
n_group--; /* set to last group number */
|
||||
}
|
||||
|
||||
|
||||
@@ -1091,20 +1091,18 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
|
||||
if (unlikely(fsmeta >= total))
|
||||
return 1;
|
||||
|
||||
main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
|
||||
main_segs = le32_to_cpu(raw_super->segment_count_main);
|
||||
blocks_per_seg = sbi->blocks_per_seg;
|
||||
|
||||
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
|
||||
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
|
||||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
|
||||
le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
|
||||
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
|
||||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
|
||||
le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(f2fs_cp_error(sbi))) {
|
||||
|
||||
@@ -510,6 +510,10 @@ struct mm_struct {
|
||||
* PROT_NONE or PROT_NUMA mapped page.
|
||||
*/
|
||||
bool tlb_flush_pending;
|
||||
#endif
|
||||
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
|
||||
/* See flush_tlb_batched_pending() */
|
||||
bool tlb_flush_batched;
|
||||
#endif
|
||||
struct uprobes_state uprobes_state;
|
||||
#ifdef CONFIG_X86_INTEL_MPX
|
||||
|
||||
@@ -813,6 +813,16 @@ struct signal_struct {
|
||||
|
||||
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
|
||||
|
||||
#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
|
||||
SIGNAL_STOP_CONTINUED)
|
||||
|
||||
static inline void signal_set_stop_flags(struct signal_struct *sig,
|
||||
unsigned int flags)
|
||||
{
|
||||
WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
|
||||
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
|
||||
}
|
||||
|
||||
/* If true, all threads except ->group_exit_task have pending SIGKILL */
|
||||
static inline int signal_group_exit(const struct signal_struct *sig)
|
||||
{
|
||||
|
||||
@@ -215,7 +215,7 @@ static inline const char *__check_heap_object(const void *ptr,
|
||||
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
|
||||
*/
|
||||
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
|
||||
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
|
||||
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
|
||||
#ifndef KMALLOC_SHIFT_LOW
|
||||
#define KMALLOC_SHIFT_LOW 3
|
||||
#endif
|
||||
@@ -228,7 +228,7 @@ static inline const char *__check_heap_object(const void *ptr,
|
||||
* be allocated from the same page.
|
||||
*/
|
||||
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
|
||||
#define KMALLOC_SHIFT_MAX 30
|
||||
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
|
||||
#ifndef KMALLOC_SHIFT_LOW
|
||||
#define KMALLOC_SHIFT_LOW 3
|
||||
#endif
|
||||
|
||||
@@ -311,6 +311,7 @@ enum {
|
||||
|
||||
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
|
||||
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
|
||||
__WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
|
||||
|
||||
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
|
||||
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name) \
alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))

@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
memcpy(stream + lcp_len,
((char *) &iwe->u) + IW_EV_POINT_OFF,
IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
memcpy(stream + point_len, extra, iwe->u.data.length);
if (iwe->u.data.length && extra)
memcpy(stream + point_len, extra, iwe->u.data.length);
stream += event_len;
}
return stream;

@@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)

#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
(pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
(void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += WORD_ROUND(ntohs(pos.p->length)))
@@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
(void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))

@@ -562,6 +562,7 @@ struct iscsi_conn {
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
#define LOGIN_FLAGS_READY 4
#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags;
struct delayed_work login_work;
struct delayed_work login_cleanup_work;

@@ -714,6 +714,7 @@ struct se_lun {
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
u32 lun_access;
bool lun_shutdown;
u32 lun_index;

/* RELATIVE TARGET PORT IDENTIFER */

@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
sig->flags = SIGNAL_STOP_STOPPED;
signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
signal->flags = why | SIGNAL_STOP_CONTINUED;
signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}

@@ -3647,8 +3647,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
return -EINVAL;

/* creating multiple pwqs breaks ordering guarantee */
if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
return -EINVAL;
if (!list_empty(&wq->pwqs)) {
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->flags &= ~__WQ_ORDERED;
}

ctx = apply_wqattrs_prepare(wq, attrs);

@@ -3834,6 +3838,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
struct workqueue_struct *wq;
struct pool_workqueue *pwq;

/*
* Unbound && max_active == 1 used to imply ordered, which is no
* longer the case on NUMA machines due to per-node pools. While
* alloc_ordered_workqueue() is the right way to create an ordered
* workqueue, keep the previous behavior to avoid subtle breakages
* on NUMA.
*/
if ((flags & WQ_UNBOUND) && max_active == 1)
flags |= __WQ_ORDERED;

/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
@@ -4022,13 +4036,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
struct pool_workqueue *pwq;

/* disallow meddling with max_active for ordered workqueues */
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;

max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

mutex_lock(&wq->mutex);

wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;

for_each_pwq(pwq, wq)
@@ -5154,7 +5169,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
* attributes breaks ordering guarantee. Disallow exposing ordered
* workqueues.
*/
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);

@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED

config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,

@@ -453,6 +453,7 @@ struct tlbflush_unmap_batch;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
@@ -460,6 +461,8 @@ static inline void try_to_unmap_flush(void)
static inline void try_to_unmap_flush_dirty(void)
{
}

static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
#endif /* __MM_INTERNAL_H__ */

@@ -1127,6 +1127,7 @@ again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;

@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool)
void *element = pool->elements[--pool->curr_nr];

BUG_ON(pool->curr_nr < 0);
check_element(pool, element);
kasan_unpoison_element(pool, element);
check_element(pool, element);
return element;
}

@@ -72,6 +72,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!pte)
return 0;

flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
do {
oldpte = *pte;

@@ -135,6 +135,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();

for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,

@@ -1532,14 +1532,14 @@ int move_freepages(struct zone *zone,
#endif

for (page = start_page; page <= end_page;) {
/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}

/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

if (!PageBuddy(page)) {
page++;
continue;
@@ -5852,8 +5852,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
}

if (pages && s)
pr_info("Freeing %s memory: %ldK (%p - %p)\n",
s, pages << (PAGE_SHIFT - 10), start, end);
pr_info("Freeing %s memory: %ldK\n",
s, pages << (PAGE_SHIFT - 10));

return pages;
}

36 mm/rmap.c
@@ -648,6 +648,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
tlb_ubc->flush_required = true;

/*
* Ensure compiler does not re-order the setting of tlb_flush_batched
* before the PTE is cleared.
*/
barrier();
mm->tlb_flush_batched = true;

/*
* If the PTE was dirty then it's best to assume it's writable. The
* caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
@@ -675,6 +682,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)

return should_defer;
}

/*
* Reclaim unmaps pages under the PTL but do not flush the TLB prior to
* releasing the PTL if TLB flushes are batched. It's possible for a parallel
* operation such as mprotect or munmap to race between reclaim unmapping
* the page and flushing the page. If this race occurs, it potentially allows
* access to data via a stale TLB entry. Tracking all mm's that have TLB
* batching in flight would be expensive during reclaim so instead track
* whether TLB batching occurred in the past and if so then do a flush here
* if required. This will cost one additional flush per reclaim cycle paid
* by the first operation at risk such as mprotect and mumap.
*
* This must be called under the PTL so that an access to tlb_flush_batched
* that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
* via the PTL.
*/
void flush_tlb_batched_pending(struct mm_struct *mm)
{
if (mm->tlb_flush_batched) {
flush_tlb_mm(mm);

/*
* Do not allow the compiler to re-order the clearing of
* tlb_flush_batched before the tlb is flushed.
*/
barrier();
mm->tlb_flush_batched = false;
}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
struct page *page, bool writable)

@@ -2551,7 +2551,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
return skb->ip_summed != CHECKSUM_PARTIAL &&
skb->ip_summed != CHECKSUM_NONE;
skb->ip_summed != CHECKSUM_UNNECESSARY;

return skb->ip_summed == CHECKSUM_NONE;
}

@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)

if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = 0;

error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
if (error)

@@ -1742,7 +1742,8 @@ static int do_setlink(const struct sk_buff *skb,
struct sockaddr *sa;
int len;

len = sizeof(sa_family_t) + dev->addr_len;
len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
sizeof(*sa));
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;

@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
* singleton values (which always leads to failure).
* These settings can still (later) be overridden via sockopts.
*/
if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
ccid_get_builtin_ccids(&rx.val, &rx.len))
if (ccid_get_builtin_ccids(&tx.val, &tx.len))
return -ENOBUFS;
if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
kfree(tx.val);
return -ENOBUFS;
}

if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
!dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))

@@ -635,6 +635,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;

inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
reqsk_put(req);
return 0;

drop_and_free:

@@ -376,6 +376,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;

inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
reqsk_put(req);
return 0;

drop_and_free:

@@ -1320,13 +1320,14 @@ static struct pernet_operations fib_net_ops = {

void __init ip_fib_init(void)
{
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
fib_trie_init();

register_pernet_subsys(&fib_net_ops);

register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);

fib_trie_init();
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
}

@@ -922,10 +922,12 @@ static int __ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;

cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
if ((skb && skb_is_gso(skb)) ||
(((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
(skb_queue_len(queue) <= 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
maxfraglen, flags);
@@ -1241,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
return -EINVAL;

if ((size + skb->len > mtu) &&
(skb_queue_len(&sk->sk_write_queue) == 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
if (skb->ip_summed != CHECKSUM_PARTIAL)

@@ -337,6 +337,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
treq = tcp_rsk(req);
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->txhash = net_tx_rndhash();
req->mss = mss;
ireq->ir_num = ntohs(th->dest);
ireq->ir_rmt_port = th->source;

@@ -2504,8 +2504,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);

/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
(inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_time_stamp;
}

@@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk)
struct sk_buff *buff;
int err;

if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
return -EHOSTUNREACH; /* Routing failure or similar. */

tcp_connect_init(sk);

if (unlikely(tp->repair)) {

@@ -606,7 +606,8 @@ static void tcp_keepalive_timer (unsigned long data)
goto death;
}

if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
if (!sock_flag(sk, SOCK_KEEPOPEN) ||
((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
goto out;

elapsed = keepalive_time_when(tp);

@@ -819,7 +819,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
if (is_udplite) /* UDP-Lite */
csum = udplite_csum(skb);

else if (sk->sk_no_check_tx) { /* UDP csum disabled */
else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */

skb->ip_summed = CHECKSUM_NONE;
goto send;

@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;

skb->ip_summed = CHECKSUM_NONE;
skb->ip_summed = CHECKSUM_UNNECESSARY;

/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()

@@ -647,8 +647,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
*prevhdr = NEXTHDR_FRAGMENT;
tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
if (!tmp_hdr) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -767,8 +765,6 @@ slow_path:
frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
hroom + troom, GFP_ATOMIC);
if (!frag) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -1361,11 +1357,12 @@ emsgsize:
*/

cork->length += length;
if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
if ((skb && skb_is_gso(skb)) ||
(((length + (skb ? skb->len : headersize)) > mtu) &&
(skb_queue_len(queue) <= 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) &&
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
(sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
transhdrlen, mtu, flags, fl6);

@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)

while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
unsigned int len;

switch (**nexthdr) {

@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)

exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
offset += ipv6_optlen(exthdr);
len = ipv6_optlen(exthdr);
if (len + offset >= IPV6_MAXPLEN)
return -EINVAL;
offset += len;
*nexthdr = &exthdr->nexthdr;
}

@@ -210,6 +210,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq->snt_synack.v64 = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->txhash = net_tx_rndhash();

/*
* We need to lookup the dst_entry to get the correct window size.

@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;

skb->ip_summed = CHECKSUM_NONE;
skb->ip_summed = CHECKSUM_UNNECESSARY;

/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);

@@ -577,8 +577,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,

nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int maxlen = ovs_ct_attr_lens[type].maxlen;
int minlen = ovs_ct_attr_lens[type].minlen;
int maxlen;
int minlen;

if (type > OVS_CT_ATTR_MAX) {
OVS_NLERR(log,
@@ -586,6 +586,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
type, OVS_CT_ATTR_MAX);
return -EINVAL;
}

maxlen = ovs_ct_attr_lens[type].maxlen;
minlen = ovs_ct_attr_lens[type].minlen;
if (nla_len(a) < minlen || nla_len(a) > maxlen) {
OVS_NLERR(log,
"Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",

@@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv

if (optlen != sizeof(val))
return -EINVAL;
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
if (val > INT_MAX)
return -EINVAL;
po->tp_reserve = val;
return 0;
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
po->tp_reserve = val;
ret = 0;
}
release_sock(sk);
return ret;
}
case PACKET_LOSS:
{
@@ -4225,7 +4230,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (closing && (po->tp_version > TPACKET_V2)) {
if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);

@@ -42,8 +42,8 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int
return PTR_ERR(target);

t->u.kernel.target = target;
memset(&par, 0, sizeof(par));
par.table = table;
par.entryinfo = NULL;
par.target = target;
par.targinfo = t->data;
par.hook_mask = hook;

@@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),

@@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
be->dai_link->name, event, dir);

if ((event == SND_SOC_DAPM_STREAM_STOP) &&
(be->dpcm[dir].users >= 1))
continue;

snd_soc_dapm_stream_event(be, dir, event);
}