Merge tag 'v6.6.90' of git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable into odroid-6.6.y

This is the 6.6.90 stable release

Change-Id: I3f787842cb229c0d9a97f4e690aec63c0bb86bf8
Author: Mauro Ribeiro
Date:   2025-05-14 20:50:03 -03:00
142 changed files with 1760 additions and 653 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 89
SUBLEVEL = 90
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@@ -40,6 +40,9 @@
reg = <1>;
interrupt-parent = <&gpio4>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
status = "okay";
};
};


@@ -73,14 +73,13 @@
};
intc: interrupt-controller@4ac00000 {
compatible = "arm,cortex-a7-gic";
compatible = "arm,gic-400";
#interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller;
reg = <0x0 0x4ac10000 0x0 0x1000>,
<0x0 0x4ac20000 0x0 0x2000>,
<0x0 0x4ac40000 0x0 0x2000>,
<0x0 0x4ac60000 0x0 0x2000>;
<0x0 0x4ac20000 0x0 0x20000>,
<0x0 0x4ac40000 0x0 0x20000>,
<0x0 0x4ac60000 0x0 0x20000>;
};
psci {


@@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void)
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
{},
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
{},
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),


@@ -103,9 +103,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
int sig = signalcode >> 24;
if (sig == SIGFPE) {
/*
* Clear floating point trap bit to avoid trapping
* again on the first floating-point instruction in
* the userspace signal handler.
*/
regs->fr[0] &= ~(1ULL << 38);
}
force_sig_fault(sig, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
}
return signalcode ? -1 : 0;


@@ -234,10 +234,8 @@ fi
# suppress some warnings in recent ld versions
nowarn="-z noexecstack"
if ! ld_is_lld; then
if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
nowarn="$nowarn --no-warn-rwx-segments"
fi
if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
nowarn="$nowarn --no-warn-rwx-segments"
fi
platformo=$object/"$platform".o


@@ -1056,6 +1056,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd;
pte_t *pte;
/*
* Make sure we align the start vmemmap addr so that we calculate
* the correct start_pfn in the altmap boundary check to decide whether
* we should use altmap or RAM based backing memory allocation. Also
* the address needs to be aligned for the set_pte operation.
* If the start addr is already PMD_SIZE aligned we will try to use
* a pmd mapping. We don't want to be too aggressive here because
* that will cause more allocations in RAM. So only if the namespace
* vmemmap start addr is PMD_SIZE aligned will we use a PMD mapping.
*/
start = ALIGN_DOWN(start, PAGE_SIZE);
for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end);
@@ -1081,8 +1094,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation.
*/
if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/*
* make sure we don't create altmap mappings
* covering things outside the device.


@@ -9,7 +9,7 @@
int patch_insn_write(void *addr, const void *insn, size_t len);
int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text_set_nosync(void *addr, u8 c, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns);
int patch_text(void *addr, u32 *insns, size_t len);
extern int riscv_patch_in_stop_machine;


@@ -19,7 +19,7 @@
struct patch_insn {
void *addr;
u32 *insns;
int ninsns;
size_t len;
atomic_t cpu_count;
};
@@ -234,14 +234,10 @@ NOKPROBE_SYMBOL(patch_text_nosync);
static int patch_text_cb(void *data)
{
struct patch_insn *patch = data;
unsigned long len;
int i, ret = 0;
int ret = 0;
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < patch->ninsns; i++) {
len = GET_INSN_LENGTH(patch->insns[i]);
ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
}
ret = patch_insn_write(patch->addr, patch->insns, patch->len);
/*
* Make sure the patching store is effective *before* we
* increment the counter which releases all waiting CPUs
@@ -262,13 +258,13 @@ static int patch_text_cb(void *data)
}
NOKPROBE_SYMBOL(patch_text_cb);
int patch_text(void *addr, u32 *insns, int ninsns)
int patch_text(void *addr, u32 *insns, size_t len)
{
int ret;
struct patch_insn patch = {
.addr = addr,
.insns = insns,
.ninsns = ninsns,
.len = len,
.cpu_count = ATOMIC_INIT(0),
};


@@ -23,13 +23,13 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
size_t len = GET_INSN_LENGTH(p->opcode);
u32 insn = __BUG_INSN_32;
unsigned long offset = GET_INSN_LENGTH(p->opcode);
p->ainsn.api.restore = (unsigned long)p->addr + offset;
p->ainsn.api.restore = (unsigned long)p->addr + len;
patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
patch_text_nosync((void *)p->ainsn.api.insn + offset, &insn, 1);
patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
@@ -116,16 +116,18 @@ void *alloc_insn_page(void)
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
__BUG_INSN_32 : __BUG_INSN_16;
size_t len = GET_INSN_LENGTH(p->opcode);
u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;
patch_text(p->addr, &insn, 1);
patch_text(p->addr, &insn, len);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, &p->opcode, 1);
size_t len = GET_INSN_LENGTH(p->opcode);
patch_text(p->addr, &p->opcode, len);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)


@@ -14,6 +14,7 @@
#include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2
#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -681,7 +682,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
if (ret)
return ret;
if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4))
if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
return -EFAULT;
ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
@@ -690,8 +691,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
cpus_read_lock();
mutex_lock(&text_mutex);
if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4))
ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS);
if (memcmp(ip, new_insns, RV_FENTRY_NBYTES))
ret = patch_text(ip, new_insns, RV_FENTRY_NBYTES);
mutex_unlock(&text_mutex);
cpus_read_unlock();


@@ -4206,7 +4206,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
};
if (arr[pebs_enable].host) {


@@ -48,6 +48,7 @@ KVM_X86_OP(set_idt)
KVM_X86_OP(get_gdt)
KVM_X86_OP(set_gdt)
KVM_X86_OP(sync_dirty_debug_regs)
KVM_X86_OP(set_dr6)
KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags)


@@ -1595,6 +1595,7 @@ struct kvm_x86_ops {
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);


@@ -2014,11 +2014,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->asid = sd->next_asid++;
}
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vmcb *vmcb = svm->vmcb;
struct vmcb *vmcb = to_svm(vcpu)->vmcb;
if (svm->vcpu.arch.guest_state_protected)
if (vcpu->arch.guest_state_protected)
return;
if (unlikely(value != vmcb->save.dr6)) {
@@ -4220,10 +4220,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
* Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB.
*/
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
svm_set_dr6(svm, vcpu->arch.dr6);
else
svm_set_dr6(svm, DR6_ACTIVE_LOW);
if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
clgi();
kvm_load_guest_xsave_state(vcpu);
@@ -5002,6 +5000,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
.set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
.cache_reg = svm_cache_reg,


@@ -5617,6 +5617,12 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
set_debugreg(DR6_RESERVED, 6);
}
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
lockdep_assert_irqs_disabled();
set_debugreg(vcpu->arch.dr6, 6);
}
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
vmcs_writel(GUEST_DR7, val);
@@ -7356,10 +7362,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4;
}
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
set_debugreg(vcpu->arch.dr6, 6);
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
@@ -8292,6 +8294,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
.set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7,
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
.cache_reg = vmx_cache_reg,


@@ -10772,6 +10772,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6);
} else if (unlikely(hw_breakpoint_active())) {
set_debugreg(0, 7);
}
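Note: the KVM hunks above replace the open-coded DR6 writes in common x86 code with a new per-vendor `set_dr6` callback, invoked from vcpu_enter_guest() only when KVM_DEBUGREG_WONT_EXIT is set. A minimal userspace sketch of that ops-table dispatch shape (types, names, and the flag value are illustrative, not the real KVM API):

```c
#include <stdio.h>

struct vcpu {
	unsigned long dr6;
	unsigned long switch_db_regs;
};
#define DEBUGREG_WONT_EXIT (1UL << 2)   /* made-up flag value */

struct x86_ops {
	void (*set_dr6)(struct vcpu *vcpu, unsigned long value);
};

static void vmx_set_dr6(struct vcpu *vcpu, unsigned long value)
{
	/* Vendor-specific action; VMX loads the hardware register. */
	printf("vcpu %p: set_debugreg(%#lx, 6)\n", (void *)vcpu, value);
}

static const struct x86_ops ops = { .set_dr6 = vmx_set_dr6 };

static void vcpu_enter_guest(struct vcpu *vcpu)
{
	/* Sync DR6 into hardware only when the guest owns the debug regs. */
	if (vcpu->switch_db_regs & DEBUGREG_WONT_EXIT)
		ops.set_dr6(vcpu, vcpu->dr6);
}

int main(void)
{
	struct vcpu v = { .dr6 = 0xffff0ff0, .switch_db_regs = DEBUGREG_WONT_EXIT };

	vcpu_enter_guest(&v);
	return 0;
}
```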


@@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, struct device_driver *drv)
if (mod)
mk = &mod->mkobj;
else if (drv->mod_name) {
struct kobject *mkobj;
/* Lookup built-in module entry in /sys/modules */
mkobj = kset_find_obj(module_kset, drv->mod_name);
if (mkobj) {
mk = container_of(mkobj, struct module_kobject, kobj);
/* Lookup or create built-in module entry in /sys/modules */
mk = lookup_or_create_module_kobject(drv->mod_name);
if (mk) {
/* remember our module structure */
drv->p->mkobj = mk;
/* kset_find_obj took a reference */
kobject_put(mkobj);
/* lookup_or_create_module_kobject took a reference */
kobject_put(&mk->kobj);
}
}


@@ -3521,22 +3521,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
/*
* ==0: not a dump pkt.
* < 0: fails to handle a dump pkt
* > 0: otherwise.
*/
/* Return: 0 on success, negative errno on failure. */
static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
int ret = 1;
int ret = 0;
u8 pkt_type;
u8 *sk_ptr;
unsigned int sk_len;
u16 seqno;
u32 dump_size;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
struct btusb_data *btdata = hci_get_drvdata(hdev);
struct usb_device *udev = btdata->udev;
@@ -3546,30 +3540,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
sk_len = skb->len;
if (pkt_type == HCI_ACLDATA_PKT) {
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return 0;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
} else {
event_hdr = hci_event_hdr(skb);
}
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return 0;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data))
|| (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
|| (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return 0;
/*it is dump pkt now*/
seqno = le16_to_cpu(dump_hdr->seqno);
if (seqno == 0) {
set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);
@@ -3643,17 +3621,84 @@ out:
return ret;
}
/* Return: true if the ACL packet is a dump packet, false otherwise. */
static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct hci_acl_hdr *acl_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
acl_hdr = hci_acl_hdr(skb);
if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
return false;
sk_ptr += HCI_ACL_HDR_SIZE;
sk_len -= HCI_ACL_HDR_SIZE;
event_hdr = (struct hci_event_hdr *)sk_ptr;
if ((event_hdr->evt != HCI_VENDOR_PKT) ||
(event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
/* Return: true if the event packet is a dump packet, false otherwise. */
static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 *sk_ptr;
unsigned int sk_len;
struct hci_event_hdr *event_hdr;
struct qca_dump_hdr *dump_hdr;
sk_ptr = skb->data;
sk_len = skb->len;
event_hdr = hci_event_hdr(skb);
if ((event_hdr->evt != HCI_VENDOR_PKT)
|| (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
return false;
sk_ptr += HCI_EVENT_HDR_SIZE;
sk_len -= HCI_EVENT_HDR_SIZE;
dump_hdr = (struct qca_dump_hdr *)sk_ptr;
if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
(dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
(dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
return false;
return true;
}
static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (acl_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
{
if (handle_dump_pkt_qca(hdev, skb))
return 0;
if (evt_pkt_is_dump_qca(hdev, skb))
return handle_dump_pkt_qca(hdev, skb);
return hci_recv_frame(hdev, skb);
}
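Note: the btusb refactor splits cheap classification predicates (acl_pkt_is_dump_qca(), evt_pkt_is_dump_qca()) out of the dump handler, so handle_dump_pkt_qca() can use a conventional 0/-errno return instead of the old three-way code. A minimal sketch of that classify-then-handle shape, with invented names:

```c
#include <stdbool.h>

struct pkt { int kind; };               /* illustrative packet type */
enum { PKT_NORMAL, PKT_DUMP };

/* Pure predicate: decides *what* the packet is, touches nothing. */
static bool pkt_is_dump(const struct pkt *p)
{
	return p->kind == PKT_DUMP;
}

/* Handler: now free to return 0 on success or -errno on failure. */
static int handle_dump(struct pkt *p)
{
	(void)p;
	return 0;
}

static int recv_pkt(struct pkt *p)
{
	if (pkt_is_dump(p))
		return handle_dump(p);
	return 0;                       /* normal receive path */
}

int main(void)
{
	struct pkt p = { PKT_DUMP };

	return recv_pkt(&p);
}
```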


@@ -534,16 +534,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
unsigned int target_freq,
unsigned int min, unsigned int max,
unsigned int relation)
{
unsigned int idx;
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = clamp_val(target_freq, min, max);
if (!policy->freq_table)
return target_freq;
idx = cpufreq_frequency_table_target(policy, target_freq, relation);
idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
policy->cached_resolved_idx = idx;
policy->cached_target_freq = target_freq;
return policy->freq_table[idx].frequency;
@@ -563,7 +565,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
unsigned int min = READ_ONCE(policy->min);
unsigned int max = READ_ONCE(policy->max);
/*
* If this function runs in parallel with cpufreq_set_policy(), it may
* read policy->min before the update and policy->max after the update
* or the other way around, so there is no ordering guarantee.
*
* Resolve this by always honoring the max (in case it comes from
* thermal throttling or similar).
*/
if (unlikely(min > max))
min = max;
return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2335,7 +2351,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
target_freq = __resolve_freq(policy, target_freq, relation);
target_freq = __resolve_freq(policy, target_freq, policy->min,
policy->max, relation);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -2625,11 +2642,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. This ensures that
* the resolved frequency neither overshoots the requested maximum
* nor undershoots the requested minimum.
*
* Avoid storing intermediate values in policy->max or policy->min and
* compiler optimizations around them because they may be accessed
* concurrently by cpufreq_driver_resolve_freq() during the update.
*/
policy->min = new_data.min;
policy->max = new_data.max;
policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
new_data.min, new_data.max,
CPUFREQ_RELATION_H));
new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
new_data.max, CPUFREQ_RELATION_L);
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
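Note: cpufreq_driver_resolve_freq() can run concurrently with cpufreq_set_policy() and observe a half-applied min/max pair, which is why the new code snapshots both with READ_ONCE() and forces min down to max when they cross. A userspace sketch of the same torn-read defense, with C11 atomics standing in for READ_ONCE()/WRITE_ONCE() (frequencies are made up):

```c
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int pol_min = 1600000;  /* kHz, illustrative */
static _Atomic unsigned int pol_max = 2400000;

static unsigned int resolve_freq(unsigned int target)
{
	unsigned int min = atomic_load(&pol_min);
	unsigned int max = atomic_load(&pol_max);

	/* A concurrent update may be half-visible (old min, new max or
	 * vice versa): always honor max, e.g. a thermal cap. */
	if (min > max)
		min = max;
	if (target < min)
		target = min;
	if (target > max)
		target = max;
	return target;
}

int main(void)
{
	/* Simulate seeing a new, lower max before the matching min. */
	atomic_store(&pol_max, 1200000);
	printf("%u\n", resolve_freq(2000000));  /* prints 1200000 */
	return 0;
}
```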


@@ -77,7 +77,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
return freq_next;
}
index = cpufreq_frequency_table_target(policy, freq_next, relation);
index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
policy->max, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;


@@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
unsigned int target_freq, unsigned int min,
unsigned int max, unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((freq < policy->min) || (freq > policy->max))
if (freq < min || freq > max)
continue;
if (freq == target_freq) {
optimal.driver_data = i;


@@ -98,7 +98,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
if (status & priv->ecc_stat_ce_mask) {
regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset,
&err_addr);
if (priv->ecc_uecnt_offset)
if (priv->ecc_cecnt_offset)
regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset,
&err_count);
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
@@ -1015,9 +1015,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
}
}
/* Interrupt mode set to every SBERR */
regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
ALTR_A10_ECC_INTMODE);
/* Enable ECC */
ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
ALTR_A10_ECC_CTRL_OFST));
@@ -2138,6 +2135,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
/* Set irq mask for DDR SBE to avoid any pending irq before registration */
regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
(A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0));
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;


@@ -249,6 +249,8 @@ struct altr_sdram_mc_data {
#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16)
#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17)
#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0


@@ -225,7 +225,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
buf_sz);
ffa_rx_release();
if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
ffa_rx_release();
mutex_unlock(&drv_info->rx_lock);


@@ -240,6 +240,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
if (!dev)
return NULL;
/* Drop the refcnt bumped implicitly by device_find_child */
put_device(dev);
return to_scmi_dev(dev);
}


@@ -172,7 +172,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
guard(mutex)(&hdcp_w->mutex);
drm_connector_get(&aconnector->base);
if (hdcp_w->aconnector[conn_index])
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
memset(&link_adjust, 0, sizeof(link_adjust));
@@ -209,7 +212,6 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output);
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
@@ -220,8 +222,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
struct drm_connector_state *conn_state = aconnector->base.state;
unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
hdcp_w->aconnector[conn_index] = aconnector;
guard(mutex)(&hdcp_w->mutex);
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -237,9 +238,11 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
if (hdcp_w->aconnector[conn_index]) {
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = NULL;
}
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -247,7 +250,7 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
unsigned int conn_index;
mutex_lock(&hdcp_w->mutex);
guard(mutex)(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
@@ -256,11 +259,13 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
hdcp_w->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
if (hdcp_w->aconnector[conn_index]) {
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = NULL;
}
}
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
@@ -277,7 +282,7 @@ static void event_callback(struct work_struct *work)
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
callback_dwork);
mutex_lock(&hdcp_work->mutex);
guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->callback_dwork);
@@ -285,8 +290,6 @@ static void event_callback(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
}
static void event_property_update(struct work_struct *work)
@@ -323,7 +326,7 @@ static void event_property_update(struct work_struct *work)
continue;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp_work->mutex);
guard(mutex)(&hdcp_work->mutex);
if (conn_state->commit) {
ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
@@ -355,7 +358,6 @@ static void event_property_update(struct work_struct *work)
drm_hdcp_update_content_protection(connector,
DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
}
@@ -368,7 +370,7 @@ static void event_property_validate(struct work_struct *work)
struct amdgpu_dm_connector *aconnector;
unsigned int conn_index;
mutex_lock(&hdcp_work->mutex);
guard(mutex)(&hdcp_work->mutex);
for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
conn_index++) {
@@ -408,8 +410,6 @@ static void event_property_validate(struct work_struct *work)
schedule_work(&hdcp_work->property_update_work);
}
}
mutex_unlock(&hdcp_work->mutex);
}
static void event_watchdog_timer(struct work_struct *work)
@@ -420,7 +420,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue,
watchdog_timer_dwork);
mutex_lock(&hdcp_work->mutex);
guard(mutex)(&hdcp_work->mutex);
cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
@@ -429,8 +429,6 @@ static void event_watchdog_timer(struct work_struct *work)
&hdcp_work->output);
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
}
static void event_cpirq(struct work_struct *work)
@@ -439,13 +437,11 @@ static void event_cpirq(struct work_struct *work)
hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
mutex_lock(&hdcp_work->mutex);
guard(mutex)(&hdcp_work->mutex);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
}
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
@@ -479,7 +475,7 @@ static bool enable_assr(void *handle, struct dc_link *link)
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf;
mutex_lock(&psp->dtm_context.mutex);
guard(mutex)(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
@@ -494,8 +490,6 @@ static bool enable_assr(void *handle, struct dc_link *link)
res = false;
}
mutex_unlock(&psp->dtm_context.mutex);
return res;
}
@@ -504,6 +498,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_work = handle;
struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx;
int link_index = aconnector->dc_link->link_index;
unsigned int conn_index = aconnector->base.index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -557,13 +552,14 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
(!!aconnector->base.state) ?
aconnector->base.state->hdcp_content_type : -1);
mutex_lock(&hdcp_w->mutex);
guard(mutex)(&hdcp_w->mutex);
mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
drm_connector_get(&aconnector->base);
if (hdcp_w->aconnector[conn_index])
drm_connector_put(&hdcp_w->aconnector[conn_index]->base);
hdcp_w->aconnector[conn_index] = aconnector;
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
/**


@@ -1015,6 +1015,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
struct drm_file *file = f->private_data;
struct drm_device *dev = file->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
if (!drm_dev_enter(dev, &idx))
return;
drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);
@@ -1029,6 +1033,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f)
if (dev->driver->show_fdinfo)
dev->driver->show_fdinfo(&p, file);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(drm_show_fdinfo);


@@ -25,6 +25,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);
int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#else
static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)
@@ -36,8 +37,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
return 0;
}
static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
{
return false;
}
#endif
bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
#endif /*__INTEL_PXP_GSCCS_H__ */


@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq,
FREQ_1000_1001(params[i].phy_freq/1000)*1000);
FREQ_1000_1001(params[i].phy_freq/10)*10);
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
(vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
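Note: the meson fix keeps two extra digits of precision when matching the 1000/1001 "drop-frame" clock variants; dividing by 1000 before applying the ratio truncates too much to match real rates. A small demo, assuming FREQ_1000_1001() computes f * 1000 / 1001 in integer math (the driver's macro may round slightly differently):

```c
#include <stdio.h>

/* Assumed behavior of the driver's FREQ_1000_1001() helper. */
static unsigned long freq_1000_1001(unsigned long f)
{
	return f * 1000UL / 1001UL;
}

int main(void)
{
	unsigned long f = 5940000;  /* illustrative clock value */

	/* Old: truncate to a multiple of 1000 before scaling. */
	printf("coarse: %lu\n", freq_1000_1001(f / 1000) * 1000);  /* 5934000 */
	/* New: truncate only to a multiple of 10. */
	printf("fine:   %lu\n", freq_1000_1001(f / 10) * 10);      /* 5934060 */
	/* The exact value is 5934065; the finer grain loses far less. */
	return 0;
}
```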


@@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if (error)
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
if (nouveau_fence_signal(fence))


@@ -616,9 +616,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
return 0;
rpm_disable:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}


@@ -3682,6 +3682,14 @@ found:
while (*uid == '0' && *(uid + 1))
uid++;
if (strlen(hid) >= ACPIHID_HID_LEN) {
pr_err("Invalid command line: hid is too long\n");
return 1;
} else if (strlen(uid) >= ACPIHID_UID_LEN) {
pr_err("Invalid command line: uid is too long\n");
return 1;
}
i = early_acpihid_map_size++;
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));


@@ -1443,26 +1443,37 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return 0;
}
static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs)
{
struct arm_smmu_stream *stream_rhs =
rb_entry(rhs, struct arm_smmu_stream, node);
const u32 *sid_lhs = lhs;
if (*sid_lhs < stream_rhs->id)
return -1;
if (*sid_lhs > stream_rhs->id)
return 1;
return 0;
}
static int arm_smmu_streams_cmp_node(struct rb_node *lhs,
const struct rb_node *rhs)
{
return arm_smmu_streams_cmp_key(
&rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs);
}
static struct arm_smmu_master *
arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
{
struct rb_node *node;
struct arm_smmu_stream *stream;
lockdep_assert_held(&smmu->streams_mutex);
node = smmu->streams.rb_node;
while (node) {
stream = rb_entry(node, struct arm_smmu_stream, node);
if (stream->id < sid)
node = node->rb_right;
else if (stream->id > sid)
node = node->rb_left;
else
return stream->master;
}
return NULL;
node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
if (!node)
return NULL;
return rb_entry(node, struct arm_smmu_stream, node)->master;
}
/* IRQ and event handlers */
@@ -2575,8 +2586,6 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
{
int i;
int ret = 0;
struct arm_smmu_stream *new_stream, *cur_stream;
struct rb_node **new_node, *parent_node = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
@@ -2587,9 +2596,10 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
mutex_lock(&smmu->streams_mutex);
for (i = 0; i < fwspec->num_ids; i++) {
struct arm_smmu_stream *new_stream = &master->streams[i];
struct rb_node *existing;
u32 sid = fwspec->ids[i];
new_stream = &master->streams[i];
new_stream->id = sid;
new_stream->master = master;
@@ -2598,28 +2608,23 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
break;
/* Insert into SID tree */
new_node = &(smmu->streams.rb_node);
while (*new_node) {
cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
node);
parent_node = *new_node;
if (cur_stream->id > new_stream->id) {
new_node = &((*new_node)->rb_left);
} else if (cur_stream->id < new_stream->id) {
new_node = &((*new_node)->rb_right);
} else {
dev_warn(master->dev,
"stream %u already in tree\n",
cur_stream->id);
ret = -EINVAL;
break;
}
}
if (ret)
break;
existing = rb_find_add(&new_stream->node, &smmu->streams,
arm_smmu_streams_cmp_node);
if (existing) {
struct arm_smmu_master *existing_master =
rb_entry(existing, struct arm_smmu_stream, node)
->master;
rb_link_node(&new_stream->node, parent_node, new_node);
rb_insert_color(&new_stream->node, &smmu->streams);
/* Bridged PCI devices may end up with duplicated IDs */
if (existing_master == master)
continue;
dev_warn(master->dev,
"stream %u already in tree from dev %s\n", sid,
dev_name(existing_master->dev));
ret = -EINVAL;
break;
}
}
if (ret) {
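Note: the SMMUv3 hunks replace a hand-rolled rbtree walk and insertion loop with rb_find()/rb_find_add(), which delegate ordering to a three-way comparator. A toy sketch of that comparator contract over a plain binary search tree (not a real rbtree; names are illustrative):

```c
#include <stdio.h>

struct node { unsigned int id; struct node *l, *r; };

/* Comparator contract: <0 means key sorts left of node, >0 right,
 * 0 means found -- the same shape rb_find() expects. */
static int cmp_key(const unsigned int *sid, const struct node *n)
{
	if (*sid < n->id)
		return -1;
	if (*sid > n->id)
		return 1;
	return 0;
}

static struct node *bst_find(struct node *root, unsigned int sid)
{
	while (root) {
		int c = cmp_key(&sid, root);

		if (!c)
			return root;
		root = c < 0 ? root->l : root->r;
	}
	return NULL;
}

int main(void)
{
	struct node a = { 10 }, b = { 20 }, root = { 15, &a, &b };

	printf("%s\n", bst_find(&root, 20) ? "found" : "missing");
	return 0;
}
```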


@@ -4855,6 +4855,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
/* QM57/QS57 integrated gfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
@@ -4932,7 +4935,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);


@@ -566,6 +566,18 @@ int iommu_probe_device(struct device *dev)
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, NULL);
mutex_unlock(&iommu_probe_device_lock);
/*
* The dma_configure replay paths need bus_iommu_probe() to
* finish before they can call arch_setup_dma_ops()
*/
if (IS_ENABLED(CONFIG_IOMMU_DMA) && !ret && dev->iommu_group) {
mutex_lock(&dev->iommu_group->mutex);
if (!dev->iommu_group->default_domain &&
!dev_iommu_ops(dev)->set_platform_dma_ops)
ret = -EPROBE_DEFER;
mutex_unlock(&dev->iommu_group->mutex);
}
if (ret)
return ret;
@@ -3149,6 +3161,12 @@ int iommu_device_use_default_domain(struct device *dev)
return 0;
mutex_lock(&group->mutex);
/* We may race against bus_iommu_probe() finalising groups here */
if (IS_ENABLED(CONFIG_IOMMU_DMA) && !group->default_domain &&
!dev_iommu_ops(dev)->set_platform_dma_ops) {
ret = -EPROBE_DEFER;
goto unlock_out;
}
if (group->owner_cnt) {
if (group->owner || !iommu_is_default_domain(group) ||
!xa_empty(&group->pasid_array)) {


@@ -226,6 +226,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
if (pin == GPIO_NO_WAKE_IRQ)
return irq_domain_disconnect_hierarchy(domain, virq);
ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
&qcom_mpm_chip, priv);
if (ret)


@@ -68,6 +68,8 @@
#define LIST_DIRTY 1
#define LIST_SIZE 2
#define SCAN_RESCHED_CYCLE 16
/*--------------------------------------------------------------*/
/*
@@ -2387,7 +2389,12 @@ static void __scan(struct dm_bufio_client *c)
atomic_long_dec(&c->need_shrink);
freed++;
cond_resched();
if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) {
dm_bufio_unlock(c);
cond_resched();
dm_bufio_lock(c);
}
}
}
}
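Note: the dm-bufio change bounds lock hold times: a long eviction scan now drops the client lock and calls cond_resched() every SCAN_RESCHED_CYCLE frees instead of spinning through the whole list. A userspace analog of that batched-yield pattern, with pthreads standing in for the kernel primitives:

```c
#include <pthread.h>
#include <sched.h>

#define SCAN_RESCHED_CYCLE 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void scan(int nitems)
{
	long freed = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < nitems; i++) {
		/* ... free one buffer here ... */
		if (++freed % SCAN_RESCHED_CYCLE == 0) {
			/* Let contenders in and give up the CPU briefly. */
			pthread_mutex_unlock(&lock);
			sched_yield();          /* cond_resched() stand-in */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	scan(100);
	return 0;
}
```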


@@ -4687,7 +4687,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
BUG_ON(!list_empty(&ic->wait_list));
if (ic->mode == 'B')
if (ic->mode == 'B' && ic->bitmap_flush_work.work.func)
cancel_delayed_work_sync(&ic->bitmap_flush_work);
if (ic->metadata_wq)
destroy_workqueue(ic->metadata_wq);


@@ -500,8 +500,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv)
gfp = GFP_NOIO;
}
argv = kmalloc_array(new_size, sizeof(*argv), gfp);
if (argv && old_argv) {
memcpy(argv, old_argv, *size * sizeof(*argv));
if (argv) {
if (old_argv)
memcpy(argv, old_argv, *size * sizeof(*argv));
*size = new_size;
}
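Note: the old realloc_argv() updated *size only when an old array existed, so the very first allocation left the recorded size stale. A userspace sketch of the corrected grow-and-publish logic (malloc/free stand in for the kernel allocators; sizes are illustrative):

```c
#include <stdlib.h>
#include <string.h>

static char **grow_argv(unsigned int *size, char **old)
{
	unsigned int new_size = *size ? *size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (argv) {
		if (old)
			memcpy(argv, old, *size * sizeof(*argv));
		*size = new_size;   /* publish on every successful grow */
	}
	free(old);
	return argv;
}

int main(void)
{
	unsigned int size = 0;
	char **argv = grow_argv(&size, NULL);  /* size becomes 8, not 0 */

	argv = grow_argv(&size, argv);         /* grows to 16, keeps contents */
	free(argv);
	return size != 16;
}
```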


@@ -1107,26 +1107,26 @@ int renesas_sdhi_probe(struct platform_device *pdev,
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
ret = num_irqs;
goto eirq;
goto edisclk;
}
/* There must be at least one IRQ source */
if (!num_irqs) {
ret = -ENXIO;
goto eirq;
goto edisclk;
}
for (i = 0; i < num_irqs; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
goto eirq;
goto edisclk;
}
ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
goto edisclk;
}
ret = tmio_mmc_host_probe(host);
@@ -1138,8 +1138,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
return ret;
eirq:
tmio_mmc_host_remove(host);
edisclk:
renesas_sdhi_clk_disable(host);
efree:


@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
struct tc_taprio_qopt_offload *taprio;
struct ocelot_port *ocelot_port;
struct timespec64 base_ts;
int port;
int i, port;
u32 val;
mutex_lock(&ocelot->fwd_domain_lock);
@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
QSYS_PARAM_CFG_REG_3);
for (i = 0; i < taprio->num_entries; i++)
vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
QSYS_TAS_PARAM_CFG_CTRL);


@@ -172,48 +172,57 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
return padev;
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
int err = 0;
if (!*pd_ptr)
return;
mutex_lock(&pf->config_lock);
padev = pf->vfs[cf->vf_id].padev;
if (padev) {
pds_client_unregister(pf, padev->client_id);
auxiliary_device_delete(&padev->aux_dev);
auxiliary_device_uninit(&padev->aux_dev);
padev->client_id = 0;
}
pf->vfs[cf->vf_id].padev = NULL;
padev = *pd_ptr;
pds_client_unregister(pf, padev->client_id);
auxiliary_device_delete(&padev->aux_dev);
auxiliary_device_uninit(&padev->aux_dev);
*pd_ptr = NULL;
mutex_unlock(&pf->config_lock);
return err;
}
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
enum pds_core_vif_types vt,
struct pds_auxiliary_dev **pd_ptr)
{
struct pds_auxiliary_dev *padev;
enum pds_core_vif_types vt;
char devname[PDS_DEVNAME_LEN];
unsigned long mask;
u16 vt_support;
int client_id;
int err = 0;
if (!cf)
return -ENODEV;
if (vt >= PDS_DEV_TYPE_MAX)
return -EINVAL;
mutex_lock(&pf->config_lock);
/* We only support vDPA so far, so it is the only one to
* be verified that it is available in the Core device and
* enabled in the devlink param. In the future this might
* become a loop for several VIF types.
*/
mask = BIT_ULL(PDSC_S_FW_DEAD) |
BIT_ULL(PDSC_S_STOPPING_DRIVER);
if (cf->state & mask) {
dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
__func__, cf->state);
err = -ENXIO;
goto out_unlock;
}
/* Verify that the type is supported and enabled. It is not
* an error if there is no auxbus device support for this
* VF, it just means something else needs to happen with it.
*/
vt = PDS_DEV_TYPE_VDPA;
vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
if (!(vt_support &&
pf->viftype_status[vt].supported &&
@@ -239,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
err = PTR_ERR(padev);
goto out_unlock;
}
pf->vfs[cf->vf_id].padev = padev;
*pd_ptr = padev;
out_unlock:
mutex_unlock(&pf->config_lock);
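Note: the pds_core refactor has callers pass the address of the pointer that records the auxiliary device, so pdsc_auxbus_dev_add() publishes into the caller-owned slot and pdsc_auxbus_dev_del() becomes an idempotent no-op once the slot is cleared, which lets the reset paths call it safely. A minimal sketch of that out-parameter ownership pattern (illustrative types):

```c
#include <stdlib.h>

struct aux_dev { int client_id; };   /* illustrative */

static int aux_dev_add(struct aux_dev **slot)
{
	struct aux_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return -1;
	*slot = d;           /* publish into the caller-owned pointer */
	return 0;
}

static void aux_dev_del(struct aux_dev **slot)
{
	if (!*slot)          /* idempotent: safe on reset/remove paths */
		return;
	free(*slot);
	*slot = NULL;
}

int main(void)
{
	struct aux_dev *slot = NULL;

	if (!aux_dev_add(&slot))
		aux_dev_del(&slot);
	aux_dev_del(&slot);  /* second call is a harmless no-op */
	return 0;
}
```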


@@ -300,8 +300,11 @@ void pdsc_health_thread(struct work_struct *work);
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void pdsc_notify(unsigned long event, void *data);
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
enum pds_core_vif_types vt,
struct pds_auxiliary_dev **pd_ptr);
void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
struct pds_auxiliary_dev **pd_ptr);
void pdsc_process_adminq(struct pdsc_qcq *qcq);
void pdsc_work_thread(struct work_struct *work);


@@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
return -ERANGE;
case PDS_RC_BAD_ADDR:
return -EFAULT;
case PDS_RC_BAD_PCI:
return -ENXIO;
case PDS_RC_EOPCODE:
case PDS_RC_EINTR:
case PDS_RC_DEV_CMD:
@@ -65,7 +67,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
/* Firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
*/
return (pdsc->fw_status != 0xff) &&
return (pdsc->fw_status != PDS_RC_BAD_PCI) &&
(pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
}
@@ -131,6 +133,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
unsigned long max_wait;
unsigned long duration;
int timeout = 0;
bool running;
int done = 0;
int err = 0;
int status;
@@ -139,6 +142,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
max_wait = start_time + (max_seconds * HZ);
while (!done && !timeout) {
running = pdsc_is_fw_running(pdsc);
if (!running)
break;
done = pdsc_devcmd_done(pdsc);
if (done)
break;
@@ -155,7 +162,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
opcode, pdsc_devcmd_str(opcode), duration / HZ);
if (!done || timeout) {
if ((!done || timeout) && running) {
dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
opcode, pdsc_devcmd_str(opcode), done, timeout,
max_seconds);


@@ -55,8 +55,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
struct pdsc *vf = pdsc->vfs[vf_id].vf;
err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
pdsc_auxbus_dev_del(vf, pdsc);
if (ctx->val.vbool)
err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
&pdsc->vfs[vf_id].padev);
else
pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
}
return err;


@@ -189,7 +189,8 @@ static int pdsc_init_vf(struct pdsc *vf)
devl_unlock(dl);
pf->vfs[vf->vf_id].vf = vf;
err = pdsc_auxbus_dev_add(vf, pf);
err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
&pf->vfs[vf->vf_id].padev);
if (err) {
devl_lock(dl);
devl_unregister(dl);
@@ -415,7 +416,7 @@ static void pdsc_remove(struct pci_dev *pdev)
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf)) {
pdsc_auxbus_dev_del(pdsc, pf);
pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
pf->vfs[pdsc->vf_id].vf = NULL;
}
} else {
@@ -475,6 +476,15 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
if (pdev->is_virtfn) {
struct pdsc *pf;
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
pdsc_auxbus_dev_del(pdsc, pf,
&pf->vfs[pdsc->vf_id].padev);
}
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -510,6 +520,15 @@ static void pdsc_reset_done(struct pci_dev *pdev)
pdsc_fw_up(pdsc);
pdsc_restart_health_thread(pdsc);
if (pdev->is_virtfn) {
struct pdsc *pf;
pf = pdsc_get_pf_struct(pdsc->pdev);
if (!IS_ERR(pf))
pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
&pf->vfs[pdsc->vf_id].padev);
}
}
static const struct pci_error_handlers pdsc_err_handler = {


@@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
}
/* Set up the header page info */
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
if (pdata->netdev->features & NETIF_F_RXCSUM) {
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
XGBE_SKB_ALLOC_SIZE);
} else {
xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
pdata->rx_buf_size);
}
/* Set up the buffer page info */
xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,


@@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
{
unsigned int i;
for (i = 0; i < pdata->channel_count; i++) {
if (!pdata->channel[i]->rx_ring)
break;
XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
}
}
static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
unsigned int index, unsigned int val)
{
@@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
xgbe_config_sph_mode(pdata);
xgbe_config_rss(pdata);
if (pdata->netdev->features & NETIF_F_RXCSUM) {
xgbe_config_sph_mode(pdata);
xgbe_config_rss(pdata);
}
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->disable_vxlan = xgbe_disable_vxlan;
hw_if->set_vxlan_id = xgbe_set_vxlan_id;
/* For Split Header */
hw_if->enable_sph = xgbe_config_sph_mode;
hw_if->disable_sph = xgbe_disable_sph_mode;
DBGPR("<--xgbe_init_function_ptrs\n");
}


@@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev,
if (ret)
return ret;
if ((features & NETIF_F_RXCSUM) && !rxcsum)
if ((features & NETIF_F_RXCSUM) && !rxcsum) {
hw_if->enable_sph(pdata);
hw_if->enable_vxlan(pdata);
hw_if->enable_rx_csum(pdata);
else if (!(features & NETIF_F_RXCSUM) && rxcsum)
schedule_work(&pdata->restart_work);
} else if (!(features & NETIF_F_RXCSUM) && rxcsum) {
hw_if->disable_sph(pdata);
hw_if->disable_vxlan(pdata);
hw_if->disable_rx_csum(pdata);
schedule_work(&pdata->restart_work);
}
if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
hw_if->enable_rx_vlan_stripping(pdata);


@@ -865,6 +865,10 @@ struct xgbe_hw_if {
void (*enable_vxlan)(struct xgbe_prv_data *);
void (*disable_vxlan)(struct xgbe_prv_data *);
void (*set_vxlan_id)(struct xgbe_prv_data *);
/* For Split Header */
void (*enable_sph)(struct xgbe_prv_data *pdata);
void (*disable_sph)(struct xgbe_prv_data *pdata);
};
/* This structure represents implementation specific routines for an


@@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
}
}
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
memcpy(info->dest_buf + off, dma_buf, len);
} else {
rc = -ENOBUFS;
break;
}
}
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
info->dest_buf_size += len;
if (info->dest_buf) {
if ((info->seg_start + off + len) <=
BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
u16 copylen = min_t(u16, len,
info->dest_buf_size - off);
memcpy(info->dest_buf + off, dma_buf, copylen);
if (copylen < len)
break;
} else {
rc = -ENOBUFS;
if (cmn_req->req_type ==
cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
kfree(info->dest_buf);
info->dest_buf = NULL;
}
break;
}
}
if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
break;


@@ -1393,6 +1393,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
return reg_len;
}
#define BNXT_PCIE_32B_ENTRY(start, end) \
{ offsetof(struct pcie_ctx_hw_stats, start), \
offsetof(struct pcie_ctx_hw_stats, end) }
static const struct {
u16 start;
u16 end;
} bnxt_pcie_32b_entries[] = {
BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
};
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
@@ -1424,12 +1435,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
rc = hwrm_req_send(bp, req);
if (!rc) {
__le64 *src = (__le64 *)hw_pcie_stats;
u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
int i;
u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
u8 *src = (u8 *)hw_pcie_stats;
int i, j;
for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
dst[i] = le64_to_cpu(src[i]);
for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
if (i >= bnxt_pcie_32b_entries[j].start &&
i <= bnxt_pcie_32b_entries[j].end) {
u32 *dst32 = (u32 *)(dst + i);
*dst32 = le32_to_cpu(*(__le32 *)(src + i));
i += 4;
if (i > bnxt_pcie_32b_entries[j].end &&
j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
j++;
} else {
u64 *dst64 = (u64 *)(dst + i);
*dst64 = le64_to_cpu(*(__le64 *)(src + i));
i += 8;
}
}
}
hwrm_req_drop(bp, req);
}
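Note: bnxt_get_regs() used to byte-swap the whole PCIe stats block as little-endian 64-bit words, which mangles the ranges firmware packs as 32-bit counters (the LTSSM histogram above). A self-contained sketch of the mixed-width conversion walk; the offsets here are made up:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Offsets of the fields packed as 32-bit little-endian words inside an
 * otherwise 64-bit little-endian stats block (offsets are made up). */
static const struct { uint16_t start, end; } le32_ranges[] = { { 16, 31 } };
#define NRANGES (sizeof(le32_ranges) / sizeof(le32_ranges[0]))

static void copy_stats(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t i = 0, j = 0;

	while (i < len) {  /* len is a multiple of 8; ranges are 4-aligned */
		if (j < NRANGES && i >= le32_ranges[j].start &&
		    i <= le32_ranges[j].end) {
			uint32_t v = src[i] | src[i + 1] << 8 |
				     (uint32_t)src[i + 2] << 16 |
				     (uint32_t)src[i + 3] << 24;
			memcpy(dst + i, &v, 4);   /* one 32-bit counter */
			i += 4;
			if (i > le32_ranges[j].end)
				j++;
		} else {
			uint64_t v = 0;
			for (int k = 7; k >= 0; k--)
				v = v << 8 | src[i + k];
			memcpy(dst + i, &v, 8);   /* one 64-bit counter */
			i += 8;
		}
	}
}

int main(void)
{
	uint8_t src[64] = { 1 }, dst[64];
	uint64_t first;

	copy_stats(dst, src, sizeof(src));
	memcpy(&first, dst, sizeof(first));
	printf("first counter = %llu\n", (unsigned long long)first);
	return 0;
}
```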


@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
np->led_mode = le16_to_cpu(psrom->led_mode);
return 0;
}


@@ -335,7 +335,7 @@ typedef struct t_SROM {
u16 sub_system_id; /* 0x06 */
u16 pci_base_1; /* 0x08 (IP1000A only) */
u16 pci_base_2; /* 0x0a (IP1000A only) */
u16 led_mode; /* 0x0c (IP1000A only) */
__le16 led_mode; /* 0x0c (IP1000A only) */
u16 reserved1[9]; /* 0x0e-0x1f */
u8 mac_addr[6]; /* 0x20-0x25 */
u8 reserved2[10]; /* 0x26-0x2f */


@@ -695,7 +695,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
txq->bd.cur = bdp;
/* Trigger transmission start */
writel(0, txq->bd.reg_desc_active);
if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active) ||
!readl(txq->bd.reg_desc_active))
writel(0, txq->bd.reg_desc_active);
return 0;
}
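Note: as I read the hunk, the FEC driver now applies the ERR007885 workaround to the regular transmit path as well: with the quirk set, the descriptor-active trigger is written only if one of four back-to-back readbacks sees the register idle (zero). A toy mirror of that control flow against a fake register (the erratum's exact semantics are in the i.MX errata sheet):

```c
#include <stdint.h>

static volatile uint32_t fake_desc_active;   /* stand-in for the MMIO reg */
#define QUIRK_ERR007885 0x1u

static void kick_tx(volatile uint32_t *reg, unsigned int quirks)
{
	/* Without the quirk, always arm. With it, skip the write only
	 * when four consecutive readbacks all see the register
	 * non-zero (a kick is already latched). */
	if (!(quirks & QUIRK_ERR007885) ||
	    !*reg || !*reg || !*reg || !*reg)
		*reg = 0;
}

int main(void)
{
	kick_tx(&fake_desc_active, QUIRK_ERR007885);
	return 0;
}
```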


@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tm_qset",
.cmd = HNAE3_DBG_CMD_TM_QSET,
.dentry = HNS3_DBG_DENTRY_TM,
.buf_len = HNS3_DBG_READ_LEN,
.buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{


@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
writel(mask_en, tqp_vector->mask_addr);
}
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
napi_enable(&tqp_vector->napi);
enable_irq(tqp_vector->vector_irq);
/* enable vector */
hns3_mask_vector_irq(tqp_vector, 1);
}
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
/* disable vector */
hns3_mask_vector_irq(tqp_vector, 0);
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
cancel_work_sync(&tqp_vector->rx_group.dim.work);
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
return 0;
}
static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u16 i;
for (i = 0; i < priv->vector_num; i++)
hns3_irq_enable(&priv->tqp_vector[i]);
for (i = 0; i < priv->vector_num; i++)
hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_enable(h->kinfo.tqp[i]);
}
static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u16 i;
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
for (i = 0; i < priv->vector_num; i++)
hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
for (i = 0; i < priv->vector_num; i++)
hns3_irq_disable(&priv->tqp_vector[i]);
}
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
int i, j;
int ret;
ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
/* enable the vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
/* enable rcb */
for (j = 0; j < h->kinfo.num_tqps; j++)
hns3_tqp_enable(h->kinfo.tqp[j]);
hns3_enable_irqs_and_tqps(netdev);
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret) {
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
while (j--)
hns3_tqp_disable(h->kinfo.tqp[j]);
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_disable_irqs_and_tqps(netdev);
}
return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
/* disable rcb */
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
hns3_disable_irqs_and_tqps(netdev);
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
@@ -5870,8 +5877,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int i;
if (!if_running)
return;
@@ -5882,11 +5887,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
netif_carrier_off(ndev);
netif_tx_disable(ndev);
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_disable(h->kinfo.tqp[i]);
hns3_disable_irqs_and_tqps(ndev);
/* delay ring buffer clearing to hns3_reset_notify_uninit_enet
* during reset process, because driver may not be able
@@ -5902,7 +5903,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
int i;
if (!if_running)
return;
@@ -5918,11 +5918,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
for (i = 0; i < h->kinfo.num_tqps; i++)
hns3_tqp_enable(h->kinfo.tqp[i]);
hns3_enable_irqs_and_tqps(ndev);
netif_tx_wake_all_queues(ndev);


@@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.settime64 = hclge_ptp_settime;
ptp->info.n_alarm = 0;
spin_lock_init(&ptp->lock);
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
if (IS_ERR(ptp->clock)) {
dev_err(&hdev->pdev->dev,
@@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
return -ENODEV;
}
spin_lock_init(&ptp->lock);
ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
return 0;
}


@@ -1257,9 +1257,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
rtnl_unlock();
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1268,6 +1267,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret;
ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
if (ret)
return ret;
hdev->rxvtag_strip_en = enable;
return 0;
}
static int hclgevf_reset_tqp(struct hnae3_handle *handle)
{
#define HCLGEVF_RESET_ALL_QUEUE_DONE 1U
@@ -2143,12 +2155,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
bool rxvtag_strip_en)
{
struct hnae3_handle *nic = &hdev->nic;
int ret;
ret = hclgevf_en_hw_strip_rxvtag(nic, true);
ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2815,7 +2828,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
ret = hclgevf_init_vlan_config(hdev);
ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);
@@ -2928,7 +2941,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
ret = hclgevf_init_vlan_config(hdev);
ret = hclgevf_init_vlan_config(hdev, true);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to initialize VLAN config\n", ret);

View File

@@ -253,6 +253,7 @@ struct hclgevf_dev {
int *vector_irq;
bool gro_en;
bool rxvtag_strip_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];

View File

@@ -1824,6 +1824,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err_exit;
}
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||

View File

@@ -1237,6 +1237,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* reset the tstamp_config */
igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
mutex_lock(&adapter->ptm_lock);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
switch (adapter->hw.mac.type) {
@@ -1255,7 +1257,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
if (!igc_is_crosststamp_supported(adapter))
break;
mutex_lock(&adapter->ptm_lock);
wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);
@@ -1279,7 +1280,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");
igc_ptm_reset(hw);
mutex_unlock(&adapter->ptm_lock);
break;
default:
/* No work to do. */
@@ -1296,5 +1296,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
mutex_unlock(&adapter->ptm_lock);
wrfl();
}
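The igc hunks above widen the ptm_lock critical section so the mutex is taken once, before tmreg_lock, and released at the common exit. The motivating rule, sketched: a mutex may sleep, so it must be acquired before, and never inside, a spinlock section that runs with interrupts disabled.

	mutex_lock(&adapter->ptm_lock);		/* may sleep: take first */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	/* ... program timestamp/PTM registers ... */
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
	mutex_unlock(&adapter->ptm_lock);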

View File

@@ -917,7 +917,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
miss_cnt);
rtnl_lock();
if (netif_running(oct->netdev))
octep_stop(oct->netdev);
dev_close(oct->netdev);
rtnl_unlock();
}

View File

@@ -2180,14 +2180,18 @@ skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
if (unlikely(dma_addr == DMA_MAPPING_ERROR))
addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
rxd->rxd2);
else
addr64 = RX_DMA_PREP_ADDR64(dma_addr);
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;
ring->calc_idx = idx;
done++;

View File

@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
unsigned long flags;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
netif_wake_queue(ndev);
if (napi_complete(napi)) {
spin_lock(&priv->lock);
spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, false, true);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
}
return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
unsigned long flags;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
spin_lock(&priv->lock);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&priv->lock, flags);
mtk_star_enable_dma_irq(priv, true, false);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->lock, flags);
}
return work_done;
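Context for the spin_lock() to spin_lock_irqsave() conversion above: the same lock is also taken from the hard-IRQ path, so the NAPI side must mask local interrupts while holding it; otherwise the interrupt handler can preempt the lock holder on the same CPU and spin forever. A minimal sketch of the pattern (the demo_* names are illustrative, not driver code):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_pending;

static irqreturn_t demo_irq(int irq, void *data)
{
	spin_lock(&demo_lock);			/* IRQs already off here */
	demo_pending++;
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static void demo_napi_side(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* mask local IRQs */
	demo_pending = 0;
	spin_unlock_irqrestore(&demo_lock, flags);
}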

View File

@@ -3499,7 +3499,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
err = mlx5_rdma_enable_roce(esw->dev);
if (err)
goto err_roce;
err = mlx5_esw_host_number_init(esw);
if (err)
@@ -3560,6 +3562,7 @@ err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
err_roce:
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}

View File

@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
u8 mac[ETH_ALEN] = {};
union ib_gid gid;
u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
mlx5_nic_vport_disable_roce(dev);
}
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
return;
return 0;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
return;
return err;
}
err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
goto del_roce_addr;
}
return;
return err;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
return err;
}
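The void-to-int conversion above lets esw_offloads_enable() observe RoCE setup failures and unwind through the new err_roce label. It follows the standard kernel goto-unwind idiom, skeletonized below with placeholder demo_* names:

static int demo_setup(struct demo_dev *dev)
{
	int err;

	err = demo_enable_core(dev);		/* first fallible step */
	if (err)
		return err;

	err = demo_add_addr(dev);		/* second fallible step */
	if (err)
		goto disable_core;		/* roll back step one */

	return 0;

disable_core:
	demo_disable_core(dev);
	return err;
}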

View File

@@ -8,12 +8,12 @@
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

View File

@@ -1949,6 +1949,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
if (nr_frags <= 0) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
tx->frame_last = tx->frame_first;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -2018,6 +2019,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
tx->frame_first = 0;
tx->frame_data0 = 0;
tx->frame_tail = 0;
tx->frame_last = 0;
return -ENOMEM;
}
@@ -2058,16 +2060,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
TX_DESC_DATA0_DTYPE_DATA_) {
tx->frame_data0 |= TX_DESC_DATA0_LS_;
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
tx->frame_last = tx->frame_tail;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
buffer_info = &tx->buffer_info[tx->frame_last];
buffer_info->skb = skb;
if (time_stamp)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;

View File

@@ -974,6 +974,7 @@ struct lan743x_tx {
u32 frame_first;
u32 frame_data0;
u32 frame_tail;
u32 frame_last;
struct lan743x_tx_buffer_info *buffer_info;

View File

@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
return VLAN_N_VID - bridge_num - 1;
}
/**
* ocelot_update_vlan_reclassify_rule() - Make switch aware only of bridge VLAN TPID
*
* @ocelot: Switch private data structure
* @port: Index of ingress port
*
* IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
* component conformance" suggest that a C-VLAN component should only recognize
* and filter on C-Tags, and an S-VLAN component should only recognize and
* process based on S-Tags.
*
* In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
* components are largely represented by a bridge with vlan_protocol 802.1Q,
* and S-VLAN components by a bridge with vlan_protocol 802.1ad.
*
* Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
* design is non-conformant, because the switch assigns each frame to a VLAN
* based on an entirely different question, as detailed in figure "Basic VLAN
* Classification Flow" from its manual and reproduced below.
*
* Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
* if VLAN_AWARE_ENA[port] and frame has outer tag then:
* if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
* TAG_TYPE = (Frame.InnerTPID <> 0x8100)
* Set PCP, DEI, VID to values from inner VLAN header
* else:
* TAG_TYPE = (Frame.OuterTPID <> 0x8100)
* Set PCP, DEI, VID to values from outer VLAN header
* if VID == 0 then:
* VID = VLAN_CFG.VLAN_VID
*
* Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
* "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
* (if 802.1ad). It will classify based on whichever of the tags is "outer", no
* matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
*
* In the VLAN Table, the TAG_TYPE information is not accessible - just the
* classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
* C-VLAN X, and S-VLAN X.
*
* Whereas the Linux bridge behavior is to only filter on frames with a TPID
* equal to the vlan_protocol, and treat everything else as VLAN-untagged.
*
* Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
* received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
* should be treated as 802.1Q-untagged, and classified to the PVID of that
* bridge port. Not to VID=3, and not to VID=5.
*
* The VCAP IS1 TCAM has everything we need to overwrite the choices made in
* the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
* and it can modify the classified VID in the action. Thus, for each port
* under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
* match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
* PVID of the port. This effectively makes it appear to the outside world as
* if those packets were processed as VLAN-untagged.
*
* The rule needs to be updated each time the bridge PVID changes, and needs
* to be deleted if the bridge PVID is deleted, or if the port becomes
* VLAN-unaware.
*/
static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
{
unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
const struct ocelot_bridge_vlan *pvid_vlan;
struct ocelot_vcap_filter *filter;
int err, val, pcp, dei;
bool vid_replace_ena;
u16 vid;
pvid_vlan = ocelot_port->pvid_vlan;
vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
false);
if (!vid_replace_ena) {
/* If the reclassification filter doesn't need to exist, delete
* it if it was previously installed, and exit doing nothing
* otherwise.
*/
if (filter)
return ocelot_vcap_filter_del(ocelot, filter);
return 0;
}
/* The reclassification rule must apply. See if it already exists
* or if it must be created.
*/
/* Treating as VLAN-untagged means using a classified VID equal to
* the bridge PVID, and PCP/DEI set to the port default QoS values.
*/
vid = pvid_vlan->vid;
val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
if (filter) {
bool changed = false;
/* Filter exists, just update it */
if (filter->action.vid != vid) {
filter->action.vid = vid;
changed = true;
}
if (filter->action.pcp != pcp) {
filter->action.pcp = pcp;
changed = true;
}
if (filter->action.dei != dei) {
filter->action.dei = dei;
changed = true;
}
if (!changed)
return 0;
return ocelot_vcap_filter_replace(ocelot, filter);
}
/* Filter doesn't exist, create it */
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->ingress_port_mask = BIT(port);
filter->vlan.tpid = OCELOT_VCAP_BIT_1;
filter->prio = 1;
filter->id.cookie = cookie;
filter->id.tc_offload = false;
filter->block_id = VCAP_IS1;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
filter->lookup = 0;
filter->action.vid_replace_ena = true;
filter->action.pcp_dei_ena = true;
filter->action.vid = vid;
filter->action.pcp = pcp;
filter->action.dei = dei;
err = ocelot_vcap_filter_add(ocelot, filter, NULL);
if (err)
kfree(filter);
return err;
}
/* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* happens automatically), but also 802.1p traffic which gets
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*
* Also, we only support the bridge 802.1Q VLAN protocol, so
* 802.1ad-tagged frames (carrying S-Tags) should be considered
* 802.1Q-untagged, and also dropped.
*/
if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
if (err)
return err;
ocelot_port_manage_port_tag(ocelot, port);
return 0;
@@ -670,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -684,9 +845,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return err;
/* Default ingress vlan classification */
if (pvid)
ocelot_port_set_pvid(ocelot, port,
ocelot_bridge_vlan_find(ocelot, vid));
if (pvid) {
err = ocelot_port_set_pvid(ocelot, port,
ocelot_bridge_vlan_find(ocelot, vid));
if (err)
return err;
} else if (ocelot_port->pvid_vlan &&
ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
err = ocelot_port_set_pvid(ocelot, port, NULL);
if (err)
return err;
}
/* Untagged egress vlan classification */
ocelot_port_manage_port_tag(ocelot, port);
@@ -712,8 +881,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
if (del_pvid) {
err = ocelot_port_set_pvid(ocelot, port, NULL);
if (err)
return err;
}
/* Egress */
ocelot_port_manage_port_tag(ocelot, port);
@@ -2607,7 +2779,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
ANA_PORT_QOS_CFG,
port);
return 0;
return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
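Worked example of the reclassification flow documented above, assuming a bridge port with vlan_protocol 802.1Q and PVID 10: a frame arriving with an outer 802.1ad tag VID=3 is classified by the basic pipeline to VID 3 with TAG_TYPE = 1 (S-tag). The VCAP IS1 rule installed by ocelot_update_vlan_reclassify_rule() matches on that TAG_TYPE and rewrites the classified VID to 10 and PCP/DEI to the port defaults, so the rest of the pipeline treats the frame exactly as if it had arrived 802.1Q-untagged.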

View File

@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,

View File

@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -33,7 +34,7 @@
#define CMD_CTR (0x2 << CMD_SHIFT)
#define CMD_MASK GENMASK(15, CMD_SHIFT)
#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0)
#define DET_CMD_LEN 4
#define DET_SOF_LEN 2
@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
}
static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
unsigned int frame_len)
unsigned int frame_len, bool drop)
{
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
struct spi_transfer *xfer = &mses->spi_xfer;
@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
__func__, ret);
mse->stats.xfer_err++;
} else if (drop) {
netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
ret = -EINVAL;
} else if (*sof != cpu_to_be16(DET_SOF)) {
netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
__func__, *sof);
@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
struct sk_buff *skb;
unsigned int rxalign;
unsigned int rxlen;
bool drop = false;
__be16 rx = 0;
u16 cmd_resp;
u8 *rxpkt;
@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
__func__, cmd_resp);
mse->stats.invalid_rts++;
return;
drop = true;
goto drop;
}
net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
}
rxlen = cmd_resp & LEN_MASK;
if (!rxlen) {
net_dbg_ratelimited("%s: No frame length defined\n", __func__);
if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
rxlen);
mse->stats.invalid_len++;
return;
drop = true;
}
/* In case of an invalid CMD_RTS, the frame must be consumed anyway.
* So assume the maximum possible frame length.
*/
drop:
if (drop)
rxlen = VLAN_ETH_FRAME_LEN;
rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
if (!skb)
@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
* They are copied, but ignored.
*/
rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
mse->ndev->stats.rx_errors++;
dev_kfree_skb(skb);
return;
@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
static int mse102x_net_open(struct net_device *ndev)
{
struct mse102x_net *mse = netdev_priv(ndev);
struct mse102x_net_spi *mses = to_mse102x_spi(mse);
int ret;
ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev)
netif_carrier_on(ndev);
/* The SPI interrupt can get stuck in case of pending packet(s).
* So poll for possible packet(s) to re-arm the interrupt.
*/
mutex_lock(&mses->lock);
mse102x_rx_pkt_spi(mse);
mutex_unlock(&mses->lock);
netif_dbg(mse, ifup, ndev, "network device up\n");
return 0;
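The new length check above bounds rxlen to what one Ethernet frame can legally occupy: with the standard <linux/if_ether.h> and <linux/if_vlan.h> values, ETH_ZLEN is 60 bytes (minimum frame without FCS) and VLAN_ETH_FRAME_LEN is 1518 bytes (maximum single-VLAN-tagged frame without FCS), so the predicate reduces to:

	/* rxlen must fit a real frame: 60 <= rxlen <= 1518 */
	bool ok = rxlen >= ETH_ZLEN && rxlen <= VLAN_ETH_FRAME_LEN;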

View File

@@ -17,6 +17,7 @@
#define REG2_LEDACT GENMASK(23, 22)
#define REG2_LEDLINK GENMASK(25, 24)
#define REG2_DIV4SEL BIT(27)
#define REG2_REVERSED BIT(28)
#define REG2_ADCBYPASS BIT(30)
#define REG2_CLKINSEL BIT(31)
#define ETH_REG3 0x4
@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
* The only constraint is that it must match the one in
* drivers/net/phy/meson-gxl.c to properly match the PHY.
*/
writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
priv->regs + ETH_REG2);
/* Enable the internal phy */

View File

@@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = {
.tx_fixup = rndis_tx_fixup,
};
static const struct driver_info wwan_rndis_info = {
.description = "Mobile Broadband RNDIS device",
.flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
.bind = rndis_bind,
.unbind = rndis_unbind,
.status = rndis_status,
.rx_fixup = rndis_rx_fixup,
.tx_fixup = rndis_tx_fixup,
};
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
@@ -676,11 +666,9 @@ static const struct usb_device_id products [] = {
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
.driver_info = (unsigned long) &rndis_info,
}, {
/* Mobile Broadband Modem, seen in Novatel Verizon USB730L and
* Telit FN990A (RNDIS)
*/
/* Novatel Verizon USB730L */
USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
.driver_info = (unsigned long)&wwan_rndis_info,
.driver_info = (unsigned long) &rndis_info,
},
{ }, // END
};

View File

@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
* default dst remote_ip previously added for this vni
*/
if (!vxlan_addr_any(&vninode->remote_ip) ||
!vxlan_addr_any(&dst->remote_ip))
!vxlan_addr_any(&dst->remote_ip)) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
vninode->vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
__vxlan_fdb_delete(vxlan, all_zeros_mac,
(vxlan_addr_any(&vninode->remote_ip) ?
dst->remote_ip : vninode->remote_ip),
@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
vninode->vni, vninode->vni,
dst->remote_ifindex,
true);
spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
if (vxlan->dev->flags & IFF_UP) {
if (vxlan_addr_multicast(&vninode->remote_ip) &&

View File

@@ -903,14 +903,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
}
/* 1) Prepare USB boot loader for runtime image */
brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
if (err)
goto fail;
rdlstate = le32_to_cpu(state.state);
rdlbytes = le32_to_cpu(state.bytes);
/* 2) Check we are in the Waiting state */
if (rdlstate != DL_WAITING) {
brcmf_err("Failed to DL_START\n");
brcmf_err("Invalid DL state: %u\n", rdlstate);
err = -EINVAL;
goto fail;
}

View File

@@ -103,7 +103,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw)
void plfxlc_mac_release(struct plfxlc_mac *mac)
{
plfxlc_chip_release(&mac->chip);
lockdep_assert_held(&mac->lock);
}
int plfxlc_op_start(struct ieee80211_hw *hw)

View File

@@ -3377,7 +3377,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
if (!nvme_try_sched_reset(&dev->ctrl))
if (nvme_try_sched_reset(&dev->ctrl))
nvme_unquiesce_io_queues(&dev->ctrl);
return PCI_ERS_RESULT_RECOVERED;
}

View File

@@ -1710,7 +1710,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
cancel_work_sync(&queue->io_work);
}
static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1724,6 +1724,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
mutex_unlock(&queue->queue_lock);
}
static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
int timeout = 100;
while (timeout > 0) {
if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
!sk_wmem_alloc_get(queue->sock->sk))
return;
msleep(2);
timeout -= 2;
}
dev_warn(nctrl->device,
"qid %d: timeout draining sock wmem allocation expired\n",
qid);
}
static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
{
nvme_tcp_stop_queue_nowait(nctrl, qid);
nvme_tcp_wait_queue(nctrl, qid);
}
static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
{
write_lock_bh(&queue->sock->sk->sk_callback_lock);
@@ -1790,7 +1815,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
int i;
for (i = 1; i < ctrl->queue_count; i++)
nvme_tcp_stop_queue(ctrl, i);
nvme_tcp_stop_queue_nowait(ctrl, i);
for (i = 1; i < ctrl->queue_count; i++)
nvme_tcp_wait_queue(ctrl, i);
}
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
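The split above converts queue teardown from one blocking call per queue into two passes, so the per-queue socket-drain waits overlap instead of accumulating: with, say, 8 I/O queues and the 100 ms wait budget, the serial worst case of about 8 x 100 ms = 800 ms drops to roughly 100 ms total. The general pattern, with hypothetical stop_nowait()/wait_drained() helpers:

	/* pass 1: initiate every stop without blocking */
	for (i = 1; i < n; i++)
		stop_nowait(i);

	/* pass 2: wait on each in turn; the drains run concurrently,
	 * so the total is bounded by one timeout, not n of them
	 */
	for (i = 1; i < n; i++)
		wait_drained(i);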

View File

@@ -1283,7 +1283,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
case IMX8MQ_EP:
if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
imx6_pcie->controller_id = 1;
fallthrough;
case IMX7D:
imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
"pciephy");
if (IS_ERR(imx6_pcie->pciephy_reset)) {

View File

@@ -823,10 +823,9 @@ static void amd_pmc_s2idle_check(void)
struct smu_metrics table;
int rc;
/* CZN: ensure at least 10 ms pass before future s0i3 entry attempts */
if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) &&
table.s0i3_last_entry_status)
usleep_range(10000, 20000);
/* Avoid triggering OVP */
if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status)
msleep(2500);
/* Dump the IdleMask before we add to the STB */
amd_pmc_idlemask_read(pdev, pdev->dev, NULL);

View File

@@ -121,15 +121,13 @@ static int uncore_event_cpu_online(unsigned int cpu)
{
struct uncore_data *data;
int target;
int ret;
/* Check if there is an online cpu in the package for uncore MSR */
target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
if (target < nr_cpu_ids)
return 0;
/* Use this CPU on this die as a control CPU */
cpumask_set_cpu(cpu, &uncore_cpu_mask);
data = uncore_get_instance(cpu);
if (!data)
return 0;
@@ -138,7 +136,14 @@ static int uncore_event_cpu_online(unsigned int cpu)
data->die_id = topology_die_id(cpu);
data->domain_id = UNCORE_DOMAIN_ID_INVALID;
return uncore_freq_add_entry(data, cpu);
ret = uncore_freq_add_entry(data, cpu);
if (ret)
return ret;
/* Use this CPU on this die as a control CPU */
cpumask_set_cpu(cpu, &uncore_cpu_mask);
return 0;
}
static int uncore_event_cpu_offline(unsigned int cpu)

View File

@@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
u32 inactive_cycles;
u8 cs_state;
if (setup->unit != SPI_DELAY_UNIT_SCK ||
hold->unit != SPI_DELAY_UNIT_SCK ||
inactive->unit != SPI_DELAY_UNIT_SCK) {
if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) ||
(hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) ||
(inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);

View File

@@ -693,7 +693,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
"command-ring",
xhci->debugfs_root);
xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
xhci_debugfs_create_ring_dir(xhci, &xhci->interrupters[0]->event_ring,
"event-ring",
xhci->debugfs_root);

View File

@@ -1880,9 +1880,10 @@ int xhci_bus_resume(struct usb_hcd *hcd)
int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
u32 portsc;
struct xhci_hub *rhub;
struct xhci_port **ports;
bool disabled_irq = false;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
@@ -1898,17 +1899,20 @@ int xhci_bus_resume(struct usb_hcd *hcd)
return -ESHUTDOWN;
}
/* delay the irqs */
temp = readl(&xhci->op_regs->command);
temp &= ~CMD_EIE;
writel(temp, &xhci->op_regs->command);
/* bus specific resume for ports we suspended at bus_suspend */
if (hcd->speed >= HCD_USB3)
if (hcd->speed >= HCD_USB3) {
next_state = XDEV_U0;
else
} else {
next_state = XDEV_RESUME;
if (bus_state->bus_suspended) {
/*
* prevent port event interrupts from interfering
* with usb2 port resume process
*/
xhci_disable_interrupter(xhci->interrupters[0]);
disabled_irq = true;
}
}
port_index = max_ports;
while (port_index--) {
portsc = readl(ports[port_index]->addr);
@@ -1977,11 +1981,9 @@ int xhci_bus_resume(struct usb_hcd *hcd)
(void) readl(&xhci->op_regs->command);
bus_state->next_statechange = jiffies + msecs_to_jiffies(5);
/* re-enable irqs */
temp = readl(&xhci->op_regs->command);
temp |= CMD_EIE;
writel(temp, &xhci->op_regs->command);
temp = readl(&xhci->op_regs->command);
/* re-enable interrupter */
if (disabled_irq)
xhci_enable_interrupter(xhci->interrupters[0]);
spin_unlock_irqrestore(&xhci->lock, flags);
return 0;

View File

@@ -29,6 +29,7 @@
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state,
unsigned int max_packet,
unsigned int num,
gfp_t flags)
{
struct xhci_segment *seg;
@@ -60,6 +61,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
}
seg->num = num;
seg->dma = dma;
seg->next = NULL;
@@ -316,6 +318,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -324,6 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_segment *prev;
unsigned int num = 0;
bool chain_links;
/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
@@ -331,16 +335,17 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)));
prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
if (!prev)
return -ENOMEM;
num_segs--;
num++;
*first = prev;
while (num_segs > 0) {
while (num < num_segs) {
struct xhci_segment *next;
next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
flags);
if (!next) {
prev = *first;
while (prev) {
@@ -353,7 +358,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
xhci_link_segments(prev, next, type, chain_links);
prev = next;
num_segs--;
num++;
}
xhci_link_segments(prev, *first, type, chain_links);
*last = prev;
@@ -1799,23 +1804,13 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
}
static void
xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t erst_size;
u64 tmp64;
u32 tmp;
if (!ir)
return;
erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
if (ir->erst.entries)
dma_free_coherent(dev, erst_size,
ir->erst.entries,
ir->erst.erst_dma_addr);
ir->erst.entries = NULL;
/*
* Clean out interrupter registers except ERSTBA. Clearing either the
* low or high 32 bits of ERSTBA immediately causes the controller to
@@ -1826,19 +1821,60 @@ xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
tmp &= ERST_SIZE_MASK;
writel(tmp, &ir->ir_set->erst_size);
tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
tmp64 &= (u64) ERST_PTR_MASK;
xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue);
}
}
/* free interrrupter event ring */
static void
xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
size_t erst_size;
if (!ir)
return;
erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries;
if (ir->erst.entries)
dma_free_coherent(dev, erst_size,
ir->erst.entries,
ir->erst.erst_dma_addr);
ir->erst.entries = NULL;
/* free interrupter event ring */
if (ir->event_ring)
xhci_ring_free(xhci, ir->event_ring);
ir->event_ring = NULL;
kfree(ir);
}
void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
unsigned int intr_num;
spin_lock_irq(&xhci->lock);
/* interrupter 0 is primary interrupter, don't touch it */
if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) {
xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n");
spin_unlock_irq(&xhci->lock);
return;
}
intr_num = ir->intr_num;
xhci_remove_interrupter(xhci, ir);
xhci->interrupters[intr_num] = NULL;
spin_unlock_irq(&xhci->lock);
xhci_free_interrupter(xhci, ir);
}
EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1846,9 +1882,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
xhci_free_interrupter(xhci, xhci->interrupter);
xhci->interrupter = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
for (i = 0; xhci->interrupters && i < xhci->max_interrupters; i++) {
if (xhci->interrupters[i]) {
xhci_remove_interrupter(xhci, xhci->interrupters[i]);
xhci_free_interrupter(xhci, xhci->interrupters[i]);
xhci->interrupters[i] = NULL;
}
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
@@ -1918,6 +1959,7 @@ no_bw:
for (i = 0; i < xhci->num_port_caps; i++)
kfree(xhci->port_caps[i].psi);
kfree(xhci->port_caps);
kfree(xhci->interrupters);
xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
@@ -1926,6 +1968,7 @@ no_bw:
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
xhci->port_caps = NULL;
xhci->interrupters = NULL;
xhci->page_size = 0;
xhci->page_shift = 0;
@@ -1935,7 +1978,6 @@ no_bw:
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
@@ -1943,15 +1985,12 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter
if (!deq)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK),
&ir->ir_set->erst_dequeue);
}
@@ -2236,18 +2275,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
static struct xhci_interrupter *
xhci_alloc_interrupter(struct xhci_hcd *xhci, gfp_t flags)
xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
unsigned int max_segs;
int ret;
if (!segs)
segs = ERST_DEFAULT_SEGS;
max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
segs = min(segs, max_segs);
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
0, flags);
ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
@@ -2278,12 +2323,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
return -EINVAL;
}
if (xhci->interrupters[intr_num]) {
xhci_warn(xhci, "Interrupter %d\n already set up", intr_num);
return -EINVAL;
}
xhci->interrupters[intr_num] = ir;
ir->intr_num = intr_num;
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
erst_size &= ERST_SIZE_MASK;
erst_size |= ERST_NUM_SEGS;
erst_size |= ir->event_ring->num_segs;
writel(erst_size, &ir->ir_set->erst_size);
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
@@ -2300,10 +2352,58 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
return 0;
}
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
u32 imod_interval)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
unsigned int i;
int err = -ENOSPC;
if (!xhci->interrupters || xhci->max_interrupters <= 1)
return NULL;
ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
if (!ir)
return NULL;
spin_lock_irq(&xhci->lock);
/* Find available secondary interrupter, interrupter 0 is reserved for primary */
for (i = 1; i < xhci->max_interrupters; i++) {
if (xhci->interrupters[i] == NULL) {
err = xhci_add_interrupter(xhci, ir, i);
break;
}
}
spin_unlock_irq(&xhci->lock);
if (err) {
xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n",
xhci->max_interrupters);
xhci_free_interrupter(xhci, ir);
return NULL;
}
err = xhci_set_interrupter_moderation(ir, imod_interval);
if (err)
xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n",
i, imod_interval);
xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n",
i, xhci->max_interrupters);
return ir;
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct xhci_interrupter *ir;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
dma_addr_t dma;
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
@@ -2428,11 +2528,14 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Allocate and set up primary interrupter 0 with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating primary event ring");
xhci->interrupter = xhci_alloc_interrupter(xhci, flags);
if (!xhci->interrupter)
xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters),
flags, dev_to_node(dev));
ir = xhci_alloc_interrupter(xhci, 0, flags);
if (!ir)
goto fail;
if (xhci_add_interrupter(xhci, xhci->interrupter, 0))
if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

View File

@@ -3167,7 +3167,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
return;
/* Update HC event ring dequeue pointer */
temp_64 &= ERST_DESI_MASK;
temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
}
@@ -3225,7 +3225,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
writel(status, &xhci->op_regs->status);
/* This is the handler of the primary interrupter */
ir = xhci->interrupter;
ir = xhci->interrupters[0];
if (!hcd->msi_enabled) {
u32 irq_pending;
irq_pending = readl(&ir->ir_set->irq_pending);

View File

@@ -297,7 +297,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
static int xhci_enable_interrupter(struct xhci_interrupter *ir)
int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
@@ -310,7 +310,7 @@ static int xhci_enable_interrupter(struct xhci_interrupter *ir)
return 0;
}
static int xhci_disable_interrupter(struct xhci_interrupter *ir)
int xhci_disable_interrupter(struct xhci_interrupter *ir)
{
u32 iman;
@@ -323,6 +323,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
return 0;
}
/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval)
{
u32 imod;
if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
return -EINVAL;
imod = readl(&ir->ir_set->irq_control);
imod &= ~ER_IRQ_INTERVAL_MASK;
imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(imod, &ir->ir_set->irq_control);
return 0;
}
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
@@ -457,7 +474,7 @@ static int xhci_init(struct usb_hcd *hcd)
static int xhci_run_finished(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
struct xhci_interrupter *ir = xhci->interrupters[0];
unsigned long flags;
u32 temp;
@@ -505,11 +522,10 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
*/
int xhci_run(struct usb_hcd *hcd)
{
u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupter;
struct xhci_interrupter *ir = xhci->interrupters[0];
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
*/
@@ -525,12 +541,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
temp = readl(&ir->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
writel(temp, &ir->ir_set->irq_control);
xhci_set_interrupter_moderation(ir, xhci->imod_interval);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -573,7 +584,7 @@ void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir = xhci->interrupter;
struct xhci_interrupter *ir = xhci->interrupters[0];
mutex_lock(&xhci->mutex);
@@ -669,36 +680,51 @@ EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
struct xhci_interrupter *ir;
unsigned int i;
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
if (!ir)
return;
/* save both primary and all secondary interrupters */
/* fixme, should we lock to prevent a race with remove secondary interrupter? */
for (i = 0; i < xhci->max_interrupters; i++) {
ir = xhci->interrupters[i];
if (!ir)
continue;
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
ir->s3_irq_control = readl(&ir->ir_set->irq_control);
ir->s3_erst_size = readl(&ir->ir_set->erst_size);
ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
struct xhci_interrupter *ir = xhci->interrupter;
struct xhci_interrupter *ir;
unsigned int i;
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
writel(ir->s3_irq_control, &ir->ir_set->irq_control);
/* FIXME should we lock to protect against freeing of interrupters */
for (i = 0; i < xhci->max_interrupters; i++) {
ir = xhci->interrupters[i];
if (!ir)
continue;
writel(ir->s3_erst_size, &ir->ir_set->erst_size);
xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -1061,7 +1087,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
xhci_disable_interrupter(xhci->interrupter);
xhci_disable_interrupter(xhci->interrupters[0]);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
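For reference on the xhci_set_interrupter_moderation() helper introduced earlier in this file: the hardware moderation-interval field counts 250 ns ticks and is 16 bits wide, which is where the imod_interval > U16_MAX * 250 guard comes from. Worked numbers (arithmetic illustration only, not driver code):

	/* imod_interval is in nanoseconds; the register counts 250 ns ticks */
	u32 ticks  = 5000 / 250;	/* 5 us -> 20 ticks */
	u32 max_ns = 65535 * 250;	/* cap: 16383750 ns, ~16.4 ms */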

View File

@@ -1293,6 +1293,7 @@ struct xhci_segment {
union xhci_trb *trbs;
/* private to HCD */
struct xhci_segment *next;
unsigned int num;
dma_addr_t dma;
/* Max packet sized bounce buffer for td-fragment alignment */
dma_addr_t bounce_dma;
@@ -1422,12 +1423,8 @@ struct urb_priv {
struct xhci_td td[];
};
/*
* Each segment table entry is 4*32 bits long. 1K seems like an ok size:
* (1024 bytes) / (16 bytes per entry) = 64 segment entries in the table,
* meaning 64 ring segments.
* Initial allocated size of the ERST, in number of entries */
#define ERST_NUM_SEGS 1
/* Number of Event Ring segments to allocate, when amount is not specified. (spec allows 32k) */
#define ERST_DEFAULT_SEGS 2
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
@@ -1552,7 +1549,7 @@ struct xhci_hcd {
struct reset_control *reset;
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_interrupter *interrupter;
struct xhci_interrupter **interrupters;
struct xhci_ring *cmd_ring;
unsigned int cmd_ring_state;
#define CMD_RING_STATE_RUNNING (1 << 0)
@@ -1869,6 +1866,11 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
u32 imod_interval);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
@@ -1904,6 +1906,10 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
u32 imod_interval);
int xhci_enable_interrupter(struct xhci_interrupter *ir);
int xhci_disable_interrupter(struct xhci_interrupter *ir);
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);

View File

@@ -2058,12 +2058,13 @@ next_slot:
/*
* If the found extent starts after requested offset, then
* adjust extent_end to be right before this extent begins
* adjust cur_offset to be right before this extent begins.
*/
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
goto must_cow;
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = found_key.offset;
goto next_slot;
}
/*

View File

@@ -2932,6 +2932,7 @@ replay_again:
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
pc_buf = iov[n_iov-1].iov_base;
}

View File

@@ -546,7 +546,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
retval = -ENOMEM;
goto out;
}
sess->user = user;
if (!sess->user) {
/* First successful authentication */
sess->user = user;
} else {
if (!ksmbd_compare_user(sess->user, user)) {
ksmbd_debug(AUTH, "different user tried to reuse session\n");
retval = -EPERM;
ksmbd_free_user(user);
goto out;
}
ksmbd_free_user(user);
}
memcpy(sess->sess_key, resp->payload, resp->session_key_len);
memcpy(out_blob, resp->payload + resp->session_key_len,

View File

@@ -1599,11 +1599,6 @@ static int krb5_authenticate(struct ksmbd_work *work,
if (prev_sess_id && prev_sess_id != sess->id)
destroy_previous_session(conn, sess->user, prev_sess_id);
if (sess->state == SMB2_SESSION_VALID) {
ksmbd_free_user(sess->user);
sess->user = NULL;
}
retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
out_blob, &out_len);
if (retval) {

View File

@@ -1430,6 +1430,7 @@ struct bpf_prog_aux {
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
bool changes_pkt_data;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */

View File

@@ -573,6 +573,7 @@ struct bpf_subprog_info {
bool tail_call_reachable;
bool has_ld_abs;
bool is_async_cb;
bool changes_pkt_data;
};
struct bpf_verifier_env;

View File

@@ -787,8 +787,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
unsigned int target_freq, unsigned int min,
unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
@@ -853,12 +853,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
static inline int find_index_l(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int min, unsigned int max,
bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_al(policy, target_freq,
@@ -868,6 +868,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
efficiencies);
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
{
return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -921,12 +929,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
static inline int find_index_h(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int min, unsigned int max,
bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ah(policy, target_freq,
@@ -936,6 +944,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
efficiencies);
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
{
return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq,
@@ -1006,12 +1022,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
return best;
}
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
static inline int find_index_c(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int min, unsigned int max,
bool efficiencies)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
return cpufreq_table_find_index_ac(policy, target_freq,
@@ -1021,7 +1037,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
efficiencies);
}
static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
/* Works only on sorted freq-tables */
static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
unsigned int target_freq,
bool efficiencies)
{
return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
}
static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max,
int idx)
{
unsigned int freq;
@@ -1030,11 +1056,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
freq = policy->freq_table[idx].frequency;
return freq == clamp_val(freq, policy->min, policy->max);
return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int min,
unsigned int max,
unsigned int relation)
{
bool efficiencies = policy->efficiencies_available &&
@@ -1045,29 +1073,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
relation &= ~CPUFREQ_RELATION_E;
if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
return cpufreq_table_index_unsorted(policy, target_freq,
relation);
return cpufreq_table_index_unsorted(policy, target_freq, min,
max, relation);
retry:
switch (relation) {
case CPUFREQ_RELATION_L:
idx = cpufreq_table_find_index_l(policy, target_freq,
efficiencies);
idx = find_index_l(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_H:
idx = cpufreq_table_find_index_h(policy, target_freq,
efficiencies);
idx = find_index_h(policy, target_freq, min, max, efficiencies);
break;
case CPUFREQ_RELATION_C:
idx = cpufreq_table_find_index_c(policy, target_freq,
efficiencies);
idx = find_index_c(policy, target_freq, min, max, efficiencies);
break;
default:
WARN_ON_ONCE(1);
return 0;
}
/* Limit frequency index to honor policy->min/max */
if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
/* Limit frequency index to honor min and max */
if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
efficiencies = false;
goto retry;
}
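Net effect of the cpufreq.h changes above: the table-lookup helpers now take the min/max limits as explicit parameters rather than reading policy->min and policy->max internally, so a caller can resolve an index against one consistent snapshot of the limits. A hedged usage sketch:

	unsigned int min = READ_ONCE(policy->min);
	unsigned int max = READ_ONCE(policy->max);
	int idx;

	/* both bounds below come from the same snapshot, even if the
	 * policy limits are being updated concurrently
	 */
	idx = cpufreq_frequency_table_target(policy, target_freq,
					     min, max, CPUFREQ_RELATION_L);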

View File

@@ -915,7 +915,7 @@ bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);
bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
{

View File

@@ -162,6 +162,8 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
struct module_kobject *lookup_or_create_module_kobject(const char *name);
/* Generic info of form tag = "info" */
#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)

Some files were not shown because too many files have changed in this diff