Merge 5.15.115 into android14-5.15-lts
Changes in 5.15.115
power: supply: bq27xxx: expose battery data when CI=1
power: supply: bq27xxx: Move bq27xxx_battery_update() down
power: supply: bq27xxx: Ensure power_supply_changed() is called on current sign changes
power: supply: bq27xxx: After charger plug in/out wait 0.5s for things to stabilize
power: supply: core: Refactor power_supply_set_input_current_limit_from_supplier()
power: supply: bq24190: Call power_supply_changed() after updating input current
bpf: fix a memory leak in the LRU and LRU_PERCPU hash maps
net/mlx5: devcom only supports 2 ports
net/mlx5e: Fix deadlock in tc route query code
net/mlx5: Devcom, serialize devcom registration
platform/x86: ISST: PUNIT device mapping with Sub-NUMA clustering
platform/x86: ISST: Remove 8 socket limit
net: phy: mscc: enable VSC8501/2 RGMII RX clock
net: dsa: introduce helpers for iterating through ports using dp
net: dsa: mt7530: rework mt753[01]_setup
net: dsa: mt7530: split-off common parts from mt7531_setup
net: dsa: mt7530: fix network connectivity with multiple CPU ports
Bonding: add arp_missed_max option
bonding: fix send_peer_notif overflow
binder: fix UAF caused by faulty buffer cleanup
irqchip/mips-gic: Get rid of the reliance on irq_cpu_online()
irqchip/mips-gic: Use raw spinlock for gic_lock
net/mlx5e: Fix SQ wake logic in ptp napi_poll context
xdp: Allow registering memory model without rxq reference
net: page_pool: use in_softirq() instead
page_pool: fix inconsistency for page_pool_ring_[un]lock()
irqchip/mips-gic: Don't touch vl_map if a local interrupt is not routable
xdp: xdp_mem_allocator can be NULL in trace_mem_connect().
bluetooth: Add cmd validity checks at the start of hci_sock_ioctl()
Revert "binder_alloc: add missing mmap_lock calls when using the VMA"
Revert "android: binder: stop saving a pointer to the VMA"
binder: add lockless binder_alloc_(set|get)_vma()
binder: fix UAF of alloc->vma in race with munmap()
ipv{4,6}/raw: fix output xfrm lookup wrt protocol
netfilter: ctnetlink: Support offloaded conntrack entry deletion
Linux 5.15.115
Change-Id: I04ebd85160057dcc604a7b2b13f7fdadc08329ac
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -422,6 +422,17 @@ arp_all_targets
		consider the slave up only when all of the arp_ip_targets
		are reachable

+arp_missed_max
+
+	Specifies the number of arp_interval monitor checks that must
+	fail in order for an interface to be marked down by the ARP monitor.
+
+	In order to provide orderly failover semantics, backup interfaces
+	are permitted an extra monitor check (i.e., they must fail
+	arp_missed_max + 1 times before being marked down).
+
+	The default value is 2, and the allowable range is 1 - 255.
+
downdelay

	Specifies the time, in milliseconds, to wait before disabling
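The backup-interface allowance described above can be summarized in a small standalone sketch (illustrative only; the helper and its parameters are hypothetical and are not the bonding driver's internals):

```c
#include <stdbool.h>

/*
 * Mirrors the documented semantics: a backup interface may miss one extra
 * arp_interval check before being marked down. missed_checks and is_backup
 * would come from the monitor state in a real implementation.
 */
static bool should_mark_down(unsigned int missed_checks, bool is_backup,
			     unsigned int missed_max)
{
	unsigned int limit = is_backup ? missed_max + 1 : missed_max;

	return missed_checks >= limit;
}
```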
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 114
+SUBLEVEL = 115
EXTRAVERSION =
NAME = Trick or Treat
@@ -213,8 +213,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
	mm = alloc->vma_vm_mm;

	if (mm) {
-		mmap_read_lock(mm);
-		vma = vma_lookup(mm, alloc->vma_addr);
+		mmap_write_lock(mm);
+		vma = alloc->vma;
	}

	if (!vma && need_mm) {
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
		mmput(mm);
	}
	return 0;
@@ -304,35 +304,24 @@ err_page_ptr_cleared:
	}
err_no_vma:
	if (mm) {
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
		struct vm_area_struct *vma)
{
-	unsigned long vm_start = 0;
-
-	if (vma) {
-		vm_start = vma->vm_start;
-		mmap_assert_write_locked(alloc->vma_vm_mm);
-	}
-
-	alloc->vma_addr = vm_start;
+	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
+	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
-	struct vm_area_struct *vma = NULL;
-
-	if (alloc->vma_addr)
-		vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
-
-	return vma;
+	/* pairs with smp_store_release in binder_alloc_set_vma() */
+	return smp_load_acquire(&alloc->vma);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
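For reference, the store-release/load-acquire pairing used above is the usual publish/consume pattern; a minimal generic sketch (not binder-specific) looks like this:

```c
#include <linux/atomic.h>	/* smp_store_release()/smp_load_acquire() */

/*
 * Sketch of the pattern used by binder_alloc_set_vma()/binder_alloc_get_vma():
 * the writer fully initialises the object before publishing it with
 * smp_store_release(); readers then observe either NULL or a fully
 * initialised object via smp_load_acquire().
 */
struct foo {
	int ready_data;
};

static struct foo *shared;

static void publish(struct foo *f)
{
	f->ready_data = 42;			/* initialise first */
	smp_store_release(&shared, f);		/* then publish */
}

static struct foo *consume(void)
{
	return smp_load_acquire(&shared);	/* NULL or fully initialised */
}
```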
@@ -395,15 +384,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
	size_t size, data_offsets_size;
	int ret;

-	mmap_read_lock(alloc->vma_vm_mm);
+	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_get_vma(alloc)) {
-		mmap_read_unlock(alloc->vma_vm_mm);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}
-	mmap_read_unlock(alloc->vma_vm_mm);

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
@@ -794,6 +781,8 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
+
+	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;
@@ -824,8 +813,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)

	buffers = 0;
	mutex_lock(&alloc->mutex);
-	BUG_ON(alloc->vma_addr &&
-	       vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
+	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -932,25 +920,17 @@ void binder_alloc_print_pages(struct seq_file *m,
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
-
-	mmap_read_lock(alloc->vma_vm_mm);
-	if (binder_alloc_get_vma(alloc) == NULL) {
-		mmap_read_unlock(alloc->vma_vm_mm);
-		goto uninitialized;
+	if (binder_alloc_get_vma(alloc) != NULL) {
+		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+			page = &alloc->pages[i];
+			if (!page->page_ptr)
+				free++;
+			else if (list_empty(&page->lru))
+				active++;
+			else
+				lru++;
+		}
	}

-	mmap_read_unlock(alloc->vma_vm_mm);
-	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-		page = &alloc->pages[i];
-		if (!page->page_ptr)
-			free++;
-		else if (list_empty(&page->lru))
-			active++;
-		else
-			lru++;
-	}
-
-uninitialized:
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
@@ -100,7 +100,7 @@ struct binder_lru_page {
 */
struct binder_alloc {
	struct mutex mutex;
-	unsigned long vma_addr;
+	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	void __user *buffer;
	struct list_head buffers;
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
-	if (!binder_selftest_run || !alloc->vma_addr)
+	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
@@ -49,7 +49,7 @@ void __iomem *mips_gic_base;

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
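The switch to a raw spinlock matters mainly on PREEMPT_RT, where ordinary spinlocks become sleeping locks and so cannot be taken from the hard-IRQ and irqs-disabled paths this driver uses; the locking pattern itself is unchanged. A minimal sketch of that pattern (generic, not tied to this driver):

```c
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_state;

/* Safe from contexts that must not sleep, including on PREEMPT_RT. */
static void example_update(unsigned int val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	example_state = val;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
```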
@@ -210,7 +210,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
|
||||
|
||||
irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
switch (type & IRQ_TYPE_SENSE_MASK) {
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
pol = GIC_POL_FALLING_EDGE;
|
||||
@@ -250,7 +250,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
|
||||
else
|
||||
irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
|
||||
handle_level_irq, NULL);
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -268,7 +268,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
|
||||
return -EINVAL;
|
||||
|
||||
/* Assumption : cpumask refers to a single CPU */
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
|
||||
/* Re-route this IRQ */
|
||||
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
|
||||
@@ -279,7 +279,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
|
||||
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
|
||||
|
||||
irq_data_update_effective_affinity(d, cpumask_of(cpu));
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
@@ -357,12 +357,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
cd->mask = false;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_rmask(BIT(intr));
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
}
|
||||
|
||||
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
|
||||
@@ -375,32 +375,45 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
cd->mask = true;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_smask(BIT(intr));
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
}
|
||||
|
||||
static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
|
||||
static void gic_all_vpes_irq_cpu_online(void)
|
||||
{
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
unsigned int intr;
|
||||
static const unsigned int local_intrs[] = {
|
||||
GIC_LOCAL_INT_TIMER,
|
||||
GIC_LOCAL_INT_PERFCTR,
|
||||
GIC_LOCAL_INT_FDC,
|
||||
};
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
|
||||
cd = irq_data_get_irq_chip_data(d);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
|
||||
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
|
||||
if (cd->mask)
|
||||
write_gic_vl_smask(BIT(intr));
|
||||
for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
|
||||
unsigned int intr = local_intrs[i];
|
||||
struct gic_all_vpes_chip_data *cd;
|
||||
|
||||
if (!gic_local_irq_is_routable(intr))
|
||||
continue;
|
||||
cd = &gic_all_vpes_chip_data[intr];
|
||||
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
|
||||
if (cd->mask)
|
||||
write_gic_vl_smask(BIT(intr));
|
||||
}
|
||||
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip gic_all_vpes_local_irq_controller = {
|
||||
.name = "MIPS GIC Local",
|
||||
.irq_mask = gic_mask_local_irq_all_vpes,
|
||||
.irq_unmask = gic_unmask_local_irq_all_vpes,
|
||||
.irq_cpu_online = gic_all_vpes_irq_cpu_online,
|
||||
};
|
||||
|
||||
static void __gic_irq_dispatch(void)
|
||||
@@ -424,11 +437,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
|
||||
data = irq_get_irq_data(virq);
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
|
||||
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
|
||||
irq_data_update_effective_affinity(data, cpumask_of(cpu));
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -481,6 +494,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
|
||||
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
|
||||
|
||||
/*
|
||||
* If adding support for more per-cpu interrupts, keep the the
|
||||
* array in gic_all_vpes_irq_cpu_online() in sync.
|
||||
*/
|
||||
switch (intr) {
|
||||
case GIC_LOCAL_INT_TIMER:
|
||||
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
|
||||
@@ -519,12 +536,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
if (!gic_local_irq_is_routable(intr))
|
||||
return -EPERM;
|
||||
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
raw_spin_lock_irqsave(&gic_lock, flags);
|
||||
for_each_online_cpu(cpu) {
|
||||
write_gic_vl_other(mips_cm_vp_id(cpu));
|
||||
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
|
||||
}
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -711,8 +728,8 @@ static int gic_cpu_startup(unsigned int cpu)
|
||||
/* Clear all local IRQ masks (ie. disable all local interrupts) */
|
||||
write_gic_vl_rmask(~0);
|
||||
|
||||
/* Invoke irq_cpu_online callbacks to enable desired interrupts */
|
||||
irq_cpu_online();
|
||||
/* Enable desired interrupts */
|
||||
gic_all_vpes_irq_cpu_online();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3145,8 +3145,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
|
||||
* when the source ip is 0, so don't take the link down
|
||||
* if we don't know our ip yet
|
||||
*/
|
||||
if (!bond_time_in_interval(bond, trans_start, 2) ||
|
||||
!bond_time_in_interval(bond, slave->last_rx, 2)) {
|
||||
if (!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
|
||||
!bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
|
||||
|
||||
bond_propose_link_state(slave, BOND_LINK_DOWN);
|
||||
slave_state_changed = 1;
|
||||
@@ -3240,7 +3240,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
|
||||
|
||||
/* Backup slave is down if:
|
||||
* - No current_arp_slave AND
|
||||
* - more than 3*delta since last receive AND
|
||||
* - more than (missed_max+1)*delta since last receive AND
|
||||
* - the bond has an IP address
|
||||
*
|
||||
* Note: a non-null current_arp_slave indicates
|
||||
@@ -3252,20 +3252,20 @@ static int bond_ab_arp_inspect(struct bonding *bond)
|
||||
*/
|
||||
if (!bond_is_active_slave(slave) &&
|
||||
!rcu_access_pointer(bond->current_arp_slave) &&
|
||||
!bond_time_in_interval(bond, last_rx, 3)) {
|
||||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
|
||||
bond_propose_link_state(slave, BOND_LINK_DOWN);
|
||||
commit++;
|
||||
}
|
||||
|
||||
/* Active slave is down if:
|
||||
* - more than 2*delta since transmitting OR
|
||||
* - (more than 2*delta since receive AND
|
||||
* - more than missed_max*delta since transmitting OR
|
||||
* - (more than missed_max*delta since receive AND
|
||||
* the bond has an IP address)
|
||||
*/
|
||||
trans_start = dev_trans_start(slave->dev);
|
||||
if (bond_is_active_slave(slave) &&
|
||||
(!bond_time_in_interval(bond, trans_start, 2) ||
|
||||
!bond_time_in_interval(bond, last_rx, 2))) {
|
||||
(!bond_time_in_interval(bond, trans_start, bond->params.missed_max) ||
|
||||
!bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
|
||||
bond_propose_link_state(slave, BOND_LINK_DOWN);
|
||||
commit++;
|
||||
}
|
||||
@@ -5886,6 +5886,7 @@ static int bond_check_params(struct bond_params *params)
|
||||
params->arp_interval = arp_interval;
|
||||
params->arp_validate = arp_validate_value;
|
||||
params->arp_all_targets = arp_all_targets_value;
|
||||
params->missed_max = 2;
|
||||
params->updelay = updelay;
|
||||
params->downdelay = downdelay;
|
||||
params->peer_notif_delay = 0;
|
||||
|
||||
@@ -79,6 +79,11 @@ nla_put_failure:
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/* Limit the max delay range to 300s */
|
||||
static struct netlink_range_validation delay_range = {
|
||||
.max = 300000,
|
||||
};
|
||||
|
||||
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
|
||||
[IFLA_BOND_MODE] = { .type = NLA_U8 },
|
||||
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
|
||||
@@ -109,7 +114,8 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
|
||||
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
|
||||
.len = ETH_ALEN },
|
||||
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
|
||||
[IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
|
||||
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
|
||||
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
|
||||
};
|
||||
|
||||
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
|
||||
@@ -453,6 +459,15 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
|
||||
return err;
|
||||
}
|
||||
|
||||
if (data[IFLA_BOND_MISSED_MAX]) {
|
||||
int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
|
||||
|
||||
bond_opt_initval(&newval, missed_max);
|
||||
err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -515,6 +530,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
|
||||
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
|
||||
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
|
||||
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
|
||||
nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
|
||||
0;
|
||||
}
|
||||
|
||||
@@ -650,6 +666,10 @@ static int bond_fill_info(struct sk_buff *skb,
|
||||
bond->params.tlb_dynamic_lb))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
|
||||
bond->params.missed_max))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
|
||||
struct ad_info info;
|
||||
|
||||
|
||||
@@ -78,6 +78,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
|
||||
const struct bond_opt_value *newval);
|
||||
static int bond_option_ad_user_port_key_set(struct bonding *bond,
|
||||
const struct bond_opt_value *newval);
|
||||
static int bond_option_missed_max_set(struct bonding *bond,
|
||||
const struct bond_opt_value *newval);
|
||||
|
||||
|
||||
static const struct bond_opt_value bond_mode_tbl[] = {
|
||||
@@ -163,6 +165,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
|
||||
{ NULL, -1, 0}
|
||||
};
|
||||
|
||||
static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
|
||||
{ "off", 0, 0},
|
||||
{ "maxval", 300000, BOND_VALFLAG_MAX},
|
||||
{ NULL, -1, 0}
|
||||
};
|
||||
|
||||
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
|
||||
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
|
||||
{ "better", BOND_PRI_RESELECT_BETTER, 0},
|
||||
@@ -213,6 +221,13 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
|
||||
{ NULL, -1, 0},
|
||||
};
|
||||
|
||||
static const struct bond_opt_value bond_missed_max_tbl[] = {
|
||||
{ "minval", 1, BOND_VALFLAG_MIN},
|
||||
{ "maxval", 255, BOND_VALFLAG_MAX},
|
||||
{ "default", 2, BOND_VALFLAG_DEFAULT},
|
||||
{ NULL, -1, 0},
|
||||
};
|
||||
|
||||
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
|
||||
[BOND_OPT_MODE] = {
|
||||
.id = BOND_OPT_MODE,
|
||||
@@ -270,6 +285,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
|
||||
.values = bond_intmax_tbl,
|
||||
.set = bond_option_arp_interval_set
|
||||
},
|
||||
[BOND_OPT_MISSED_MAX] = {
|
||||
.id = BOND_OPT_MISSED_MAX,
|
||||
.name = "arp_missed_max",
|
||||
.desc = "Maximum number of missed ARP interval",
|
||||
.unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
|
||||
BIT(BOND_MODE_ALB),
|
||||
.values = bond_missed_max_tbl,
|
||||
.set = bond_option_missed_max_set
|
||||
},
|
||||
[BOND_OPT_ARP_TARGETS] = {
|
||||
.id = BOND_OPT_ARP_TARGETS,
|
||||
.name = "arp_ip_target",
|
||||
@@ -449,7 +473,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
|
||||
.id = BOND_OPT_PEER_NOTIF_DELAY,
|
||||
.name = "peer_notif_delay",
|
||||
.desc = "Delay between each peer notification on failover event, in milliseconds",
|
||||
.values = bond_intmax_tbl,
|
||||
.values = bond_peer_notif_delay_tbl,
|
||||
.set = bond_option_peer_notif_delay_set
|
||||
}
|
||||
};
|
||||
@@ -1186,6 +1210,16 @@ static int bond_option_arp_all_targets_set(struct bonding *bond,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bond_option_missed_max_set(struct bonding *bond,
|
||||
const struct bond_opt_value *newval)
|
||||
{
|
||||
netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
|
||||
newval->string, newval->value);
|
||||
bond->params.missed_max = newval->value;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bond_option_primary_set(struct bonding *bond,
|
||||
const struct bond_opt_value *newval)
|
||||
{
|
||||
|
||||
@@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq)
|
||||
|
||||
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
|
||||
bond->params.arp_interval);
|
||||
seq_printf(seq, "ARP Missed Max: %u\n",
|
||||
bond->params.missed_max);
|
||||
|
||||
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
|
||||
|
||||
|
||||
@@ -303,6 +303,18 @@ static ssize_t bonding_show_arp_targets(struct device *d,
|
||||
static DEVICE_ATTR(arp_ip_target, 0644,
|
||||
bonding_show_arp_targets, bonding_sysfs_store_option);
|
||||
|
||||
/* Show the arp missed max. */
|
||||
static ssize_t bonding_show_missed_max(struct device *d,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct bonding *bond = to_bond(d);
|
||||
|
||||
return sprintf(buf, "%u\n", bond->params.missed_max);
|
||||
}
|
||||
static DEVICE_ATTR(arp_missed_max, 0644,
|
||||
bonding_show_missed_max, bonding_sysfs_store_option);
|
||||
|
||||
/* Show the up and down delays. */
|
||||
static ssize_t bonding_show_downdelay(struct device *d,
|
||||
struct device_attribute *attr,
|
||||
@@ -779,6 +791,7 @@ static struct attribute *per_bond_attrs[] = {
|
||||
&dev_attr_ad_actor_sys_prio.attr,
|
||||
&dev_attr_ad_actor_system.attr,
|
||||
&dev_attr_ad_user_port_key.attr,
|
||||
&dev_attr_arp_missed_max.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
||||
@@ -1010,9 +1010,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
|
||||
mt7530_write(priv, MT7530_PVC_P(port),
|
||||
PORT_SPEC_TAG);
|
||||
|
||||
/* Disable flooding by default */
|
||||
mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
|
||||
BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
|
||||
/* Enable flooding on the CPU port */
|
||||
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
|
||||
UNU_FFP(BIT(port)));
|
||||
|
||||
/* Set CPU port number */
|
||||
if (priv->id == ID_MT7621)
|
||||
@@ -2094,11 +2094,12 @@ static int
|
||||
mt7530_setup(struct dsa_switch *ds)
|
||||
{
|
||||
struct mt7530_priv *priv = ds->priv;
|
||||
struct device_node *dn = NULL;
|
||||
struct device_node *phy_node;
|
||||
struct device_node *mac_np;
|
||||
struct mt7530_dummy_poll p;
|
||||
phy_interface_t interface;
|
||||
struct device_node *dn;
|
||||
struct dsa_port *cpu_dp;
|
||||
u32 id, val;
|
||||
int ret, i;
|
||||
|
||||
@@ -2106,7 +2107,19 @@ mt7530_setup(struct dsa_switch *ds)
|
||||
* controller also is the container for two GMACs nodes representing
|
||||
* as two netdev instances.
|
||||
*/
|
||||
dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
|
||||
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
|
||||
dn = cpu_dp->master->dev.of_node->parent;
|
||||
/* It doesn't matter which CPU port is found first,
|
||||
* their masters should share the same parent OF node
|
||||
*/
|
||||
break;
|
||||
}
|
||||
|
||||
if (!dn) {
|
||||
dev_err(ds->dev, "parent OF node of DSA master not found");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ds->assisted_learning_on_cpu_port = true;
|
||||
ds->mtu_enforcement_ingress = true;
|
||||
|
||||
@@ -2274,13 +2287,71 @@ mt7530_setup(struct dsa_switch *ds)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7531_setup_common(struct dsa_switch *ds)
|
||||
{
|
||||
struct mt7530_priv *priv = ds->priv;
|
||||
struct dsa_port *cpu_dp;
|
||||
int ret, i;
|
||||
|
||||
/* BPDU to CPU port */
|
||||
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
|
||||
mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
|
||||
BIT(cpu_dp->index));
|
||||
break;
|
||||
}
|
||||
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
|
||||
MT753X_BPDU_CPU_ONLY);
|
||||
|
||||
/* Enable and reset MIB counters */
|
||||
mt7530_mib_reset(ds);
|
||||
|
||||
/* Disable flooding on all ports */
|
||||
mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
|
||||
UNU_FFP_MASK);
|
||||
|
||||
for (i = 0; i < MT7530_NUM_PORTS; i++) {
|
||||
/* Disable forwarding by default on all ports */
|
||||
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
|
||||
PCR_MATRIX_CLR);
|
||||
|
||||
/* Disable learning by default on all ports */
|
||||
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
|
||||
|
||||
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
|
||||
|
||||
if (dsa_is_cpu_port(ds, i)) {
|
||||
ret = mt753x_cpu_port_enable(ds, i);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
mt7530_port_disable(ds, i);
|
||||
|
||||
/* Set default PVID to 0 on all user ports */
|
||||
mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
|
||||
G0_PORT_VID_DEF);
|
||||
}
|
||||
|
||||
/* Enable consistent egress tag */
|
||||
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
|
||||
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
|
||||
}
|
||||
|
||||
/* Flush the FDB table */
|
||||
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7531_setup(struct dsa_switch *ds)
|
||||
{
|
||||
struct mt7530_priv *priv = ds->priv;
|
||||
struct mt7530_dummy_poll p;
|
||||
u32 val, id;
|
||||
int ret, i;
|
||||
int ret;
|
||||
|
||||
/* Reset whole chip through gpio pin or memory-mapped registers for
|
||||
* different type of hardware
|
||||
@@ -2352,41 +2423,7 @@ mt7531_setup(struct dsa_switch *ds)
|
||||
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
|
||||
CORE_PLL_GROUP4, val);
|
||||
|
||||
/* BPDU to CPU port */
|
||||
mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
|
||||
BIT(MT7530_CPU_PORT));
|
||||
mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
|
||||
MT753X_BPDU_CPU_ONLY);
|
||||
|
||||
/* Enable and reset MIB counters */
|
||||
mt7530_mib_reset(ds);
|
||||
|
||||
for (i = 0; i < MT7530_NUM_PORTS; i++) {
|
||||
/* Disable forwarding by default on all ports */
|
||||
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
|
||||
PCR_MATRIX_CLR);
|
||||
|
||||
/* Disable learning by default on all ports */
|
||||
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
|
||||
|
||||
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
|
||||
|
||||
if (dsa_is_cpu_port(ds, i)) {
|
||||
ret = mt753x_cpu_port_enable(ds, i);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
mt7530_port_disable(ds, i);
|
||||
|
||||
/* Set default PVID to 0 on all user ports */
|
||||
mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
|
||||
G0_PORT_VID_DEF);
|
||||
}
|
||||
|
||||
/* Enable consistent egress tag */
|
||||
mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
|
||||
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
|
||||
}
|
||||
mt7531_setup_common(ds);
|
||||
|
||||
/* Setup VLAN ID 0 for VLAN-unaware bridges */
|
||||
ret = mt7530_setup_vlan0(priv);
|
||||
@@ -2396,11 +2433,6 @@ mt7531_setup(struct dsa_switch *ds)
|
||||
ds->assisted_learning_on_cpu_port = true;
|
||||
ds->mtu_enforcement_ingress = true;
|
||||
|
||||
/* Flush the FDB table */
|
||||
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -126,6 +126,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
	/* ensure cq space is freed before enabling more cqes */
	wmb();

+	mlx5e_txqsq_wake(&ptpsq->txqsq);
+
	return work_done == budget;
}

@@ -172,6 +172,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
	return pi;
}

+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
struct mlx5e_icosq_wqe_info {
	u8 wqe_type;
	u8 num_wqebbs;
@@ -1308,11 +1308,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
|
||||
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
|
||||
{
|
||||
struct mlx5e_priv *out_priv, *route_priv;
|
||||
struct mlx5_devcom *devcom = NULL;
|
||||
struct mlx5_core_dev *route_mdev;
|
||||
struct mlx5_eswitch *esw;
|
||||
u16 vhca_id;
|
||||
int err;
|
||||
|
||||
out_priv = netdev_priv(out_dev);
|
||||
esw = out_priv->mdev->priv.eswitch;
|
||||
@@ -1321,6 +1319,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
|
||||
|
||||
vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
|
||||
if (mlx5_lag_is_active(out_priv->mdev)) {
|
||||
struct mlx5_devcom *devcom;
|
||||
int err;
|
||||
|
||||
/* In lag case we may get devices from different eswitch instances.
|
||||
* If we failed to get vport num, it means, mostly, that we on the wrong
|
||||
* eswitch.
|
||||
@@ -1329,16 +1330,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
|
||||
if (err != -ENOENT)
|
||||
return err;
|
||||
|
||||
rcu_read_lock();
|
||||
devcom = out_priv->mdev->priv.devcom;
|
||||
esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
|
||||
if (!esw)
|
||||
return -ENODEV;
|
||||
esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
|
||||
err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
|
||||
rcu_read_unlock();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
|
||||
if (devcom)
|
||||
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
|
||||
return err;
|
||||
return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
|
||||
}
|
||||
|
||||
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
|
||||
|
||||
@@ -810,6 +810,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
|
||||
}
|
||||
}
|
||||
|
||||
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
|
||||
{
|
||||
if (netif_tx_queue_stopped(sq->txq) &&
|
||||
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
|
||||
mlx5e_ptpsq_fifo_has_room(sq) &&
|
||||
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
|
||||
netif_tx_wake_queue(sq->txq);
|
||||
sq->stats->wake++;
|
||||
}
|
||||
}
|
||||
|
||||
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
||||
{
|
||||
struct mlx5e_sq_stats *stats;
|
||||
@@ -909,13 +920,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
|
||||
|
||||
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
|
||||
|
||||
if (netif_tx_queue_stopped(sq->txq) &&
|
||||
mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
|
||||
mlx5e_ptpsq_fifo_has_room(sq) &&
|
||||
!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
|
||||
netif_tx_wake_queue(sq->txq);
|
||||
stats->wake++;
|
||||
}
|
||||
mlx5e_txqsq_wake(sq);
|
||||
|
||||
return (i == MLX5E_TX_CQ_POLL_BUDGET);
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include <linux/mlx5/vport.h>
|
||||
#include "lib/devcom.h"
|
||||
#include "mlx5_core.h"
|
||||
|
||||
static LIST_HEAD(devcom_list);
|
||||
|
||||
@@ -13,8 +14,8 @@ static LIST_HEAD(devcom_list);
|
||||
|
||||
struct mlx5_devcom_component {
|
||||
struct {
|
||||
void *data;
|
||||
} device[MLX5_MAX_PORTS];
|
||||
void __rcu *data;
|
||||
} device[MLX5_DEVCOM_PORTS_SUPPORTED];
|
||||
|
||||
mlx5_devcom_event_handler_t handler;
|
||||
struct rw_semaphore sem;
|
||||
@@ -25,7 +26,7 @@ struct mlx5_devcom_list {
|
||||
struct list_head list;
|
||||
|
||||
struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
|
||||
struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
|
||||
struct mlx5_core_dev *devs[MLX5_DEVCOM_PORTS_SUPPORTED];
|
||||
};
|
||||
|
||||
struct mlx5_devcom {
|
||||
@@ -74,13 +75,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
|
||||
|
||||
if (!mlx5_core_is_pf(dev))
|
||||
return NULL;
|
||||
if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
|
||||
return NULL;
|
||||
|
||||
mlx5_dev_list_lock();
|
||||
sguid0 = mlx5_query_nic_system_image_guid(dev);
|
||||
list_for_each_entry(iter, &devcom_list, list) {
|
||||
struct mlx5_core_dev *tmp_dev = NULL;
|
||||
|
||||
idx = -1;
|
||||
for (i = 0; i < MLX5_MAX_PORTS; i++) {
|
||||
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
|
||||
if (iter->devs[i])
|
||||
tmp_dev = iter->devs[i];
|
||||
else
|
||||
@@ -100,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
|
||||
|
||||
if (!priv) {
|
||||
priv = mlx5_devcom_list_alloc();
|
||||
if (!priv)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
if (!priv) {
|
||||
devcom = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
idx = 0;
|
||||
new_priv = true;
|
||||
@@ -112,12 +118,14 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
|
||||
if (!devcom) {
|
||||
if (new_priv)
|
||||
kfree(priv);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
devcom = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (new_priv)
|
||||
list_add(&priv->list, &devcom_list);
|
||||
|
||||
out:
|
||||
mlx5_dev_list_unlock();
|
||||
return devcom;
|
||||
}
|
||||
|
||||
@@ -130,20 +138,23 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
|
||||
if (IS_ERR_OR_NULL(devcom))
|
||||
return;
|
||||
|
||||
mlx5_dev_list_lock();
|
||||
priv = devcom->priv;
|
||||
priv->devs[devcom->idx] = NULL;
|
||||
|
||||
kfree(devcom);
|
||||
|
||||
for (i = 0; i < MLX5_MAX_PORTS; i++)
|
||||
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
|
||||
if (priv->devs[i])
|
||||
break;
|
||||
|
||||
if (i != MLX5_MAX_PORTS)
|
||||
return;
|
||||
if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
|
||||
goto out;
|
||||
|
||||
list_del(&priv->list);
|
||||
kfree(priv);
|
||||
out:
|
||||
mlx5_dev_list_unlock();
|
||||
}
|
||||
|
||||
void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
|
||||
@@ -161,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
|
||||
comp = &devcom->priv->components[id];
|
||||
down_write(&comp->sem);
|
||||
comp->handler = handler;
|
||||
comp->device[devcom->idx].data = data;
|
||||
rcu_assign_pointer(comp->device[devcom->idx].data, data);
|
||||
up_write(&comp->sem);
|
||||
}
|
||||
|
||||
@@ -175,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
|
||||
|
||||
comp = &devcom->priv->components[id];
|
||||
down_write(&comp->sem);
|
||||
comp->device[devcom->idx].data = NULL;
|
||||
RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
|
||||
up_write(&comp->sem);
|
||||
synchronize_rcu();
|
||||
}
|
||||
|
||||
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
|
||||
@@ -192,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
|
||||
|
||||
comp = &devcom->priv->components[id];
|
||||
down_write(&comp->sem);
|
||||
for (i = 0; i < MLX5_MAX_PORTS; i++)
|
||||
if (i != devcom->idx && comp->device[i].data) {
|
||||
err = comp->handler(event, comp->device[i].data,
|
||||
event_data);
|
||||
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
|
||||
void *data = rcu_dereference_protected(comp->device[i].data,
|
||||
lockdep_is_held(&comp->sem));
|
||||
|
||||
if (i != devcom->idx && data) {
|
||||
err = comp->handler(event, data, event_data);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
up_write(&comp->sem);
|
||||
return err;
|
||||
@@ -212,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
|
||||
comp = &devcom->priv->components[id];
|
||||
WARN_ON(!rwsem_is_locked(&comp->sem));
|
||||
|
||||
comp->paired = paired;
|
||||
WRITE_ONCE(comp->paired, paired);
|
||||
}
|
||||
|
||||
bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
|
||||
@@ -221,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
|
||||
if (IS_ERR_OR_NULL(devcom))
|
||||
return false;
|
||||
|
||||
return devcom->priv->components[id].paired;
|
||||
return READ_ONCE(devcom->priv->components[id].paired);
|
||||
}
|
||||
|
||||
void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
|
||||
@@ -235,16 +250,38 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
|
||||
|
||||
comp = &devcom->priv->components[id];
|
||||
down_read(&comp->sem);
|
||||
if (!comp->paired) {
|
||||
if (!READ_ONCE(comp->paired)) {
|
||||
up_read(&comp->sem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < MLX5_MAX_PORTS; i++)
|
||||
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
|
||||
if (i != devcom->idx)
|
||||
break;
|
||||
|
||||
return comp->device[i].data;
|
||||
return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
|
||||
}
|
||||
|
||||
void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
|
||||
{
|
||||
struct mlx5_devcom_component *comp;
|
||||
int i;
|
||||
|
||||
if (IS_ERR_OR_NULL(devcom))
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
|
||||
if (i != devcom->idx)
|
||||
break;
|
||||
|
||||
comp = &devcom->priv->components[id];
|
||||
/* This can change concurrently, however 'data' pointer will remain
|
||||
* valid for the duration of RCU read section.
|
||||
*/
|
||||
if (!READ_ONCE(comp->paired))
|
||||
return NULL;
|
||||
|
||||
return rcu_dereference(comp->device[i].data);
|
||||
}
|
||||
|
||||
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
|
||||
|
||||
@@ -6,6 +6,8 @@

#include <linux/mlx5/driver.h>

+#define MLX5_DEVCOM_PORTS_SUPPORTED 2
+
enum mlx5_devcom_components {
	MLX5_DEVCOM_ESW_OFFLOADS,

@@ -39,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,

void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
				enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
				   enum mlx5_devcom_components id);
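The new mlx5_devcom_get_peer_data_rcu() helper is meant to be called under rcu_read_lock(), as the tc route-query hunk above does; a minimal caller sketch (hypothetical usage, not an actual mlx5 code path) would be:

```c
#include <linux/errno.h>
#include <linux/rcupdate.h>

/*
 * Sketch only: the pointer returned by the _rcu variant is guaranteed to
 * stay valid only for the duration of the RCU read-side critical section.
 * 'devcom' is assumed to be a valid struct mlx5_devcom pointer obtained
 * elsewhere, and struct mlx5_eswitch is the mlx5 eswitch type.
 */
static int example_use_peer_esw(struct mlx5_devcom *devcom)
{
	struct mlx5_eswitch *esw;
	int err;

	rcu_read_lock();
	esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	err = esw ? 0 : -ENODEV;	/* use 'esw' only inside this section */
	rcu_read_unlock();

	return err;
}
```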
@@ -179,6 +179,7 @@ enum rgmii_clock_delay {
#define VSC8502_RGMII_CNTL 20
#define VSC8502_RGMII_RX_DELAY_MASK 0x0070
#define VSC8502_RGMII_TX_DELAY_MASK 0x0007
+#define VSC8502_RGMII_RX_CLK_DISABLE 0x0800

#define MSCC_PHY_WOL_LOWER_MAC_ADDR 21
#define MSCC_PHY_WOL_MID_MAC_ADDR 22
@@ -527,14 +527,27 @@ out_unlock:
|
||||
* * 2.0 ns (which causes the data to be sampled at exactly half way between
|
||||
* clock transitions at 1000 Mbps) if delays should be enabled
|
||||
*/
|
||||
static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
|
||||
u16 rgmii_rx_delay_mask,
|
||||
u16 rgmii_tx_delay_mask)
|
||||
static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
|
||||
u16 rgmii_rx_delay_mask,
|
||||
u16 rgmii_tx_delay_mask)
|
||||
{
|
||||
u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
|
||||
u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
|
||||
u16 reg_val = 0;
|
||||
int rc;
|
||||
u16 mask = 0;
|
||||
int rc = 0;
|
||||
|
||||
/* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
|
||||
* to be unset for all PHY modes, so do that as part of the paged
|
||||
* register modification.
|
||||
* For some family members (like VSC8530/31/40/41) this bit is reserved
|
||||
* and read-only, and the RX clock is enabled by default.
|
||||
*/
|
||||
if (rgmii_cntl == VSC8502_RGMII_CNTL)
|
||||
mask |= VSC8502_RGMII_RX_CLK_DISABLE;
|
||||
|
||||
if (phy_interface_is_rgmii(phydev))
|
||||
mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
|
||||
|
||||
mutex_lock(&phydev->lock);
|
||||
|
||||
@@ -545,10 +558,9 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
|
||||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
|
||||
reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
|
||||
|
||||
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
|
||||
rgmii_cntl,
|
||||
rgmii_rx_delay_mask | rgmii_tx_delay_mask,
|
||||
reg_val);
|
||||
if (mask)
|
||||
rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
|
||||
rgmii_cntl, mask, reg_val);
|
||||
|
||||
mutex_unlock(&phydev->lock);
|
||||
|
||||
@@ -557,19 +569,11 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
|
||||
|
||||
static int vsc85xx_default_config(struct phy_device *phydev)
|
||||
{
|
||||
int rc;
|
||||
|
||||
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
|
||||
|
||||
if (phy_interface_mode_is_rgmii(phydev->interface)) {
|
||||
rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
|
||||
VSC8502_RGMII_RX_DELAY_MASK,
|
||||
VSC8502_RGMII_TX_DELAY_MASK);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
|
||||
VSC8502_RGMII_RX_DELAY_MASK,
|
||||
VSC8502_RGMII_TX_DELAY_MASK);
|
||||
}
|
||||
|
||||
static int vsc85xx_get_tunable(struct phy_device *phydev,
|
||||
@@ -1766,13 +1770,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (phy_interface_is_rgmii(phydev)) {
|
||||
ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
|
||||
VSC8572_RGMII_RX_DELAY_MASK,
|
||||
VSC8572_RGMII_TX_DELAY_MASK);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
|
||||
VSC8572_RGMII_RX_DELAY_MASK,
|
||||
VSC8572_RGMII_TX_DELAY_MASK);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = genphy_soft_reset(phydev);
|
||||
if (ret)
|
||||
|
||||
@@ -277,39 +277,46 @@ static int isst_if_get_platform_info(void __user *argp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define ISST_MAX_BUS_NUMBER 2
|
||||
|
||||
struct isst_if_cpu_info {
|
||||
/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
|
||||
int bus_info[2];
|
||||
struct pci_dev *pci_dev[2];
|
||||
int bus_info[ISST_MAX_BUS_NUMBER];
|
||||
struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
|
||||
int punit_cpu_id;
|
||||
int numa_node;
|
||||
};
|
||||
|
||||
struct isst_if_pkg_info {
|
||||
struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
|
||||
};
|
||||
|
||||
static struct isst_if_cpu_info *isst_cpu_info;
|
||||
#define ISST_MAX_PCI_DOMAINS 8
|
||||
static struct isst_if_pkg_info *isst_pkg_info;
|
||||
|
||||
static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
|
||||
{
|
||||
struct pci_dev *matched_pci_dev = NULL;
|
||||
struct pci_dev *pci_dev = NULL;
|
||||
int no_matches = 0;
|
||||
int i, bus_number;
|
||||
struct pci_dev *_pci_dev = NULL;
|
||||
int no_matches = 0, pkg_id;
|
||||
int bus_number;
|
||||
|
||||
if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
|
||||
cpu >= num_possible_cpus())
|
||||
if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
|
||||
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
|
||||
return NULL;
|
||||
|
||||
pkg_id = topology_physical_package_id(cpu);
|
||||
|
||||
bus_number = isst_cpu_info[cpu].bus_info[bus_no];
|
||||
if (bus_number < 0)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
|
||||
struct pci_dev *_pci_dev;
|
||||
for_each_pci_dev(_pci_dev) {
|
||||
int node;
|
||||
|
||||
_pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
|
||||
if (!_pci_dev)
|
||||
if (_pci_dev->bus->number != bus_number ||
|
||||
_pci_dev->devfn != PCI_DEVFN(dev, fn))
|
||||
continue;
|
||||
|
||||
++no_matches;
|
||||
@@ -324,6 +331,8 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
|
||||
}
|
||||
|
||||
if (node == isst_cpu_info[cpu].numa_node) {
|
||||
isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;
|
||||
|
||||
pci_dev = _pci_dev;
|
||||
break;
|
||||
}
|
||||
@@ -342,6 +351,10 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
|
||||
if (!pci_dev && no_matches == 1)
|
||||
pci_dev = matched_pci_dev;
|
||||
|
||||
/* Return pci_dev pointer for any matched CPU in the package */
|
||||
if (!pci_dev)
|
||||
pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];
|
||||
|
||||
return pci_dev;
|
||||
}
|
||||
|
||||
@@ -361,8 +374,8 @@ struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
|
||||
{
|
||||
struct pci_dev *pci_dev;
|
||||
|
||||
if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
|
||||
cpu >= num_possible_cpus())
|
||||
if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
|
||||
cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
|
||||
return NULL;
|
||||
|
||||
pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];
|
||||
@@ -417,10 +430,19 @@ static int isst_if_cpu_info_init(void)
|
||||
if (!isst_cpu_info)
|
||||
return -ENOMEM;
|
||||
|
||||
isst_pkg_info = kcalloc(topology_max_packages(),
|
||||
sizeof(*isst_pkg_info),
|
||||
GFP_KERNEL);
|
||||
if (!isst_pkg_info) {
|
||||
kfree(isst_cpu_info);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
|
||||
"platform/x86/isst-if:online",
|
||||
isst_if_cpu_online, NULL);
|
||||
if (ret < 0) {
|
||||
kfree(isst_pkg_info);
|
||||
kfree(isst_cpu_info);
|
||||
return ret;
|
||||
}
|
||||
@@ -433,6 +455,7 @@ static int isst_if_cpu_info_init(void)
|
||||
static void isst_if_cpu_info_exit(void)
|
||||
{
|
||||
cpuhp_remove_state(isst_if_online_id);
|
||||
kfree(isst_pkg_info);
|
||||
kfree(isst_cpu_info);
|
||||
};
|
||||
|
||||
|
||||
@@ -1201,8 +1201,19 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
|
||||
struct bq24190_dev_info *bdi =
|
||||
container_of(work, struct bq24190_dev_info,
|
||||
input_current_limit_work.work);
|
||||
union power_supply_propval val;
|
||||
int ret;
|
||||
|
||||
power_supply_set_input_current_limit_from_supplier(bdi->charger);
|
||||
ret = power_supply_get_property_from_supplier(bdi->charger,
|
||||
POWER_SUPPLY_PROP_CURRENT_MAX,
|
||||
&val);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
bq24190_charger_set_property(bdi->charger,
|
||||
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
|
||||
&val);
|
||||
power_supply_changed(bdi->charger);
|
||||
}
|
||||
|
||||
/* Sync the input-current-limit with our parent supply (if we have one) */
|
||||
|
||||
@@ -1572,14 +1572,6 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg)
|
||||
*/
|
||||
static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di)
|
||||
{
|
||||
int flags;
|
||||
|
||||
if (di->opts & BQ27XXX_O_ZERO) {
|
||||
flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true);
|
||||
if (flags >= 0 && (flags & BQ27000_FLAG_CI))
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC);
|
||||
}
|
||||
|
||||
@@ -1742,6 +1734,18 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags)
|
||||
return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF);
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if reported battery capacity is inaccurate
|
||||
*/
|
||||
static bool bq27xxx_battery_capacity_inaccurate(struct bq27xxx_device_info *di,
|
||||
u16 flags)
|
||||
{
|
||||
if (di->opts & BQ27XXX_O_HAS_CI)
|
||||
return (flags & BQ27000_FLAG_CI);
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
|
||||
{
|
||||
/* Unlikely but important to return first */
|
||||
@@ -1751,83 +1755,12 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
|
||||
return POWER_SUPPLY_HEALTH_COLD;
|
||||
if (unlikely(bq27xxx_battery_dead(di, di->cache.flags)))
|
||||
return POWER_SUPPLY_HEALTH_DEAD;
|
||||
if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags)))
|
||||
return POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
|
||||
|
||||
return POWER_SUPPLY_HEALTH_GOOD;
|
||||
}
|
||||
|
||||
static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
|
||||
{
|
||||
struct bq27xxx_reg_cache cache = {0, };
|
||||
bool has_ci_flag = di->opts & BQ27XXX_O_HAS_CI;
|
||||
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
|
||||
|
||||
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
|
||||
if ((cache.flags & 0xff) == 0xff)
|
||||
cache.flags = -1; /* read error */
|
||||
if (cache.flags >= 0) {
|
||||
cache.temperature = bq27xxx_battery_read_temperature(di);
|
||||
if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) {
|
||||
dev_info_once(di->dev, "battery is not calibrated! ignoring capacity values\n");
|
||||
cache.capacity = -ENODATA;
|
||||
cache.energy = -ENODATA;
|
||||
cache.time_to_empty = -ENODATA;
|
||||
cache.time_to_empty_avg = -ENODATA;
|
||||
cache.time_to_full = -ENODATA;
|
||||
cache.charge_full = -ENODATA;
|
||||
cache.health = -ENODATA;
|
||||
} else {
|
||||
if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
|
||||
cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
|
||||
if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
|
||||
cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
|
||||
if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
|
||||
cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
|
||||
|
||||
cache.charge_full = bq27xxx_battery_read_fcc(di);
|
||||
cache.capacity = bq27xxx_battery_read_soc(di);
|
||||
if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
|
||||
cache.energy = bq27xxx_battery_read_energy(di);
|
||||
di->cache.flags = cache.flags;
|
||||
cache.health = bq27xxx_battery_read_health(di);
|
||||
}
|
||||
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
|
||||
cache.cycle_count = bq27xxx_battery_read_cyct(di);
|
||||
|
||||
/* We only have to read charge design full once */
|
||||
if (di->charge_design_full <= 0)
|
||||
di->charge_design_full = bq27xxx_battery_read_dcap(di);
|
||||
}
|
||||
|
||||
if ((di->cache.capacity != cache.capacity) ||
|
||||
(di->cache.flags != cache.flags))
|
||||
power_supply_changed(di->bat);
|
||||
|
||||
if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
|
||||
di->cache = cache;
|
||||
|
||||
di->last_update = jiffies;
|
||||
|
||||
if (!di->removed && poll_interval > 0)
|
||||
mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
|
||||
}
|
||||
|
||||
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
|
||||
{
|
||||
mutex_lock(&di->lock);
|
||||
bq27xxx_battery_update_unlocked(di);
|
||||
mutex_unlock(&di->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
|
||||
|
||||
static void bq27xxx_battery_poll(struct work_struct *work)
|
||||
{
|
||||
struct bq27xxx_device_info *di =
|
||||
container_of(work, struct bq27xxx_device_info,
|
||||
work.work);
|
||||
|
||||
bq27xxx_battery_update(di);
|
||||
}
|
||||
|
||||
static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
|
||||
{
|
||||
if (di->opts & BQ27XXX_O_ZERO)
|
||||
@@ -1901,6 +1834,78 @@ static int bq27xxx_battery_current_and_status(
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
|
||||
{
|
||||
union power_supply_propval status = di->last_status;
|
||||
struct bq27xxx_reg_cache cache = {0, };
|
||||
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
|
||||
|
||||
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
|
||||
if ((cache.flags & 0xff) == 0xff)
|
||||
cache.flags = -1; /* read error */
|
||||
if (cache.flags >= 0) {
|
||||
cache.temperature = bq27xxx_battery_read_temperature(di);
|
||||
if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
|
||||
cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
|
||||
if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
|
||||
cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
|
||||
if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
|
||||
cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
|
||||
|
||||
cache.charge_full = bq27xxx_battery_read_fcc(di);
|
||||
cache.capacity = bq27xxx_battery_read_soc(di);
|
||||
if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
|
||||
cache.energy = bq27xxx_battery_read_energy(di);
|
||||
di->cache.flags = cache.flags;
|
||||
cache.health = bq27xxx_battery_read_health(di);
|
||||
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
|
||||
cache.cycle_count = bq27xxx_battery_read_cyct(di);
|
||||
|
||||
/*
|
||||
* On gauges with signed current reporting the current must be
|
||||
* checked to detect charging <-> discharging status changes.
|
||||
*/
|
||||
if (!(di->opts & BQ27XXX_O_ZERO))
|
||||
bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
|
||||
|
||||
/* We only have to read charge design full once */
|
||||
if (di->charge_design_full <= 0)
|
||||
di->charge_design_full = bq27xxx_battery_read_dcap(di);
|
||||
}
|
||||
|
||||
if ((di->cache.capacity != cache.capacity) ||
|
||||
(di->cache.flags != cache.flags) ||
|
||||
(di->last_status.intval != status.intval)) {
|
||||
di->last_status.intval = status.intval;
|
||||
power_supply_changed(di->bat);
|
||||
}
|
||||
|
||||
if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
|
||||
di->cache = cache;
|
||||
|
||||
di->last_update = jiffies;
|
||||
|
||||
if (!di->removed && poll_interval > 0)
|
||||
mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
|
||||
}
|
||||
|
||||
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
|
||||
{
|
||||
mutex_lock(&di->lock);
|
||||
bq27xxx_battery_update_unlocked(di);
|
||||
mutex_unlock(&di->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
|
||||
|
||||
static void bq27xxx_battery_poll(struct work_struct *work)
|
||||
{
|
||||
struct bq27xxx_device_info *di =
|
||||
container_of(work, struct bq27xxx_device_info,
|
||||
work.work);
|
||||
|
||||
bq27xxx_battery_update(di);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the average power in µW
|
||||
* Return < 0 if something fails.
|
||||
@@ -2094,8 +2099,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
|
||||
{
|
||||
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
|
||||
|
||||
cancel_delayed_work_sync(&di->work);
|
||||
schedule_delayed_work(&di->work, 0);
|
||||
/* After charger plug in/out wait 0.5s for things to stabilize */
|
||||
mod_delayed_work(system_wq, &di->work, HZ / 2);
|
||||
}
|
||||
|
||||
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
|
||||
|
||||
@@ -382,46 +382,49 @@ int power_supply_is_system_supplied(void)
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);

static int __power_supply_get_supplier_max_current(struct device *dev,
void *data)
struct psy_get_supplier_prop_data {
struct power_supply *psy;
enum power_supply_property psp;
union power_supply_propval *val;
};

static int __power_supply_get_supplier_property(struct device *dev, void *_data)
{
union power_supply_propval ret = {0,};
struct power_supply *epsy = dev_get_drvdata(dev);
struct power_supply *psy = data;
struct psy_get_supplier_prop_data *data = _data;

if (__power_supply_is_supplied_by(epsy, psy))
if (!epsy->desc->get_property(epsy,
POWER_SUPPLY_PROP_CURRENT_MAX,
&ret))
return ret.intval;
if (__power_supply_is_supplied_by(epsy, data->psy))
if (!epsy->desc->get_property(epsy, data->psp, data->val))
return 1; /* Success */

return 0;
return 0; /* Continue iterating */
}

int power_supply_set_input_current_limit_from_supplier(struct power_supply *psy)
int power_supply_get_property_from_supplier(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
union power_supply_propval val = {0,};
int curr;

if (!psy->desc->set_property)
return -EINVAL;
struct psy_get_supplier_prop_data data = {
.psy = psy,
.psp = psp,
.val = val,
};
int ret;

/*
* This function is not intended for use with a supply with multiple
* suppliers, we simply pick the first supply to report a non 0
* max-current.
* suppliers, we simply pick the first supply to report the psp.
*/
curr = class_for_each_device(power_supply_class, NULL, psy,
__power_supply_get_supplier_max_current);
if (curr <= 0)
return (curr == 0) ? -ENODEV : curr;
ret = class_for_each_device(power_supply_class, NULL, &data,
__power_supply_get_supplier_property);
if (ret < 0)
return ret;
if (ret == 0)
return -ENODEV;

val.intval = curr;

return psy->desc->set_property(psy,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
return 0;
}
EXPORT_SYMBOL_GPL(power_supply_set_input_current_limit_from_supplier);
EXPORT_SYMBOL_GPL(power_supply_get_property_from_supplier);

int power_supply_set_battery_charged(struct power_supply *psy)
{

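For context, a hedged sketch of how a consumer of the refactored helper above might look: a charger driver asking whichever supply feeds it for POWER_SUPPLY_PROP_CURRENT_MAX and applying the result as its own input current limit. Everything outside the hunk (the example_* name and the final set_property step) is an assumption, not code from this series:

static void example_update_input_current(struct power_supply *charger)
{
	union power_supply_propval val;
	int ret;

	/* Ask the first supplier that reports the property. */
	ret = power_supply_get_property_from_supplier(charger,
					POWER_SUPPLY_PROP_CURRENT_MAX,
					&val);
	if (ret)
		return;	/* no supplier reported it, or the lookup failed */

	/* val.intval is in µA; mirror it into this charger's input limit. */
	if (charger->desc->set_property)
		charger->desc->set_property(charger,
					POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
					&val);
}
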
@@ -2,6 +2,8 @@
#ifndef __LINUX_BQ27X00_BATTERY_H__
#define __LINUX_BQ27X00_BATTERY_H__

#include <linux/power_supply.h>

enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
@@ -70,6 +72,7 @@ struct bq27xxx_device_info {
int charge_design_full;
bool removed;
unsigned long last_update;
union power_supply_propval last_status;
struct delayed_work work;
struct power_supply *bat;
struct list_head list;

@@ -445,8 +445,9 @@ power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table
int table_len, int temp);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_input_current_limit_from_supplier(
struct power_supply *psy);
int power_supply_get_property_from_supplier(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
extern int power_supply_set_battery_charged(struct power_supply *psy);

#ifdef CONFIG_POWER_SUPPLY

@@ -65,6 +65,7 @@ enum {
BOND_OPT_NUM_PEER_NOTIF_ALIAS,
BOND_OPT_PEER_NOTIF_DELAY,
BOND_OPT_LACP_ACTIVE,
BOND_OPT_MISSED_MAX,
BOND_OPT_LAST
};


@@ -121,6 +121,7 @@ struct bond_params {
int xmit_policy;
int miimon;
u8 num_peer_notif;
u8 missed_max;
int arp_interval;
int arp_validate;
int arp_all_targets;
@@ -227,7 +228,7 @@ struct bonding {
*/
spinlock_t mode_lock;
spinlock_t stats_lock;
u8 send_peer_notif;
u32 send_peer_notif;
u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;

@@ -472,6 +472,34 @@ static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}

#define dsa_tree_for_each_user_port(_dp, _dst) \
list_for_each_entry((_dp), &(_dst)->ports, list) \
if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_port(_dp, _ds) \
list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_available_port(_dp, _ds) \
dsa_switch_for_each_port((_dp), (_ds)) \
if (!dsa_port_is_unused((_dp)))

#define dsa_switch_for_each_user_port(_dp, _ds) \
dsa_switch_for_each_port((_dp), (_ds)) \
if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_cpu_port(_dp, _ds) \
dsa_switch_for_each_port((_dp), (_ds)) \
if (dsa_port_is_cpu((_dp)))

static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
u32 mask = 0;

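Purely illustrative usage of the new iterators above, so the later switch-driver hunks are easier to read; example_setup() and its body are assumptions, not code from any of these patches:

static int example_setup(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	/* Only this switch's CPU port(s); other switches sharing the same
	 * tree are filtered out by the (_dp)->ds == (_ds) check. */
	dsa_switch_for_each_cpu_port(dp, ds)
		dev_info(ds->dev, "cpu port %d\n", dp->index);

	/* User ports, replacing the old for (i = 0; i < ds->num_ports; i++)
	 * plus dsa_is_user_port(ds, i) pattern. */
	dsa_switch_for_each_user_port(dp, ds)
		dev_info(ds->dev, "user port %d\n", dp->index);

	return 0;
}
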
@@ -75,6 +75,7 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
__u8 protocol;
__u8 ttl;
__s16 tos;
char priority;
@@ -95,6 +96,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
ipcm->oif = inet->sk.sk_bound_dev_if;
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
}

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))

@@ -285,22 +285,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_update_nid(pool, new_nid);
}

static inline void page_pool_ring_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
if (in_serving_softirq())
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
}

static inline void page_pool_ring_unlock(struct page_pool *pool)
__releases(&pool->ring.producer_lock)
{
if (in_serving_softirq())
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
}

#endif /* _NET_PAGE_POOL_H */

@@ -260,6 +260,9 @@ bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);

/* Drivers not supporting XDP metadata can use this helper, which
* rejects any room expansion for metadata as a result.

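The new declarations above let a memory model be registered against a bare struct xdp_mem_info, without an xdp_rxq_info. A hedged sketch of the intended use; the example_* name and the surrounding driver structure are assumptions:

static int example_register_pool(struct xdp_mem_info *mem,
				 struct page_pool *pp)
{
	int err;

	/* Attach the page_pool as the memory model behind this mem info;
	 * frames carrying mem->id can then be recycled through the pool. */
	err = xdp_reg_mem_model(mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		return err;

	/* ... RX path runs, xdp_frames reference *mem ... */

	/* Teardown mirrors registration once no frames are in flight. */
	xdp_unreg_mem_model(mem);
	return 0;
}
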
@@ -858,6 +858,7 @@ enum {
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
__IFLA_BOND_MAX,
};


@@ -159,6 +159,8 @@ struct in_addr {
#define MCAST_MSFILTER 48
#define IP_MULTICAST_ALL 49
#define IP_UNICAST_IF 50
#define IP_LOCAL_PORT_RANGE 51
#define IP_PROTOCOL 52

#define MCAST_EXCLUDE 0
#define MCAST_INCLUDE 1

@@ -1165,7 +1165,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,

ret = htab_lock_bucket(htab, b, hash, &flags);
if (ret)
return ret;
goto err_lock_bucket;

l_old = lookup_elem_raw(head, hash, key, key_size);

@@ -1186,6 +1186,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
err:
htab_unlock_bucket(htab, b, hash, flags);

err_lock_bucket:
if (ret)
htab_lru_push_free(htab, l_new);
else if (l_old)
@@ -1288,7 +1289,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,

ret = htab_lock_bucket(htab, b, hash, &flags);
if (ret)
return ret;
goto err_lock_bucket;

l_old = lookup_elem_raw(head, hash, key, key_size);

@@ -1311,6 +1312,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
ret = 0;
err:
htab_unlock_bucket(htab, b, hash, flags);
err_lock_bucket:
if (l_new)
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
return ret;

@@ -980,6 +980,34 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,

BT_DBG("cmd %x arg %lx", cmd, arg);

/* Make sure the cmd is valid before doing anything */
switch (cmd) {
case HCIGETDEVLIST:
case HCIGETDEVINFO:
case HCIGETCONNLIST:
case HCIDEVUP:
case HCIDEVDOWN:
case HCIDEVRESET:
case HCIDEVRESTAT:
case HCISETSCAN:
case HCISETAUTH:
case HCISETENCRYPT:
case HCISETPTYPE:
case HCISETLINKPOL:
case HCISETLINKMODE:
case HCISETACLMTU:
case HCISETSCOMTU:
case HCIINQUIRY:
case HCISETRAW:
case HCIGETCONNINFO:
case HCIGETAUTHINFO:
case HCIBLOCKADDR:
case HCIUNBLOCKADDR:
break;
default:
return -ENOIOCTLCMD;
}

lock_sock(sk);

if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

@@ -26,6 +26,29 @@

#define BIAS_MAX LONG_MAX

static bool page_pool_producer_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
bool in_softirq = in_softirq();

if (in_softirq)
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);

return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
bool in_softirq)
__releases(&pool->ring.producer_lock)
{
if (in_softirq)
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
}

static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params)
{
@@ -390,8 +413,8 @@ static void page_pool_return_page(struct page_pool *pool, struct page *page)
static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
int ret;
/* BH protection not needed if current is serving softirq */
if (in_serving_softirq())
/* BH protection not needed if current is softirq */
if (in_softirq())
ret = ptr_ring_produce(&pool->ring, page);
else
ret = ptr_ring_produce_bh(&pool->ring, page);
@@ -446,7 +469,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);

if (allow_direct && in_serving_softirq() &&
if (allow_direct && in_softirq() &&
page_pool_recycle_in_cache(page, pool))
return NULL;

@@ -489,6 +512,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count)
{
int i, bulk_len = 0;
bool in_softirq;

for (i = 0; i < count; i++) {
struct page *page = virt_to_head_page(data[i]);
@@ -503,12 +527,12 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
return;

/* Bulk producer into ptr_ring page_pool cache */
page_pool_ring_lock(pool);
in_softirq = page_pool_producer_lock(pool);
for (i = 0; i < bulk_len; i++) {
if (__ptr_ring_produce(&pool->ring, data[i]))
break; /* ring full */
}
page_pool_ring_unlock(pool);
page_pool_producer_unlock(pool, in_softirq);

/* Hopefully all pages was return into ptr_ring */
if (likely(i == bulk_len))

@@ -110,20 +110,15 @@ static void mem_allocator_disconnect(void *allocator)
mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
struct xdp_mem_allocator *xa;
int type = xdp_rxq->mem.type;
int id = xdp_rxq->mem.id;
int type = mem->type;
int id = mem->id;

/* Reset mem info to defaults */
xdp_rxq->mem.id = 0;
xdp_rxq->mem.type = 0;

if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
WARN(1, "Missing register, driver bug");
return;
}
mem->id = 0;
mem->type = 0;

if (id == 0)
return;
@@ -135,6 +130,17 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
rcu_read_unlock();
}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
WARN(1, "Missing register, driver bug");
return;
}

xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
@@ -261,28 +267,24 @@ static bool __is_supported_mem_type(enum xdp_mem_type type)
return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator)
static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type,
void *allocator)
{
struct xdp_mem_allocator *xdp_alloc;
gfp_t gfp = GFP_KERNEL;
int id, errno, ret;
void *ptr;

if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
WARN(1, "Missing register, driver bug");
return -EFAULT;
}

if (!__is_supported_mem_type(type))
return -EOPNOTSUPP;
return ERR_PTR(-EOPNOTSUPP);

xdp_rxq->mem.type = type;
mem->type = type;

if (!allocator) {
if (type == MEM_TYPE_PAGE_POOL)
return -EINVAL; /* Setup time check page_pool req */
return 0;
return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
return NULL;
}

/* Delay init of rhashtable to save memory if feature isn't used */
@@ -292,13 +294,13 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
mutex_unlock(&mem_id_lock);
if (ret < 0) {
WARN_ON(1);
return ret;
return ERR_PTR(ret);
}
}

xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
if (!xdp_alloc)
return -ENOMEM;
return ERR_PTR(-ENOMEM);

mutex_lock(&mem_id_lock);
id = __mem_id_cyclic_get(gfp);
@@ -306,15 +308,15 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
errno = id;
goto err;
}
xdp_rxq->mem.id = id;
xdp_alloc->mem = xdp_rxq->mem;
mem->id = id;
xdp_alloc->mem = *mem;
xdp_alloc->allocator = allocator;

/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
xdp_rxq->mem.id = 0;
ida_simple_remove(&mem_id_pool, mem->id);
mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
}
@@ -324,13 +326,44 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,

mutex_unlock(&mem_id_lock);

trace_mem_connect(xdp_alloc, xdp_rxq);
return 0;
return xdp_alloc;
err:
mutex_unlock(&mem_id_lock);
kfree(xdp_alloc);
return errno;
return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
enum xdp_mem_type type, void *allocator)
{
struct xdp_mem_allocator *xdp_alloc;

xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
if (IS_ERR(xdp_alloc))
return PTR_ERR(xdp_alloc);
return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
enum xdp_mem_type type, void *allocator)
{
struct xdp_mem_allocator *xdp_alloc;

if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
WARN(1, "Missing register, driver bug");
return -EFAULT;
}

xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
if (IS_ERR(xdp_alloc))
return PTR_ERR(xdp_alloc);

if (trace_mem_connect_enabled() && xdp_alloc)
trace_mem_connect(xdp_alloc, xdp_rxq);
return 0;
}

EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error

@@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
ipc->tos = val;
ipc->priority = rt_tos2priority(ipc->tos);
break;

case IP_PROTOCOL:
if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
return -EINVAL;
val = *(int *)CMSG_DATA(cmsg);
if (val < 1 || val > 255)
return -EINVAL;
ipc->protocol = val;
break;
default:
return -EINVAL;
}
@@ -1724,6 +1731,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_MINTTL:
val = inet->min_ttl;
break;
case IP_PROTOCOL:
val = inet_sk(sk)->inet_num;
break;
default:
release_sock(sk);
return -ENOPROTOOPT;

@@ -559,6 +559,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}

ipcm_init_sk(&ipc, inet);
/* Keep backward compat */
if (hdrincl)
ipc.protocol = IPPROTO_RAW;

if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -626,7 +629,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
RT_SCOPE_UNIVERSE,
hdrincl ? IPPROTO_RAW : sk->sk_protocol,
hdrincl ? ipc.protocol : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);

@@ -828,7 +828,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
else if (proto != inet->inet_num &&
inet->inet_num != IPPROTO_RAW)
return -EINVAL;

if (proto > 255)

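Taken together, the in.h, ip_sockglue.c and raw.c hunks let a hdrincl raw socket tell the kernel which transport protocol its packets carry, so the flow and xfrm lookups use that protocol instead of always seeing IPPROTO_RAW. A hedged userspace sketch; the helper name is an assumption, IP_PROTOCOL is 52 per the uapi hunk above, and the value must be 1-255:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#ifndef IP_PROTOCOL
#define IP_PROTOCOL 52		/* may be missing from older libc headers */
#endif

/* msg must already have msg_name and msg_iov set; raw_fd was created with
 * socket(AF_INET, SOCK_RAW, IPPROTO_RAW), i.e. a hdrincl socket. */
static ssize_t send_with_protocol(int raw_fd, struct msghdr *msg, int proto)
{
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	struct cmsghdr *cmsg;

	msg->msg_control = cbuf;
	msg->msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = IPPROTO_IP;	/* SOL_IP */
	cmsg->cmsg_type = IP_PROTOCOL;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &proto, sizeof(int));	/* 1..255 */

	return sendmsg(raw_fd, msg, 0);
}
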
@@ -1546,9 +1546,6 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {

static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
return 0;

return ctnetlink_filter_match(ct, data);
}

@@ -1612,11 +1609,6 @@ static int ctnetlink_del_conntrack(struct sk_buff *skb,

ct = nf_ct_tuplehash_to_ctrack(h);

if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
nf_ct_put(ct);
return -EBUSY;
}

if (cda[CTA_ID]) {
__be32 id = nla_get_be32(cda[CTA_ID]);


@@ -655,6 +655,7 @@ enum {
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
IFLA_BOND_AD_LACP_ACTIVE,
IFLA_BOND_MISSED_MAX,
__IFLA_BOND_MAX,
};