Revert "bpf: Defer the free of inner map when necessary"

This reverts commit 37d98fb9c3 which is
commit 876673364161da50eed6b472d746ef88242b2368 upstream.

It breaks the build as other previous commits that were reverted for ABI
issues are not in the tree here.

Bug: 161946584
Change-Id: I6f30910efb8877d024e8e0866f2e62e3b25e0959
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2024-05-22 09:52:18 +00:00
parent 8c23052a2a
commit 6a87f57d6b
3 changed files with 6 additions and 38 deletions

View File

@@ -192,14 +192,9 @@ struct bpf_map {
 	 */
 	atomic64_t refcnt ____cacheline_aligned;
 	atomic64_t usercnt;
-	/* rcu is used before freeing and work is only used during freeing */
-	union {
-		struct work_struct work;
-		struct rcu_head rcu;
-	};
+	struct work_struct work;
 	struct mutex freeze_mutex;
 	atomic64_t writecnt;
-	bool free_after_mult_rcu_gp;
 };
 
 static inline bool map_value_has_spin_lock(const struct bpf_map *map)

View File

@@ -110,15 +110,10 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
 void bpf_map_fd_put_ptr(void *ptr)
 {
-	struct bpf_map *inner_map = ptr;
-
-	/* The inner map may still be used by both non-sleepable and sleepable
-	 * bpf program, so free it after one RCU grace period and one tasks
-	 * trace RCU grace period.
-	 */
-	if (need_defer)
-		WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
-	bpf_map_put(inner_map);
+	/* ptr->ops->map_free() has to go through one
+	 * rcu grace period by itself.
+	 */
+	bpf_map_put(ptr);
 }
 
 u32 bpf_map_fd_sys_lookup_elem(void *ptr)

View File

@@ -489,25 +489,6 @@ static void bpf_map_put_uref(struct bpf_map *map)
 	}
 }
 
-static void bpf_map_free_in_work(struct bpf_map *map)
-{
-	INIT_WORK(&map->work, bpf_map_free_deferred);
-	schedule_work(&map->work);
-}
-
-static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
-{
-	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
-}
-
-static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
-{
-	if (rcu_trace_implies_rcu_gp())
-		bpf_map_free_rcu_gp(rcu);
-	else
-		call_rcu(rcu, bpf_map_free_rcu_gp);
-}
-
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (unrelying map implementation ops->map_free() might sleep)
  */
@@ -517,11 +498,8 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 		/* bpf_map_free_id() must be called first */
 		bpf_map_free_id(map, do_idr_lock);
 		btf_put(map->btf);
-		if (READ_ONCE(map->free_after_mult_rcu_gp))
-			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
-		else
-			bpf_map_free_in_work(map);
+		INIT_WORK(&map->work, bpf_map_free_deferred);
+		schedule_work(&map->work);
 	}
 }