bpf: Factor out a common helper free_all()

Factor out a common helper free_all() to free all normal elements or
per-cpu elements on a lock-less list.
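
For readers unfamiliar with the allocator internals, the helper simply walks an
already-detached lock-less list once and frees every node, with a flag selecting
the per-cpu layout. The sketch below models that pattern in plain userspace C as
an illustration only; the node layout, the free_one()/free_all() bodies and main()
are simplified stand-ins, not the kernel's llist API.

  #include <stdbool.h>
  #include <stdlib.h>

  /* Simplified stand-in for the allocator's list node. */
  struct node {
  	struct node *next;
  };

  /* Stand-in for free_one(): a per-cpu object would also need its
   * per-cpu storage released before the wrapper itself is freed. */
  static void free_one(struct node *obj, bool percpu)
  {
  	if (percpu) {
  		/* kernel code does free_percpu(((void **)obj)[1]) here */
  	}
  	free(obj);
  }

  /* The factored-out helper: consume a detached list, free each node. */
  static void free_all(struct node *head, bool percpu)
  {
  	struct node *pos = head, *next;

  	while (pos) {
  		next = pos->next;	/* save next before freeing pos */
  		free_one(pos, percpu);
  		pos = next;
  	}
  }

  int main(void)
  {
  	struct node *head = NULL;

  	/* Build a small list, then release it with one helper call. */
  	for (int i = 0; i < 4; i++) {
  		struct node *n = malloc(sizeof(*n));

  		if (!n)
  			break;
  		n->next = head;
  		head = n;
  	}
  	free_all(head, false);
  	return 0;
  }

In the patch itself, __free_rcu() and drain_mem_cache() become callers of this
helper, so the list-walking logic lives in one place instead of five.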

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20230606035310.4026145-2-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Author:    Hou Tao
Date:      2023-06-06 11:53:08 +08:00
Committer: Alexei Starovoitov
Parent:    095641817e
Commit:    aa7881fcfe

@@ -211,9 +211,9 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	mem_cgroup_put(memcg);
 }
 
-static void free_one(struct bpf_mem_cache *c, void *obj)
+static void free_one(void *obj, bool percpu)
 {
-	if (c->percpu_size) {
+	if (percpu) {
 		free_percpu(((void **)obj)[1]);
 		kfree(obj);
 		return;
@@ -222,14 +222,19 @@ static void free_one(struct bpf_mem_cache *c, void *obj)
 	kfree(obj);
 }
 
-static void __free_rcu(struct rcu_head *head)
+static void free_all(struct llist_node *llnode, bool percpu)
 {
-	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
-	struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
 	struct llist_node *pos, *t;
 
 	llist_for_each_safe(pos, t, llnode)
-		free_one(c, pos);
+		free_one(pos, percpu);
+}
+
+static void __free_rcu(struct rcu_head *head)
+{
+	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+
+	free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
 	atomic_set(&c->call_rcu_in_progress, 0);
 }
 
@@ -432,7 +437,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 
 static void drain_mem_cache(struct bpf_mem_cache *c)
 {
-	struct llist_node *llnode, *t;
+	bool percpu = !!c->percpu_size;
 
 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
 	 * bpf_mem_cache_free() for all remaining elements and they can be in
@@ -441,14 +446,10 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
 	 * Except for waiting_for_gp list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
-		free_one(c, llnode);
-	llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
-		free_one(c, llnode);
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
-		free_one(c, llnode);
-	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
-		free_one(c, llnode);
+	free_all(__llist_del_all(&c->free_by_rcu), percpu);
+	free_all(llist_del_all(&c->waiting_for_gp), percpu);
+	free_all(__llist_del_all(&c->free_llist), percpu);
+	free_all(__llist_del_all(&c->free_llist_extra), percpu);
 }
 
 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)