bpf: Split cgroup_bpf_enabled per attach type
When we attach any cgroup hook, the rest (even if unused/unattached) start
to contribute a small overhead. In particular, the one we want to avoid is
__cgroup_bpf_run_filter_skb, which does two redirections to get to the
cgroup and pushes/pulls the skb.

Let's split cgroup_bpf_enabled to be per attach type, so that only the
attach types actually in use trigger their hooks.

I've dropped the existing high-level cgroup_bpf_enabled checks in some
places because the BPF_PROG_CGROUP_XXX_RUN macros usually carry another
cgroup_bpf_enabled check of their own.

I also had to copy-paste BPF_CGROUP_RUN_SA_PROG_LOCK for
GETPEERNAME/GETSOCKNAME because the type index into
cgroup_bpf_enabled[type] has to be constant and known at compile time.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20210115163501.805133-4-sdf@google.com
commit a9ed15dae0 (parent 20f2505fb4)
committed by Alexei Starovoitov
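Before the hunks, a minimal userspace C sketch of the idea behind the split,
under illustrative names (enabled_count, attach, run_skb_hook and the enum
values are not kernel API; the kernel uses a static-key array toggled with
static_branch_inc/dec, which patches the branch at runtime instead of
reading a counter):

#include <stdbool.h>
#include <stdio.h>

enum attach_type { EGRESS, SETSOCKOPT, GETSOCKOPT, MAX_ATTACH_TYPE };

/* One counter per attach type; stand-in for the kernel's
 * DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE). */
static int enabled_count[MAX_ATTACH_TYPE];

static void attach(enum attach_type t)  { enabled_count[t]++; } /* ~static_branch_inc */
static void detach(enum attach_type t)  { enabled_count[t]--; } /* ~static_branch_dec */
static bool enabled(enum attach_type t) { return enabled_count[t] > 0; }

static void run_skb_hook(void)
{
	/* Before the split this tested a single global flag, so attaching
	 * any hook type (e.g. a setsockopt program) forced the expensive
	 * skb path here. Now only the EGRESS key matters. */
	if (!enabled(EGRESS))
		return; /* fast path: no egress program attached */
	puts("running egress filter");
}

int main(void)
{
	attach(SETSOCKOPT); /* only the setsockopt key flips on */
	run_skb_hook();     /* skb path still takes the fast branch */
	attach(EGRESS);
	run_skb_hook();     /* now the egress program actually runs */
	detach(EGRESS);
	detach(SETSOCKOPT);
	return 0;
}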
kernel/bpf/cgroup.c:

@@ -19,7 +19,7 @@
 #include "../cgroup/cgroup-internal.h"
 
-DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
 EXPORT_SYMBOL(cgroup_bpf_enabled_key);
 
 void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -128,7 +128,7 @@ static void cgroup_bpf_release(struct work_struct *work)
 			if (pl->link)
 				bpf_cgroup_link_auto_detach(pl->link);
 			kfree(pl);
-			static_branch_dec(&cgroup_bpf_enabled_key);
+			static_branch_dec(&cgroup_bpf_enabled_key[type]);
 		}
 		old_array = rcu_dereference_protected(
 				cgrp->bpf.effective[type],
@@ -499,7 +499,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
 	if (old_prog)
 		bpf_prog_put(old_prog);
 	else
-		static_branch_inc(&cgroup_bpf_enabled_key);
+		static_branch_inc(&cgroup_bpf_enabled_key[type]);
 	bpf_cgroup_storages_link(new_storage, cgrp, type);
 	return 0;
 
@@ -698,7 +698,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 	cgrp->bpf.flags[type] = 0;
 	if (old_prog)
 		bpf_prog_put(old_prog);
-	static_branch_dec(&cgroup_bpf_enabled_key);
+	static_branch_dec(&cgroup_bpf_enabled_key[type]);
 	return 0;
 
 cleanup:
@@ -1360,8 +1360,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 	 * attached to the hook so we don't waste time allocating
 	 * memory and locking the socket.
 	 */
-	if (!cgroup_bpf_enabled ||
-	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
+	if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
 		return 0;
 
 	/* Allocate a bit more than the initial user buffer for
@@ -1457,8 +1456,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 	 * attached to the hook so we don't waste time allocating
 	 * memory and locking the socket.
 	 */
-	if (!cgroup_bpf_enabled ||
-	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
+	if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
 		return retval;
 
 	ctx.optlen = max_optlen;
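The hunks above cover only the kernel/bpf/cgroup.c side; the companion
header change makes the enabled test per type. In spirit it looks like the
sketch below (an assumption about include/linux/bpf-cgroup.h, which this
page does not show; the real definition may differ in detail). It also
illustrates the compile-time constraint the commit message mentions:
static_branch_unlikely() expands to a jump label wired to one specific key
at build time, so the index into the key array cannot be a runtime value,
which is why BPF_CGROUP_RUN_SA_PROG_LOCK had to be copy-pasted for
GETPEERNAME/GETSOCKNAME instead of taking the type as a variable.

/* Sketch of the per-type enabled test; illustrative, not a verbatim copy
 * of the header change. 'type' must be a compile-time constant because
 * the static branch is patched against one specific key. */
#define cgroup_bpf_enabled(type) \
	static_branch_unlikely(&cgroup_bpf_enabled_key[type])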