xdp: Use bulking for non-map XDP_REDIRECT and consolidate code paths
Since the bulk queue used by XDP_REDIRECT now lives in struct net_device, we can re-use the bulking for the non-map version of the bpf_redirect() helper. This is a simple matter of having xdp_do_redirect_slow() queue the frame on the bulk queue instead of sending it out with __bpf_tx_xdp().

Unfortunately we can't make the bpf_redirect() helper return an error if the ifindex doesn't exist (as bpf_redirect_map() does), because we don't have a reference to the network namespace of the ingress device at the time the helper is called. So we have to leave it as-is and keep the device lookup in xdp_do_redirect_slow().

Since this leaves less reason to have the non-map redirect code in a separate function, we get rid of the xdp_do_redirect_slow() function entirely. This does lose us the tracepoint disambiguation, but fortunately the xdp_redirect and xdp_redirect_map tracepoints use the same tracepoint entry structures. This means both can contain a map index, so we can just amend the tracepoint definitions so we always emit the xdp_redirect(_err) tracepoints, but with the map ID only populated if a map is present. This means we retire the xdp_redirect_map(_err) tracepoints entirely, but keep the definitions around in case someone is still listening for them.

With this change, the performance of the xdp_redirect sample program goes from 5Mpps to 8.4Mpps (a 68% increase).

Since the flush functions are no longer map-specific, rename the flush() functions to drop _map from their names. One of the renamed functions is the xdp_do_flush_map() callback used in all the xdp-enabled drivers. To keep from having to update all drivers, use a #define to keep the old name working, and only update the virtual drivers in this patch.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/157918768505.1458396.17518057312953572912.stgit@toke.dk
committed by Alexei Starovoitov
parent 75ccae62cb
commit 1d233886dd
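Before the hunks below, the backwards-compatibility shim described in the commit message can be pictured as follows. This is a minimal sketch of the idea, assuming the alias sits next to the renamed declaration; the header hunk itself is not part of this excerpt:

/* Sketch of the compat #define: the flush hook loses its _map suffix,
 * and a macro keeps the old name compiling in unconverted drivers. */
void xdp_do_flush(void);

/* Deprecated name, kept so drivers not updated in this patch still build. */
#define xdp_do_flush_map xdp_do_flush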
kernel/bpf/devmap.c:

@@ -81,7 +81,7 @@ struct bpf_dtab {
 	u32 n_buckets;
 };

-static DEFINE_PER_CPU(struct list_head, dev_map_flush_list);
+static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);

@@ -357,16 +357,16 @@ error:
 	goto out;
 }

-/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled
+/* __dev_flush is called from xdp_do_flush() which _must_ be signaled
  * from the driver before returning from its napi->poll() routine. The poll()
  * routine is called either from busy_poll context or net_rx_action signaled
  * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
  * net device can be torn down. On devmap tear down we ensure the flush list
  * is empty before completing to ensure all flush operations have completed.
  */
-void __dev_map_flush(void)
+void __dev_flush(void)
 {
-	struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 	struct xdp_dev_bulk_queue *bq, *tmp;

 	rcu_read_lock();
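As the comment in this hunk spells out, a driver must complete its flush before leaving poll(). A hypothetical driver-side NAPI poll routine illustrating that contract (the example_* names are invented for this sketch):

/* Hypothetical NAPI poll: every xdp_do_redirect() issued while processing
 * RX on this CPU is completed by one xdp_do_flush() before poll() returns,
 * which drains the per-CPU flush list via __dev_flush(). */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	/* example_process_rx() is assumed to run the XDP program and may
	 * call xdp_do_redirect() for XDP_REDIRECT verdicts. */
	int work_done = example_process_rx(napi, budget);

	xdp_do_flush();		/* the old name xdp_do_flush_map still works */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}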
@@ -396,9 +396,8 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
  */
 static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 		      struct net_device *dev_rx)
-
 {
-	struct list_head *flush_list = this_cpu_ptr(&dev_map_flush_list);
+	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
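For context, bq_enqueue() appends frames to the per-CPU queue and only transmits when DEV_MAP_BULK_SIZE frames have accumulated, or at flush time. A freestanding model of that pattern, with invented names and assuming a bulk size of 16 as in the kernel:

/* Minimal standalone model of the bulk-queue pattern (hypothetical code,
 * not from the patch): frames accumulate per CPU and are sent in one batch
 * either when the queue fills up or at the end-of-poll flush. */
#include <stddef.h>

#define BULK_SIZE 16			/* DEV_MAP_BULK_SIZE plays this role */

struct bulk_queue {
	void *frames[BULK_SIZE];
	size_t count;
};

static void bq_flush(struct bulk_queue *bq)
{
	/* the real code hands the whole array to ndo_xdp_xmit() here */
	bq->count = 0;
}

static void bq_enqueue_model(struct bulk_queue *bq, void *frame)
{
	if (bq->count == BULK_SIZE)	/* queue full: send the batch first */
		bq_flush(bq);

	bq->frames[bq->count++] = frame;
}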
@@ -419,10 +418,9 @@ static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 	return 0;
 }

-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
-		    struct net_device *dev_rx)
+static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+				struct net_device *dev_rx)
 {
-	struct net_device *dev = dst->dev;
 	struct xdp_frame *xdpf;
 	int err;

@@ -440,6 +438,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 	return bq_enqueue(dev, xdpf, dev_rx);
 }

+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
+{
+	return __xdp_enqueue(dev, xdp, dev_rx);
+}
+
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
+{
+	struct net_device *dev = dst->dev;
+
+	return __xdp_enqueue(dev, xdp, dev_rx);
+}
+
 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog)
 {
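The two exported wrappers added in this hunk share __xdp_enqueue(). How the consolidated core redirect path might call them can be sketched as follows; net/core/filter.c is not part of this excerpt, so this is a paraphrase with an invented function name, not the patch's code:

/* Illustrative only: the map path gets its target resolved up front by
 * bpf_redirect_map(), while the non-map path has to look up the ifindex
 * here, for the netns-reference reason given in the commit message. */
static int redirect_path_sketch(struct net_device *dev_rx, struct xdp_buff *xdp,
				struct bpf_map *map,
				struct bpf_dtab_netdev *tgt, u32 ifindex)
{
	struct net_device *fwd;

	if (map)	/* devmap case: target already resolved */
		return dev_map_enqueue(tgt, xdp, dev_rx);

	/* bpf_redirect() case: only now do we have the ingress netns */
	fwd = dev_get_by_index_rcu(dev_net(dev_rx), ifindex);
	if (unlikely(!fwd))
		return -EINVAL;

	return dev_xdp_enqueue(fwd, xdp, dev_rx);
}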
@@ -762,7 +774,7 @@ static int __init dev_map_init(void)
 	register_netdevice_notifier(&dev_map_notifier);

 	for_each_possible_cpu(cpu)
-		INIT_LIST_HEAD(&per_cpu(dev_map_flush_list, cpu));
+		INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
 	return 0;
 }

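Finally, the tracepoint consolidation described in the commit message lives in include/trace/events/xdp.h and is not among the hunks above. The gist of "map ID only populated if a map is present" can be illustrated with a hypothetical helper; all names below are invented, and the real change amends the TRACE_EVENT definitions themselves:

/* Hypothetical sketch: both redirect flavours feed one event type, and the
 * map fields fall back to sentinel values when no map was involved. */
struct redirect_trace_args {		/* invented for this sketch */
	u32 tgt_index;
	int err;
	u32 map_id;			/* 0 means "no map" in this sketch */
	u32 map_index;
};

static void fill_redirect_trace_args(struct redirect_trace_args *args,
				     const struct bpf_map *map,
				     u32 index, int err)
{
	args->tgt_index = index;
	args->err = err;
	args->map_id = map ? map->id : 0;	/* populated only for map redirects */
	args->map_index = map ? index : 0;
}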