bpf: {cpu,dev}map: Change various functions return type from int to void
The functions bq_enqueue(), bq_flush_to_queue(), and bq_xmit_all() in
{cpu,dev}map.c always return zero. Changing the return type from int
to void makes the code easier to follow.

Suggested-by: David Ahern <dsahern@gmail.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20200901083928.6199-1-bjorn.topel@gmail.com
parent f56407fa6e
commit ebc4ecd48c

2 changed files with 10 additions and 16 deletions
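As a standalone illustration of the pattern the commit message describes: a helper that can only ever return zero gives each caller a dead error path, so making it void states the contract in the type, while a caller that is itself part of a 0-on-success API returns 0 explicitly instead. The following is a minimal sketch with hypothetical names (bulk_queue, bq_flush, enqueue_and_flush), not the kernel code itself.

#include <stdio.h>

struct bulk_queue {
	int count;
};

/* Before the pattern applied here, this would have been
 * "static int bq_flush(...)" ending in "return 0;" -- an int
 * return that carried no information. void makes the no-error
 * contract explicit. */
static void bq_flush(struct bulk_queue *bq)
{
	if (!bq->count)
		return;			/* was: return 0; */
	/* ... drain the queue here ... */
	bq->count = 0;
}

/* A caller whose own contract is "0 on success" now returns 0
 * itself, mirroring the __xdp_enqueue() hunk in devmap.c below. */
static int enqueue_and_flush(struct bulk_queue *bq)
{
	bq->count++;			/* stand-in for the real enqueue */
	bq_flush(bq);			/* was: return bq_flush(bq); */
	return 0;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	int ret = enqueue_and_flush(&bq);

	printf("status: %d, queued after flush: %d\n", ret, bq.count);
	return 0;
}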
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -79,8 +79,6 @@ struct bpf_cpu_map {
 
 static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
-
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
 	u32 value_size = attr->value_size;
@@ -670,7 +668,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_btf_id	= &cpu_map_btf_id,
 };
 
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
+static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
 {
 	struct bpf_cpu_map_entry *rcpu = bq->obj;
 	unsigned int processed = 0, drops = 0;
@@ -679,7 +677,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
 	int i;
 
 	if (unlikely(!bq->count))
-		return 0;
+		return;
 
 	q = rcpu->queue;
 	spin_lock(&q->producer_lock);
@@ -702,13 +700,12 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
 
 	/* Feedback loop via tracepoints */
 	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
-	return 0;
 }
 
 /* Runs under RCU-read-side, plus in softirq under NAPI protection.
  * Thus, safe percpu variable access.
  */
-static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 {
 	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
@@ -729,8 +726,6 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 
 	if (!bq->flush_node.prev)
 		list_add(&bq->flush_node, flush_list);
-
-	return 0;
 }
 
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -341,14 +341,14 @@ bool dev_map_can_have_prog(struct bpf_map *map)
 	return false;
 }
 
-static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = bq->dev;
 	int sent = 0, drops = 0, err = 0;
 	int i;
 
 	if (unlikely(!bq->count))
-		return 0;
+		return;
 
 	for (i = 0; i < bq->count; i++) {
 		struct xdp_frame *xdpf = bq->q[i];
@@ -369,7 +369,7 @@ static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
 	bq->dev_rx = NULL;
 	__list_del_clearprev(&bq->flush_node);
-	return 0;
+	return;
 error:
 	/* If ndo_xdp_xmit fails with an errno, no frames have been
 	 * xmit'ed and it's our responsibility to them free all.
@@ -421,8 +421,8 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 /* Runs under RCU-read-side, plus in softirq under NAPI protection.
  * Thus, safe percpu variable access.
  */
-static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
-		      struct net_device *dev_rx)
+static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+		       struct net_device *dev_rx)
 {
 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
@@ -441,8 +441,6 @@ static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 
 	if (!bq->flush_node.prev)
 		list_add(&bq->flush_node, flush_list);
-
-	return 0;
 }
 
 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -462,7 +460,8 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 
-	return bq_enqueue(dev, xdpf, dev_rx);
+	bq_enqueue(dev, xdpf, dev_rx);
+	return 0;
 }
 
 static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
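One point worth noting about the diff above: the only hunk that touches a call site is the __xdp_enqueue() change in devmap.c, and it is behavior-preserving. Because bq_enqueue() could previously only ever return 0, "return bq_enqueue(dev, xdpf, dev_rx);" and "bq_enqueue(dev, xdpf, dev_rx); return 0;" are equivalent. Every other removed "return 0;" was a constant value whose removal required no further call-site updates in this patch.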