// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
/* The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do the SKB-allocation and call the normal network stack.
 *
 * This is a scalability and isolation mechanism that allows
 * separating the early driver network XDP layer from the rest of the
 * netstack, and assigning dedicated CPUs for this stage.  This
 * basically allows for 10G wirespeed pre-filtering via bpf.
 */
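/* For orientation, a minimal sketch (not part of this file) of how an XDP
 * program on the driver side would typically target a cpumap; the map
 * name, section names and CPU selection below are illustrative only:
 *
 *   #include <linux/bpf.h>
 *   #include <bpf/bpf_helpers.h>
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_CPUMAP);
 *           __uint(key_size, sizeof(__u32));
 *           __uint(value_size, sizeof(struct bpf_cpumap_val));
 *           __uint(max_entries, 64);
 *   } cpu_map SEC(".maps");
 *
 *   SEC("xdp")
 *   int xdp_redirect_to_cpu(struct xdp_md *ctx)
 *   {
 *           __u32 dest_cpu = 2; // e.g. derived from RX queue or flow hash
 *
 *           return bpf_redirect_map(&cpu_map, dest_cpu, 0);
 *   }
 */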
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/capability.h>
#include <trace/events/xdp.h>

#include <linux/netdevice.h>   /* netif_receive_skb_core */
#include <linux/etherdevice.h> /* eth_type_trans */
/* General idea: XDP packets getting XDP redirected to another CPU,
 * will maximum be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */

#define CPU_MAP_BULK_SIZE 8 /* 8 == one cacheline on 64-bit archs */
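/* For context, a hedged sketch of the driver-side contract this relies on;
 * everything except the xdp_do_flush() kernel API is hypothetical driver
 * code, shown only to illustrate that enqueue and flush share one CPU:
 *
 *   static int example_napi_poll(struct napi_struct *napi, int budget)
 *   {
 *           int done = 0;
 *
 *           while (done < budget && example_rx_frame_available(napi)) {
 *                   // Runs the XDP prog; an XDP_REDIRECT into a cpumap
 *                   // lands in cpu_map_enqueue() on this same CPU.
 *                   done += example_process_rx_frame(napi);
 *           }
 *
 *           // Flush the per-CPU bulk queues; for cpumap this ends up in
 *           // __cpu_map_flush() and wakes the destination kthreads.
 *           xdp_do_flush();
 *
 *           return done;
 *   }
 */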
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};
/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	struct bpf_cpu_map *cmap;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	atomic_t refcnt; /* Control when this struct can be free'ed */
	struct rcu_head rcu;

	struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry **cpu_map;
};
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;
	int err = -ENOMEM;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (cmap->map.max_entries > NR_CPUS) {
		err = -E2BIG;
		goto free_cmap;
	}

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map)
		goto free_cmap;

	return &cmap->map;
free_cmap:
	kfree(cmap);
	return ERR_PTR(err);
}
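/* Illustrative only (not part of this file): from userspace, a cpumap that
 * passes the sanity checks above could be created and populated roughly
 * like this with a recent libbpf; names and sizes are example choices:
 *
 *   #include <unistd.h>
 *   #include <bpf/bpf.h>
 *   #include <linux/bpf.h>
 *
 *   int create_cpumap(__u32 n_entries, __u32 dest_cpu, __u32 qsize)
 *   {
 *           struct bpf_cpumap_val val = { .qsize = qsize };
 *           __u32 key = dest_cpu;
 *           int map_fd;
 *
 *           map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *                                   sizeof(__u32), sizeof(val),
 *                                   n_entries, NULL);
 *           if (map_fd < 0)
 *                   return map_fd;
 *
 *           // A non-zero qsize allocates the per-CPU kthread + ptr_ring
 *           if (bpf_map_update_elem(map_fd, &key, &val, 0) < 0) {
 *                   close(map_fd);
 *                   return -1;
 *           }
 *           return map_fd;
 *   }
 */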
static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	atomic_inc(&rcpu->refcnt);
}
/* called from workqueue, to workaround syscall using preempt_disable */
static void cpu_map_kthread_stop(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);

	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
	 * as it waits until all in-flight call_rcu() callbacks complete.
	 */
	rcu_barrier();

	/* kthread_stop will wake_up_process and wait for it to complete */
	kthread_stop(rcpu->kthread);
}
static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
					 struct sk_buff *skb)
{
	unsigned int hard_start_headroom;
	unsigned int frame_size;
	void *pkt_data_start;

	/* Part of headroom was reserved to xdpf */
	hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	pkt_data_start = xdpf->data - hard_start_headroom;
	skb = build_skb_around(skb, pkt_data_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hard_start_headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, xdpf->dev_rx);

	/* Optional SKB info, currently missing:
	 * - HW checksum info           (skb->ip_summed)
	 * - HW RX hash                 (skb_set_hash)
	 * - RX ring dev queue index    (skb_record_rx_queue)
	 */

	/* Until page_pool gets SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that queue is
	 * empty. See __cpu_map_entry_replace() and work-queue
	 * invoked cpu_map_kthread_stop(). Catch any broken behaviour
	 * gracefully and warn once.
	 */
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)))
		if (WARN_ON_ONCE(xdpf))
			xdp_return_frame(xdpf);
}
static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
{
	if (atomic_dec_and_test(&rcpu->refcnt)) {
		if (rcpu->prog)
			bpf_prog_put(rcpu->prog);
		/* The queue should be empty at this point */
		__cpu_map_ring_cleanup(rcpu->queue);
		ptr_ring_cleanup(rcpu->queue, NULL);
		kfree(rcpu->queue);
		kfree(rcpu);
	}
}
static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	if (!rcpu->prog)
		return n;

	rcu_read_lock_bh();

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	if (stats->redirect)
		xdp_do_flush_map();

	xdp_clear_return_frame_no_direct();

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}

#define CPUMAP_BATCH 8
static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread gets the stop order, rcpu has been disconnected
	 * from the map, thus no new packets can enter. Remaining in-flight
	 * per-CPU stored packets are flushed to this queue.  Wait, honoring
	 * the kthread_stop signal, until the queue is empty.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		unsigned int drops = 0, sched = 0;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		int i, n, m, nframes;

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is single consumer, with this
		 * kthread CPU pinned. Lockless access to ptr_ring
		 * consume side valid as no-resize allowed of queue.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);

		m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
		if (unlikely(m == 0)) {
			for (i = 0; i < nframes; i++)
				skbs[i] = NULL; /* effect: xdp_return_frame */
			drops += nframes;
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];
			int ret;

			skb = cpu_map_build_skb(xdpf, skb);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			/* Inject into network stack */
			ret = netif_receive_skb_core(skb);
			if (ret == NET_RX_DROP)
				drops++;
		}

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	put_cpu_map_entry(rcpu);
	return 0;
}
bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_CPUMAP &&
	       map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
}
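/* Illustrative only: when the map was created with the larger value_size,
 * userspace can attach a per-entry XDP program (loaded with
 * expected_attach_type BPF_XDP_CPUMAP) that the kthread will run on the
 * remote CPU.  A rough libbpf-based sketch, with map_fd/prog_fd obtained
 * elsewhere:
 *
 *   struct bpf_cpumap_val val = {
 *           .qsize       = 2048,     // per-CPU ptr_ring size
 *           .bpf_prog.fd = prog_fd,  // run by the kthread on dest CPU
 *   };
 *   __u32 key = dest_cpu;
 *
 *   // Fails (e.g. -EINVAL) if the program's expected_attach_type is
 *   // not BPF_XDP_CPUMAP; see __cpu_map_load_bpf_program() below.
 *   err = bpf_map_update_elem(map_fd, &key, &val, 0);
 */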
static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}
static void __cpu_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace-period has elapsed.  Thus, XDP cannot queue any
	 * new packets and cannot change/set flush_needed that can
	 * find this entry.
	 */
	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

	free_percpu(rcpu->bulkq);
	/* Cannot kthread_stop() here, the last put frees rcpu resources */
	put_cpu_map_entry(rcpu);
}
/* After the xchg of the pointer to bpf_cpu_map_entry, use call_rcu() to
 * ensure any driver rcu critical sections have completed, but this
 * does not guarantee a flush has happened yet, because the driver-side
 * rcu_read_lock/unlock only protects the running XDP program.  The
 * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
 * pending flush op doesn't fail.
 *
 * The bpf_cpu_map_entry is still used by the kthread, and there can
 * still be pending packets (in queue and percpu bulkq).  A refcnt
 * makes sure the last user (kthread_stop vs. call_rcu) frees the memory
 * resources.
 *
 * The rcu callback __cpu_map_entry_free flushes remaining packets in
 * percpu bulkq to the queue.  Because the caller map_delete_elem()
 * disables preemption, we cannot call kthread_stop() to make sure the
 * queue is empty.  Instead a work_queue is started for stopping the
 * kthread, cpu_map_kthread_stop, which waits for an RCU grace period
 * before stopping the kthread, emptying the queue.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
	if (old_rcpu) {
		call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
		INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
		schedule_work(&old_rcpu->kthread_stop_wq);
	}
}
static int cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses preempt_disable() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}
static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
		rcpu->cmap = cmap;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
	 * It does __not__ ensure pending flush operations (if any) are
	 * complete.
	 */
	bpf_clear_redirect_map(map);
	synchronize_rcu();

	/* For cpu_map the remote CPUs can still be using the entries
	 * (struct bpf_cpu_map_entry).
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = READ_ONCE(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* bq flush and cleanup happens after RCU grace-period */
		__cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
	}
	bpf_map_area_free(cmap->cpu_map);
	kfree(cmap);
}
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = READ_ONCE(cmap->cpu_map[key]);
	return rcpu;
}
static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}
static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_btf_name		= "bpf_cpu_map",
	.map_btf_id		= &cpu_map_btf_id,
};
static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for
	 * the driver code invoking us to finish, due to driver
	 * (e.g. ixgbe) recycle tricks based on page-refcnt.
	 *
	 * Thus, the incoming xdp_frame is always queued here (else we race
	 * with another CPU on page-refcnt and remaining driver code).
	 * Queue time is very short, as the driver will invoke the flush
	 * operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct xdp_frame *xdpf;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}
void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}
static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);