/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/* Devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance we
 * spent some effort to ensure the datapath with redirect maps does not use
 * any locking. This is a quick note on the details.
 *
 * We have three possible paths into the devmap control plane: bpf syscalls,
 * bpf programs, and driver side xmit/flush operations. A bpf syscall will
 * invoke an update, delete, or lookup operation. To ensure updates and
 * deletes appear atomic from the datapath side, xchg() is used to modify the
 * netdev_map array. Then, because the datapath does a lookup into the
 * netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using an atomic per-cpu bitmap. The
 * bpf_dtab_netdev object will not be destroyed until all bits are cleared,
 * indicating all outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and delete them. This is a two
 * step process: (a) dereference the bpf_dtab_netdev object in netdev_map and
 * (b) check whether its ifindex is the same as the net_device being removed.
 * When removing the dev a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. While
 * the notifier hook walks the map we know that new dev references can not be
 * added by the user because core infrastructure ensures dev_get_by_index()
 * calls will fail at this point.
 */
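
/* Usage sketch (illustrative only, not part of this file): an XDP program
 * picks an egress slot and calls the bpf_redirect_map() helper against a
 * BPF_MAP_TYPE_DEVMAP map. The bpf_map_def/SEC() layout below follows the
 * samples/bpf convention and is an assumption of this sketch, not something
 * defined here; the devmap itself only requires key_size == 4 and
 * value_size == 4 (an ifindex), as enforced in dev_map_alloc() below.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp_redirect_map")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		u32 key = 0;
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 *
 * The value stored at each slot is an ifindex (see dev_map_update_elem()),
 * and the helper's return value asks the driver to transmit the frame out
 * that device.
 */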
#include <linux/bpf.h>
#include <linux/filter.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
struct bpf_dtab_netdev {
	struct net_device *dev;
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};
static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);
static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
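
/* For example, assuming 64-bit longs: a map with max_entries == 100 needs
 * BITS_TO_LONGS(100) == 2 longs, i.e. a 16 byte flush bitmap per possible
 * CPU.
 */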
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;

free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}
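
/* Userspace sketch (illustrative only): creating a devmap through the
 * tools/lib/bpf bpf_create_map() wrapper. Key and value are both 4 bytes and
 * the caller needs CAP_NET_ADMIN, matching the checks in dev_map_alloc()
 * above; the variable names are placeholders.
 *
 *	int map_fd;
 *
 *	map_fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(__u32),
 *				sizeof(__u32), 64, 0);
 *	if (map_fd < 0)
 *		return -1;
 */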
static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete. The rcu critical section only guarantees
	 * no further reads against netdev_map. It does __not__ ensure pending
	 * flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed wait for flush
	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}
static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}
/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear down we ensure the ctx bitmap
 * is zeroed before completing to ensure all flush operations have completed.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct net_device *netdev;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		netdev = dev->dev;
		if (likely(netdev->netdev_ops->ndo_xdp_flush))
			netdev->netdev_ops->ndo_xdp_flush(netdev);
	}
}
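
/* Driver-side sketch (illustrative only): a napi->poll() routine that allows
 * XDP redirects into a devmap must call xdp_do_flush_map() before it returns,
 * which ends up here in __dev_map_flush() and clears the bits set by
 * __dev_map_insert_ctx(). Function names other than xdp_do_flush_map() and
 * napi_complete_done() are placeholders, not a real driver.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = example_clean_rx(napi, budget);
 *
 *		xdp_do_flush_map();
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */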
/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or
 * update happens in parallel here a dev_put won't happen until after reading
 * the ifindex.
 */
struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev;

	if (key >= map->max_entries)
		return NULL;

	dev = READ_ONCE(dtab->netdev_map[key]);
	return dev ? dev->dev : NULL;
}
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key);

	return dev ? &dev->ifindex : NULL;
}
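
/* Userspace sketch (illustrative only): walking the populated slots of a
 * devmap with the tools/lib/bpf wrappers. dev_map_get_next_key() above wraps
 * to slot 0 when the supplied key is out of range, and dev_map_lookup_elem()
 * returns the stored ifindex. map_fd is assumed to come from the creation
 * sketch earlier in this file.
 *
 *	__u32 key = -1, next_key, ifindex;
 *
 *	while (!bpf_map_get_next_key(map_fd, &key, &next_key)) {
 *		if (!bpf_map_lookup_elem(map_fd, &next_key, &ifindex))
 *			printf("slot %u -> ifindex %u\n", next_key, ifindex);
 *		key = next_key;
 *	}
 */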
static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_flush) {
		struct net_device *fl = dev->dev;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			fl->netdev_ops->ndo_xdp_flush(dev->dev);
		}
	}
}
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	dev_put(dev->dev);
	kfree(dev);
}
static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because driver side rcu_read_lock/unlock only protects the running
	 * XDP program. However, for pending flush operations the dev and ctx
	 * are stored in another per cpu map. And additionally, the driver
	 * tear down ensures all soft irqs are complete before removing the
	 * net device in the case of dev_put equals zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN,
				   map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed,
	 * remembering that the driver side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
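
/* Userspace sketch (illustrative only): populating slot 0 with a device's
 * ifindex and later clearing it again. map_fd and the interface name are
 * placeholders.
 *
 *	__u32 key = 0;
 *	__u32 ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, 0);
 *	...
 *	bpf_map_delete_elem(map_fd, &key);
 *
 * Note that dev_map_update_elem() above rejects BPF_NOEXIST (returns
 * -EEXIST); an update is an unconditional xchg() of the slot pointer.
 */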
const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};
static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;

				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);