// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/bpf.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct xsk_map {
        struct bpf_map map;
        struct xdp_sock **xsk_map;
        struct list_head __percpu *flush_list;
};

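/* Allocate an XSKMAP: a flat array of socket pointers plus a per-CPU flush
 * list used to batch wakeups of sockets that received frames during one
 * NAPI poll.
 */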
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
        int cpu, err = -EINVAL;
        struct xsk_map *m;
        u64 cost;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 ||
            attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
                return ERR_PTR(-EINVAL);

        m = kzalloc(sizeof(*m), GFP_USER);
        if (!m)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&m->map, attr);

        cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
        cost += sizeof(struct list_head) * num_possible_cpus();
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_m;

        m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        /* Notice: returns -EPERM if map size is larger than memlock limit */
        err = bpf_map_precharge_memlock(m->map.pages);
        if (err)
                goto free_m;

        err = -ENOMEM;

        m->flush_list = alloc_percpu(struct list_head);
        if (!m->flush_list)
                goto free_m;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu));

        m->xsk_map = bpf_map_area_alloc(m->map.max_entries *
                                        sizeof(struct xdp_sock *),
                                        m->map.numa_node);
        if (!m->xsk_map)
                goto free_percpu;
        return &m->map;

free_percpu:
        free_percpu(m->flush_list);
free_m:
        kfree(m);
        return ERR_PTR(err);
}

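/* Free the map. synchronize_net() makes sure no XDP program can still be
 * walking the entries before the socket references are dropped.
 */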
static void xsk_map_free(struct bpf_map *map)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        int i;

        synchronize_net();

        for (i = 0; i < map->max_entries; i++) {
                struct xdp_sock *xs;

                xs = m->xsk_map[i];
                if (!xs)
                        continue;

                sock_put((struct sock *)xs);
        }

        free_percpu(m->flush_list);
        bpf_map_area_free(m->xsk_map);
        kfree(m);
}

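/* Key iteration for the BPF_MAP_GET_NEXT_KEY syscall: a missing or
 * out-of-range key restarts iteration at index 0.
 */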
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= m->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == m->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

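/* Fast-path lookup used from the XDP redirect path; it runs under RCU, so a
 * READ_ONCE() of the slot is sufficient.
 */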
struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock *xs;

        if (key >= map->max_entries)
                return NULL;

        xs = READ_ONCE(m->xsk_map[key]);
        return xs;
}

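/* Deliver one frame to the socket's Rx ring and, on success, queue the
 * socket on this CPU's flush list so it is notified once per batch in
 * __xsk_map_flush().
 */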
int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
                       struct xdp_sock *xs)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
        int err;

        err = xsk_rcv(xs, xdp);
        if (err)
                return err;

        if (!xs->flush_node.prev)
                list_add(&xs->flush_node, flush_list);

        return 0;
}

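/* Flush every socket that received frames in this NAPI cycle and unlink it
 * from the per-CPU flush list.
 */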
void __xsk_map_flush(struct bpf_map *map)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct list_head *flush_list = this_cpu_ptr(m->flush_list);
        struct xdp_sock *xs, *tmp;

        list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
                xsk_flush(xs);
                __list_del(xs->flush_node.prev, xs->flush_node.next);
                xs->flush_node.prev = NULL;
        }
}

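/* Direct element lookup is not supported for XSKMAP; redirection goes
 * through __xsk_map_lookup_elem() instead.
 */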
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}

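/* Insert an AF_XDP socket, passed as a file descriptor, into the map.
 * BPF_NOEXIST is rejected with -EEXIST. If a socket already occupied the
 * slot, its reference is dropped only after an RCU grace period so that
 * in-flight redirects complete first.
 */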
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        u32 i = *(u32 *)key, fd = *(u32 *)value;
        struct xdp_sock *xs, *old_xs;
        struct socket *sock;
        int err;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= m->map.max_entries))
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return err;

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return -EOPNOTSUPP;
        }

        xs = (struct xdp_sock *)sock->sk;

        if (!xsk_is_setup_for_bpf_map(xs)) {
                sockfd_put(sock);
                return -EOPNOTSUPP;
        }

        sock_hold(sock->sk);

        old_xs = xchg(&m->xsk_map[i], xs);
        if (old_xs) {
                /* Make sure we've flushed everything. */
                synchronize_net();
                sock_put((struct sock *)old_xs);
        }

        sockfd_put(sock);
        return 0;
}

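/* Clear a slot. As in update, the old socket reference is only dropped
 * after synchronize_net() so concurrent redirects cannot use a stale
 * pointer.
 */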
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock *old_xs;
        int k = *(u32 *)key;

        if (k >= map->max_entries)
                return -EINVAL;

        old_xs = xchg(&m->xsk_map[k], NULL);
        if (old_xs) {
                /* Make sure we've flushed everything. */
                synchronize_net();
                sock_put((struct sock *)old_xs);
        }

        return 0;
}

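/* Map operations registered for BPF_MAP_TYPE_XSKMAP. */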
const struct bpf_map_ops xsk_map_ops = {
        .map_alloc = xsk_map_alloc,
        .map_free = xsk_map_free,
        .map_get_next_key = xsk_map_get_next_key,
        .map_lookup_elem = xsk_map_lookup_elem,
        .map_update_elem = xsk_map_update_elem,
        .map_delete_elem = xsk_map_delete_elem,
};