net/core/page_pool_user.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <net/net_debug.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>
#include <net/sock.h>

#include "devmem.h"
#include "page_pool_priv.h"
#include "netdev-genl-gen.h"

static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
 * Ordering: inside rtnl_lock
 */
static DEFINE_MUTEX(page_pools_lock);

/* Page pools are only reachable from user space (via netlink) if they are
 * linked to a netdev at creation time. Following page pool "visibility"
 * states are possible:
 *  - normal
 *    - user.list: linked to real netdev, netdev: real netdev
 *  - orphaned - real netdev has disappeared
 *    - user.list: linked to lo, netdev: lo
 *  - invisible - either (a) created without netdev linking, (b) unlisted due
 *      to error, or (c) the entire namespace which owned this pool disappeared
 *    - user.list: unhashed, netdev: unknown
 */

typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool,
			     const struct genl_info *info);
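
/* Look up a single pool by ID under page_pools_lock, allocate a reply and
 * let @fill serialize it. Pools which are unhashed or belong to a different
 * netns than the requester are treated as nonexistent.
 */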
static int
netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill)
{
	struct page_pool *pool;
	struct sk_buff *rsp;
	int err;

	mutex_lock(&page_pools_lock);
	pool = xa_load(&page_pools, id);
	if (!pool || hlist_unhashed(&pool->user.list) ||
	    !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) {
		err = -ENOENT;
		goto err_unlock;
	}

	rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!rsp) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = fill(rsp, pool, info);
	if (err)
		goto err_free_msg;

	mutex_unlock(&page_pools_lock);

	return genlmsg_reply(rsp, info);

err_free_msg:
	nlmsg_free(rsp);
err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}
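
/* Dump iteration state kept in netlink_callback->ctx: the ifindex the walk
 * stopped at and the ID of the last pool already dumped for that device.
 */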
struct page_pool_dump_cb {
	unsigned long ifindex;
	u32 pp_id;
};
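
/* Walk every netdev in the requesting netns and every pool hashed on it,
 * resuming from the ifindex/pool ID recorded in the dump state once the
 * previous reply skb filled up.
 */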
static int
netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
			     pp_nl_fill_cb fill)
{
	struct page_pool_dump_cb *state = (void *)cb->ctx;
	const struct genl_info *info = genl_info_dump(cb);
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct page_pool *pool;
	int err = 0;

	rtnl_lock();
	mutex_lock(&page_pools_lock);
	for_each_netdev_dump(net, netdev, state->ifindex) {
		hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
			if (state->pp_id && state->pp_id < pool->user.id)
				continue;

			state->pp_id = pool->user.id;
			err = fill(skb, pool, info);
			if (err)
				goto out;
		}

		state->pp_id = 0;
	}
out:
	mutex_unlock(&page_pools_lock);
	rtnl_unlock();

	return err;
}
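
/* Serialize recycling statistics for one pool. Only available when the
 * kernel is built with CONFIG_PAGE_POOL_STATS; pools with no stats to
 * report are silently skipped.
 */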
static int
page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
			const struct genl_info *info)
{
#ifdef CONFIG_PAGE_POOL_STATS
	struct page_pool_stats stats = {};
	struct nlattr *nest;
	void *hdr;

	if (!page_pool_get_stats(pool, &stats))
		return 0;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			 pool->slow.netdev->ifindex)))
		goto err_cancel_nest;

	nla_nest_end(rsp, nest);

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
			 stats.alloc_stats.fast) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
			 stats.alloc_stats.slow) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
			 stats.alloc_stats.slow_high_order) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
			 stats.alloc_stats.empty) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
			 stats.alloc_stats.refill) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
			 stats.alloc_stats.waive) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
			 stats.recycle_stats.cached) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
			 stats.recycle_stats.cache_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
			 stats.recycle_stats.ring) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
			 stats.recycle_stats.ring_full) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
			 stats.recycle_stats.released_refcnt))
		goto err_cancel_msg;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel_nest:
	nla_nest_cancel(rsp, nest);
err_cancel_msg:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
#else
	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
	return -EOPNOTSUPP;
#endif
}
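
/* Stats "do" request handler: the target pool is identified by a nested
 * page-pool-info attribute, and only selection by pool ID is supported.
 */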
int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
	struct nlattr *nest;
	int err;
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
		return -EINVAL;

	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
			       netdev_page_pool_info_nl_policy,
			       info->extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;
	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
				    "selecting by ifindex not supported");
		return -EINVAL;
	}

	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
}

int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
					 struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
}
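
/* Serialize the generic attributes of one pool: its ID, ifindex (omitted
 * once the pool has been orphaned onto loopback), NAPI ID, pages and bytes
 * still in flight, detach time, and any dmabuf binding.
 */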
static int
page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
		  const struct genl_info *info)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	size_t inflight, refsz;
	void *hdr;

	hdr = genlmsg_iput(rsp, info);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id))
		goto err_cancel;

	if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
	    nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
			pool->slow.netdev->ifindex))
		goto err_cancel;
	if (pool->user.napi_id &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
		goto err_cancel;

	inflight = page_pool_inflight(pool, false);
	refsz = PAGE_SIZE << pool->p.order;
	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) ||
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
			 inflight * refsz))
		goto err_cancel;
	if (pool->user.detach_time &&
	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME,
			 pool->user.detach_time))
		goto err_cancel;

	if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id))
		goto err_cancel;

	genlmsg_end(rsp, hdr);

	return 0;
err_cancel:
	genlmsg_cancel(rsp, hdr);
	return -EMSGSIZE;
}
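
/* Multicast a notification about @pool to the page-pool netlink group if
 * anyone is listening. Caller must hold page_pools_lock; invisible
 * (unhashed) pools are never announced.
 */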
static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd)
{
	struct genl_info info;
	struct sk_buff *ntf;
	struct net *net;

	lockdep_assert_held(&page_pools_lock);

	/* 'invisible' page pools don't matter */
	if (hlist_unhashed(&pool->user.list))
		return;
	net = dev_net(pool->slow.netdev);

	if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL))
		return;

	genl_info_init_ntf(&info, &netdev_nl_family, cmd);

	ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!ntf)
		return;

	if (page_pool_nl_fill(ntf, pool, &info)) {
		nlmsg_free(ntf);
		return;
	}

	genlmsg_multicast_netns(&netdev_nl_family, net, ntf,
				0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL);
}

int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID))
		return -EINVAL;

	id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]);

	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill);
}

int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill);
}
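
/* Make a newly created pool visible to user space: cyclically allocate a
 * 32-bit ID and, if the pool was created with a netdev, hash it on that
 * device's list and send an ADD notification.
 */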
int page_pool_list(struct page_pool *pool)
{
	static u32 id_alloc_next;
	int err;

	mutex_lock(&page_pools_lock);
	err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b,
			      &id_alloc_next, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	INIT_HLIST_NODE(&pool->user.list);
	if (pool->slow.netdev) {
		hlist_add_head(&pool->user.list,
			       &pool->slow.netdev->page_pools);
		pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;

		netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
	}

	mutex_unlock(&page_pools_lock);
	return 0;

err_unlock:
	mutex_unlock(&page_pools_lock);
	return err;
}
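
/* The driver has released the pool: record the detach time and notify
 * listeners. The pool itself may linger while pages are still in flight.
 */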
void page_pool_detached(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	pool->user.detach_time = ktime_get_boottime_seconds();
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
	mutex_unlock(&page_pools_lock);
}
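
/* Final teardown: send the DEL notification, then drop the pool from the
 * ID xarray and unhash it from its netdev's list.
 */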
void page_pool_unlist(struct page_pool *pool)
{
	mutex_lock(&page_pools_lock);
	netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF);
	xa_erase(&page_pools, pool->user.id);
	if (!hlist_unhashed(&pool->user.list))
		hlist_del(&pool->user.list);
	mutex_unlock(&page_pools_lock);
}
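
/* Check that the dmabuf binding configured on @rxq is actually backed by a
 * page pool on @dev for that queue index; return -ENODATA otherwise.
 */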
int page_pool_check_memory_provider(struct net_device *dev,
				    struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
	struct page_pool *pool;
	struct hlist_node *n;

	if (!binding)
		return 0;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) {
		if (pool->mp_priv != binding)
			continue;

		if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) {
			mutex_unlock(&page_pools_lock);
			return 0;
		}
	}
	mutex_unlock(&page_pools_lock);
	return -ENODATA;
}
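
/* Loopback itself is unregistering (netns teardown): unhash every remaining
 * pool and poison its netdev pointer so nothing dereferences it later.
 */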
static void page_pool_unreg_netdev_wipe(struct net_device *netdev)
{
	struct page_pool *pool;
	struct hlist_node *n;

	mutex_lock(&page_pools_lock);
	hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) {
		hlist_del_init(&pool->user.list);
		pool->slow.netdev = NET_PTR_POISON;
	}
	mutex_unlock(&page_pools_lock);
}
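
/* A real netdev is unregistering: re-parent its pools onto the netns
 * loopback device so they stay visible as "orphaned", sending a CHANGE
 * notification for each of them.
 */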
static void page_pool_unreg_netdev(struct net_device *netdev)
{
	struct page_pool *pool, *last;
	struct net_device *lo;

	lo = dev_net(netdev)->loopback_dev;

	mutex_lock(&page_pools_lock);
	last = NULL;
	hlist_for_each_entry(pool, &netdev->page_pools, user.list) {
		pool->slow.netdev = lo;
		netdev_nl_page_pool_event(pool,
					  NETDEV_CMD_PAGE_POOL_CHANGE_NTF);
		last = pool;
	}
	if (last)
		hlist_splice_init(&netdev->page_pools, &last->user.list,
				  &lo->page_pools);
	mutex_unlock(&page_pools_lock);
}
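
/* Netdevice notifier: on NETDEV_UNREGISTER either orphan the device's pools
 * onto loopback, or wipe them when loopback itself is going away.
 */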
static int
page_pool_netdevice_event(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	if (hlist_empty(&netdev->page_pools))
		return NOTIFY_OK;

	if (netdev->ifindex != LOOPBACK_IFINDEX)
		page_pool_unreg_netdev(netdev);
	else
		page_pool_unreg_netdev_wipe(netdev);
	return NOTIFY_OK;
}

static struct notifier_block page_pool_netdevice_nb = {
	.notifier_call = page_pool_netdevice_event,
};

static int __init page_pool_user_init(void)
{
	return register_netdevice_notifier(&page_pool_netdevice_nb);
}

subsys_initcall(page_pool_user_init);