// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"

struct netdev_nl_dump_ctx {
        unsigned long   ifindex;
        unsigned int    rxq_idx;
        unsigned int    txq_idx;
        unsigned int    napi_id;
};

static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
        NL_ASSERT_DUMP_CTX_FITS(struct netdev_nl_dump_ctx);

        return (struct netdev_nl_dump_ctx *)cb->ctx;
}
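
/* cb->ctx is a small scratch area that netlink preserves between
 * successive invocations of a dumpit handler, so the dumpers below
 * record the last ifindex / queue index / NAPI id they emitted and
 * resume from there on the next pass. NL_ASSERT_DUMP_CTX_FITS() is a
 * compile-time check that struct netdev_nl_dump_ctx fits in cb->ctx.
 */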

static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
                   const struct genl_info *info)
{
        u64 xsk_features = 0;
        u64 xdp_rx_meta = 0;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
        if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
                xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

        if (netdev->xsk_tx_metadata_ops) {
                if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
                if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
        }

        if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
                              netdev->xdp_features, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
                              xdp_rx_meta, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
                              xsk_features, NETDEV_A_DEV_PAD))
                goto err_cancel_msg;

        if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
                if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
                                netdev->xdp_zc_max_segs))
                        goto err_cancel_msg;
        }

        genlmsg_end(rsp, hdr);

        return 0;

err_cancel_msg:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
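
/* The XDP_METADATA_KFUNC block above is an x-macro: XDP_METADATA_KFUNC_xxx
 * (from net/xdp.h) expands the local XDP_METADATA_KFUNC() definition once
 * per rx-metadata kfunc, so each metadata op the driver implements sets
 * the corresponding feature flag bit in xdp_rx_meta.
 */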

static void netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
        struct genl_info info;
        struct sk_buff *ntf;

        if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
                                NETDEV_NLGRP_MGMT))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (netdev_nl_dev_fill(netdev, ntf, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
                                0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}

int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *netdev;
        struct sk_buff *rsp;
        u32 ifindex;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_dev_fill(netdev, rsp, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        int err = 0;

        rtnl_lock();
        for_each_netdev_dump(net, netdev, ctx->ifindex) {
                err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
                if (err < 0)
                        break;
        }
        rtnl_unlock();

        return err;
}
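
/* Illustrative usage note (not part of the original file): the dev-get
 * dump above can be exercised from userspace with the in-tree YNL CLI,
 * e.g.
 *
 *   ./tools/net/ynl/cli.py \
 *           --spec Documentation/netlink/specs/netdev.yaml --dump dev-get
 *
 * which walks all netdevs and prints one message per device.
 */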

static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
                        const struct genl_info *info)
{
        pid_t pid;
        void *hdr;

        if (WARN_ON_ONCE(!napi->dev))
                return -EINVAL;
        if (!(napi->dev->flags & IFF_UP))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (napi->napi_id >= MIN_NAPI_ID &&
            nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
                goto nla_put_failure;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;

        if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
                goto nla_put_failure;

        if (napi->thread) {
                pid = task_pid_nr(napi->thread);
                if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
                        goto nla_put_failure;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
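
/* NAPI ids below MIN_NAPI_ID (see net/busy_poll.h) are reserved and not
 * valid, globally unique ids, which is why the fill helper above only
 * reports NETDEV_A_NAPI_ID once a real id has been assigned.
 */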

int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        struct sk_buff *rsp;
        u32 napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_fill_one(rsp, napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                        const struct genl_info *info,
                        struct netdev_nl_dump_ctx *ctx)
{
        struct napi_struct *napi;
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        list_for_each_entry(napi, &netdev->napi_list, dev_list) {
                if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
                        continue;

                err = netdev_nl_napi_fill_one(rsp, napi, info);
                if (err)
                        return err;
                ctx->napi_id = napi->napi_id;
        }
        return err;
}
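
/* The resume test above relies on napi_list being ordered newest-first:
 * napi ids grow monotonically and new instances are added at the head,
 * so on a restarted dump every entry with an id >= the last id written
 * (ctx->napi_id) has already been sent and is skipped.
 */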

int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_NAPI_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->napi_id = 0;
                }
        }
        rtnl_unlock();

        return err;
}

static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
{
        struct net_devmem_dmabuf_binding *binding;
        struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                rxq = __netif_get_rx_queue(netdev, q_idx);
                if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             rxq->napi->napi_id))
                        goto nla_put_failure;

                binding = rxq->mp_params.mp_priv;
                if (binding &&
                    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
                        goto nla_put_failure;

                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
                if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             txq->napi->napi_id))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
                                    u32 q_type)
{
        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                if (q_id >= netdev->real_num_rx_queues)
                        return -EINVAL;
                return 0;
        case NETDEV_QUEUE_TYPE_TX:
                if (q_id >= netdev->real_num_tx_queues)
                        return -EINVAL;
        }
        return 0;
}

static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
                     u32 q_type, const struct genl_info *info)
{
        int err;

        if (!(netdev->flags & IFF_UP))
                return -ENOENT;

        err = netdev_nl_queue_validate(netdev, q_idx, q_type);
        if (err)
                return err;

        return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}

int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 q_id, q_type, ifindex;
        struct net_device *netdev;
        struct sk_buff *rsp;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
                return -EINVAL;

        q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
        q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
        ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}

static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        int err = 0;
        int i;

        if (!(netdev->flags & IFF_UP))
                return err;

        for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_RX, info);
                if (err)
                        return err;
                ctx->rxq_idx = i++;
        }
        for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_TX, info);
                if (err)
                        return err;
                ctx->txq_idx = i++;
        }

        return err;
}

int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->rxq_idx = 0;
                        ctx->txq_idx = 0;
                }
        }
        rtnl_unlock();

        return err;
}

#define NETDEV_STAT_NOT_SET             (~0ULL)
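
/* Queue stats use an all-ones sentinel: callers memset() the stats
 * struct to 0xff before invoking the driver op, any field the driver
 * leaves untouched still reads as NETDEV_STAT_NOT_SET afterwards, and
 * such fields are neither summed nor reported to userspace.
 */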

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
        const u64 *add = _add;
        u64 *sum = _sum;

        while (size) {
                if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
                        *sum += *add;
                sum++;
                add++;
                size -= 8;
        }
}
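
/* netdev_nl_stats_add() walks both structs as flat arrays of u64 (note
 * the "size -= 8" stride), which assumes every field in
 * netdev_queue_stats_rx/tx is a u64 with no padding in between.
 */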

static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
        if (value == NETDEV_STAT_NOT_SET)
                return 0;
        return nla_put_uint(rsp, attr_id, value);
}

static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
                return -EMSGSIZE;
        return 0;
}

static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
                return -EMSGSIZE;
        return 0;
}

static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
                      u32 q_type, int i, const struct genl_info *info)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        struct netdev_queue_stats_rx rx;
        struct netdev_queue_stats_tx tx;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                memset(&rx, 0xff, sizeof(rx));
                ops->get_queue_stats_rx(netdev, i, &rx);
                if (!memchr_inv(&rx, 0xff, sizeof(rx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_rx(rsp, &rx))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                memset(&tx, 0xff, sizeof(tx));
                ops->get_queue_stats_tx(netdev, i, &tx);
                if (!memchr_inv(&tx, 0xff, sizeof(tx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_tx(rsp, &tx))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);
        return 0;

nla_cancel:
        genlmsg_cancel(rsp, hdr);
        return 0;
nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        int i, err;

        if (!(netdev->flags & IFF_UP))
                return 0;

        i = ctx->rxq_idx;
        while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
                                            i, info);
                if (err)
                        return err;
                ctx->rxq_idx = i++;
        }
        i = ctx->txq_idx;
        while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
                                            i, info);
                if (err)
                        return err;
                ctx->txq_idx = i++;
        }

        ctx->rxq_idx = 0;
        ctx->txq_idx = 0;
        return 0;
}

static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
                          const struct genl_info *info)
{
        struct netdev_queue_stats_rx rx_sum, rx;
        struct netdev_queue_stats_tx tx_sum, tx;
        const struct netdev_stat_ops *ops;
        void *hdr;
        int i;

        ops = netdev->stat_ops;
        /* Netdev can't guarantee any complete counters */
        if (!ops->get_base_stats)
                return 0;

        memset(&rx_sum, 0xff, sizeof(rx_sum));
        memset(&tx_sum, 0xff, sizeof(tx_sum));

        ops->get_base_stats(netdev, &rx_sum, &tx_sum);

        /* The op was there, but nothing reported, don't bother */
        if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
            !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        for (i = 0; i < netdev->real_num_rx_queues; i++) {
                memset(&rx, 0xff, sizeof(rx));
                if (ops->get_queue_stats_rx)
                        ops->get_queue_stats_rx(netdev, i, &rx);
                netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
        }
        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                memset(&tx, 0xff, sizeof(tx));
                if (ops->get_queue_stats_tx)
                        ops->get_queue_stats_tx(netdev, i, &tx);
                netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
        }

        if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
            netdev_nl_stats_write_tx(rsp, &tx_sum))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}

static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
                              struct sk_buff *skb, const struct genl_info *info,
                              struct netdev_nl_dump_ctx *ctx)
{
        if (!netdev->stat_ops)
                return 0;

        switch (scope) {
        case 0:
                return netdev_nl_stats_by_netdev(netdev, skb, info);
        case NETDEV_QSTATS_SCOPE_QUEUE:
                return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
        }

        return -EINVAL; /* Should not happen, per netlink policy */
}

int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        unsigned int ifindex;
        unsigned int scope;
        int err = 0;

        scope = 0;
        if (info->attrs[NETDEV_A_QSTATS_SCOPE])
                scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

        ifindex = 0;
        if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev && netdev->stat_ops) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                } else {
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[NETDEV_A_QSTATS_IFINDEX]);
                        err = netdev ? -EOPNOTSUPP : -ENODEV;
                }
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                        if (err < 0)
                                break;
                }
        }
        rtnl_unlock();

        return err;
}

int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
        struct net_devmem_dmabuf_binding *binding;
        struct list_head *sock_binding_list;
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct net_device *netdev;
        struct sk_buff *rsp;
        struct nlattr *attr;
        int rem, err = 0;
        void *hdr;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
        dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

        sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
                                             NETLINK_CB(skb).sk);
        if (IS_ERR(sock_binding_list))
                return PTR_ERR(sock_binding_list);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr) {
                err = -EMSGSIZE;
                goto err_genlmsg_free;
        }

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (!netdev || !netif_device_present(netdev)) {
                err = -ENODEV;
                goto err_unlock;
        }

        if (dev_xdp_prog_count(netdev)) {
                NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
                err = -EEXIST;
                goto err_unlock;
        }

        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock;
        }

        nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
                               genlmsg_data(info->genlhdr),
                               genlmsg_len(info->genlhdr), rem) {
                err = nla_parse_nested(
                        tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
                        netdev_queue_id_nl_policy, info->extack);
                if (err < 0)
                        goto err_unbind;

                if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
                    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
                        err = -EINVAL;
                        goto err_unbind;
                }

                if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
                        NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
                        err = -EINVAL;
                        goto err_unbind;
                }

                rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

                err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
                                                      info->extack);
                if (err)
                        goto err_unbind;
        }

        list_add(&binding->list, sock_binding_list);

        nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
        genlmsg_end(rsp, hdr);

        err = genlmsg_reply(rsp, info);
        if (err)
                goto err_unbind;

        rtnl_unlock();

        return 0;

err_unbind:
        net_devmem_unbind_dmabuf(binding);
err_unlock:
        rtnl_unlock();
err_genlmsg_free:
        nlmsg_free(rsp);
        return err;
}
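
/* The error unwinding above is layered in reverse of the setup order:
 * err_unbind releases the dmabuf binding and falls through to
 * err_unlock (drop RTNL), which falls through to err_genlmsg_free
 * (free the never-sent reply skb).
 */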

void netdev_nl_sock_priv_init(struct list_head *priv)
{
        INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
        struct net_devmem_dmabuf_binding *binding;
        struct net_devmem_dmabuf_binding *temp;

        list_for_each_entry_safe(binding, temp, priv, list) {
                rtnl_lock();
                net_devmem_unbind_dmabuf(binding);
                rtnl_unlock();
        }
}

static int netdev_genl_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
                break;
        case NETDEV_UNREGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
                break;
        case NETDEV_XDP_FEAT_CHANGE:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block netdev_genl_nb = {
        .notifier_call  = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
        int err;

        err = register_netdevice_notifier(&netdev_genl_nb);
        if (err)
                return err;

        err = genl_register_family(&netdev_nl_family);
        if (err)
                goto err_unreg_ntf;

        return 0;

err_unreg_ntf:
        unregister_netdevice_notifier(&netdev_genl_nb);
        return err;
}
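
/* If genl_register_family() fails, the notifier registered just above
 * is unregistered again, so a failed init leaves no dangling callback.
 */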

subsys_initcall(netdev_genl_init);