// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"
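/* Kernel side of the "netdev" generic netlink family (spec:
 * Documentation/netlink/specs/netdev.yaml). One way to poke at it from
 * userspace is the in-tree YNL CLI; paths and op names may vary by
 * kernel version:
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *       --do dev-get --json '{"ifindex": 1}'
 */

/* Per-dump state, persisted across netlink dump callbacks in cb->ctx:
 * ifindex tracks how far the device walk has progressed, while the
 * queue/NAPI fields record progress within one device when the dump
 * skb fills up mid-way.
 */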
struct netdev_nl_dump_ctx {
        unsigned long   ifindex;
        unsigned int    rxq_idx;
        unsigned int    txq_idx;
        unsigned int    napi_id;
};
static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
        NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

        return (struct netdev_nl_dump_ctx *)cb->ctx;
}
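/* Fill one device's capability report: XDP feature flags, which XDP RX
 * metadata kfuncs the driver implements, and AF_XDP TX metadata support.
 */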
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
                   const struct genl_info *info)
{
        u64 xsk_features = 0;
        u64 xdp_rx_meta = 0;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
        if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
                xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

        if (netdev->xsk_tx_metadata_ops) {
                if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
                if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
        }

        if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
                              netdev->xdp_features, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
                              xdp_rx_meta, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
                              xsk_features, NETDEV_A_DEV_PAD))
                goto err_cancel_msg;

        if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
                if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
                                netdev->xdp_zc_max_segs))
                        goto err_cancel_msg;
        }

        genlmsg_end(rsp, hdr);

        return 0;

err_cancel_msg:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
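/* Multicast a device notification, but only when someone is actually
 * listening on the management group; skip the allocation otherwise.
 */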
static void
netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
        struct genl_info info;
        struct sk_buff *ntf;

        if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
                                NETDEV_NLGRP_MGMT))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (netdev_nl_dev_fill(netdev, ntf, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
                                0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *netdev;
        struct sk_buff *rsp;
        u32 ifindex;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_dev_fill(netdev, rsp, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
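/* Dump capabilities of all devices in the socket's netns, resuming
 * from ctx->ifindex when the previous pass ran out of skb space.
 */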
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        int err = 0;

        rtnl_lock();
        for_each_netdev_dump(net, netdev, ctx->ifindex) {
                err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
                if (err < 0)
                        break;
        }
        rtnl_unlock();

        return err;
}
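/* Report one NAPI instance: id, ifindex, IRQ and kthread pid, plus the
 * per-NAPI tunables (defer-hard-irqs, IRQ suspend and GRO flush timeouts).
 */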
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
                        const struct genl_info *info)
{
        unsigned long irq_suspend_timeout;
        unsigned long gro_flush_timeout;
        u32 napi_defer_hard_irqs;
        void *hdr;
        pid_t pid;

        if (WARN_ON_ONCE(!napi->dev))
                return -EINVAL;
        if (!(napi->dev->flags & IFF_UP))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (napi->napi_id >= MIN_NAPI_ID &&
            nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
                goto nla_put_failure;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;

        if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
                goto nla_put_failure;

        if (napi->thread) {
                pid = task_pid_nr(napi->thread);
                if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
                        goto nla_put_failure;
        }

        napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
        if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
                        napi_defer_hard_irqs))
                goto nla_put_failure;

        irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
                         irq_suspend_timeout))
                goto nla_put_failure;

        gro_flush_timeout = napi_get_gro_flush_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
                         gro_flush_timeout))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        struct sk_buff *rsp;
        u32 napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();
        rcu_read_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_fill_one(rsp, napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rcu_read_unlock();
        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
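/* ctx->napi_id holds the id of the last NAPI already dumped for this
 * device; entries with an id at or above it are skipped on resume.
 */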
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                        const struct genl_info *info,
                        struct netdev_nl_dump_ctx *ctx)
{
        struct napi_struct *napi;
        int err = 0;

        if (!(netdev->flags & IFF_UP))
                return err;

        list_for_each_entry(napi, &netdev->napi_list, dev_list) {
                if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
                        continue;

                err = netdev_nl_napi_fill_one(rsp, napi, info);
                if (err)
                        return err;
                ctx->napi_id = napi->napi_id;
        }

        return err;
}
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_NAPI_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->napi_id = 0;
                }
        }
        rtnl_unlock();

        return err;
}
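/* All attributes are optional; only the ones present in the request
 * are applied to the NAPI instance.
 */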
static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
        u64 irq_suspend_timeout = 0;
        u64 gro_flush_timeout = 0;
        u32 defer = 0;

        if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
                defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
                napi_set_defer_hard_irqs(napi, defer);
        }

        if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
                irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
                napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
        }

        if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
                gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
                napi_set_gro_flush_timeout(napi, gro_flush_timeout);
        }

        return 0;
}
int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        unsigned int napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rtnl_lock();
        rcu_read_lock();

        napi = napi_by_id(napi_id);
        if (napi) {
                err = netdev_nl_napi_set_config(napi, info);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        rcu_read_unlock();
        rtnl_unlock();

        return err;
}
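/* Describe one queue: id, type and ifindex, the NAPI servicing it if
 * any, and for RX queues the dmabuf binding id when devmem is attached.
 */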
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
{
        struct net_devmem_dmabuf_binding *binding;
        struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                rxq = __netif_get_rx_queue(netdev, q_idx);
                if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             rxq->napi->napi_id))
                        goto nla_put_failure;

                binding = rxq->mp_params.mp_priv;
                if (binding &&
                    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
                        goto nla_put_failure;

                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
                if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             txq->napi->napi_id))
                        goto nla_put_failure;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
                                    u32 q_type)
{
        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                if (q_id >= netdev->real_num_rx_queues)
                        return -EINVAL;
                return 0;
        case NETDEV_QUEUE_TYPE_TX:
                if (q_id >= netdev->real_num_tx_queues)
                        return -EINVAL;
        }

        return 0;
}
static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
                     u32 q_type, const struct genl_info *info)
{
        int err;

        if (!(netdev->flags & IFF_UP))
                return -ENOENT;

        err = netdev_nl_queue_validate(netdev, q_idx, q_type);
        if (err)
                return err;

        return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}
int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 q_id, q_type, ifindex;
        struct net_device *netdev;
        struct sk_buff *rsp;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
                return -EINVAL;

        q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
        q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
        ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
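/* Walk RX then TX queues of one device; ctx->rxq_idx/txq_idx let a
 * partially-filled dump resume at the exact queue it stopped on.
 */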
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        int err = 0;
        int i;

        if (!(netdev->flags & IFF_UP))
                return err;

        for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_RX, info);
                if (err)
                        return err;
                ctx->rxq_idx = ++i;
        }
        for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
                err = netdev_nl_queue_fill_one(rsp, netdev, i,
                                               NETDEV_QUEUE_TYPE_TX, info);
                if (err)
                        return err;
                ctx->txq_idx = ++i;
        }

        return err;
}
int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev)
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                else
                        err = -ENODEV;
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->rxq_idx = 0;
                        ctx->txq_idx = 0;
                }
        }
        rtnl_unlock();

        return err;
}
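/* Queue stats structs are arrays of u64 counters under the hood.
 * Drivers leave counters they do not track at ~0ULL, and the helpers
 * below treat that pattern as "not reported" rather than as a value.
 */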
#define NETDEV_STAT_NOT_SET             (~0ULL)
static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
        const u64 *add = _add;
        u64 *sum = _sum;

        while (size) {
                if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
                        *sum += *add;
                sum++;
                add++;
                size -= 8;
        }
}
static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
        if (value == NETDEV_STAT_NOT_SET)
                return 0;
        return nla_put_uint(rsp, attr_id, value);
}
static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
                return -EMSGSIZE;
        return 0;
}
static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
                return -EMSGSIZE;
        return 0;
}
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
                      u32 q_type, int i, const struct genl_info *info)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        struct netdev_queue_stats_rx rx;
        struct netdev_queue_stats_tx tx;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                memset(&rx, 0xff, sizeof(rx));
                ops->get_queue_stats_rx(netdev, i, &rx);
                if (!memchr_inv(&rx, 0xff, sizeof(rx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_rx(rsp, &rx))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                memset(&tx, 0xff, sizeof(tx));
                ops->get_queue_stats_tx(netdev, i, &tx);
                if (!memchr_inv(&tx, 0xff, sizeof(tx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_tx(rsp, &tx))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_cancel:
        genlmsg_cancel(rsp, hdr);
        return 0;
nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        int i, err;

        if (!(netdev->flags & IFF_UP))
                return 0;

        i = ctx->rxq_idx;
        while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
                                            i, info);
                if (err)
                        return err;
                ctx->rxq_idx = ++i;
        }
        i = ctx->txq_idx;
        while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
                                            i, info);
                if (err)
                        return err;
                ctx->txq_idx = ++i;
        }

        ctx->rxq_idx = 0;
        ctx->txq_idx = 0;

        return 0;
}
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
                          const struct genl_info *info)
{
        struct netdev_queue_stats_rx rx_sum, rx;
        struct netdev_queue_stats_tx tx_sum, tx;
        const struct netdev_stat_ops *ops;
        void *hdr;
        int i;

        ops = netdev->stat_ops;
        /* Netdev can't guarantee any complete counters */
        if (!ops->get_base_stats)
                return 0;

        memset(&rx_sum, 0xff, sizeof(rx_sum));
        memset(&tx_sum, 0xff, sizeof(tx_sum));

        ops->get_base_stats(netdev, &rx_sum, &tx_sum);

        /* The op was there, but nothing reported, don't bother */
        if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
            !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        for (i = 0; i < netdev->real_num_rx_queues; i++) {
                memset(&rx, 0xff, sizeof(rx));
                if (ops->get_queue_stats_rx)
                        ops->get_queue_stats_rx(netdev, i, &rx);
                netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
        }
        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                memset(&tx, 0xff, sizeof(tx));
                if (ops->get_queue_stats_tx)
                        ops->get_queue_stats_tx(netdev, i, &tx);
                netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
        }

        if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
            netdev_nl_stats_write_tx(rsp, &tx_sum))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
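/* Scope 0 means netdev-wide totals (base stats plus per-queue sums);
 * NETDEV_QSTATS_SCOPE_QUEUE reports each queue individually.
 */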
static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
                              struct sk_buff *skb, const struct genl_info *info,
                              struct netdev_nl_dump_ctx *ctx)
{
        if (!netdev->stat_ops)
                return 0;

        switch (scope) {
        case 0:
                return netdev_nl_stats_by_netdev(netdev, skb, info);
        case NETDEV_QSTATS_SCOPE_QUEUE:
                return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
        }

        return -EINVAL; /* Should not happen, per netlink policy */
}
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        unsigned int ifindex;
        unsigned int scope;
        int err = 0;

        scope = 0;
        if (info->attrs[NETDEV_A_QSTATS_SCOPE])
                scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

        ifindex = 0;
        if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev && netdev->stat_ops) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                } else {
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[NETDEV_A_QSTATS_IFINDEX]);
                        err = netdev ? -EOPNOTSUPP : -ENODEV;
                }
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                        if (err < 0)
                                break;
                }
        }
        rtnl_unlock();

        return err;
}
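/* Bind a dmabuf to one or more RX queues for device memory TCP. The
 * binding is tied to the requesting socket's private list so it is
 * torn down automatically when the netlink socket closes.
 */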
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
        struct net_devmem_dmabuf_binding *binding;
        struct list_head *sock_binding_list;
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct net_device *netdev;
        struct sk_buff *rsp;
        struct nlattr *attr;
        int rem, err = 0;
        void *hdr;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
        dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

        sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
                                             NETLINK_CB(skb).sk);
        if (IS_ERR(sock_binding_list))
                return PTR_ERR(sock_binding_list);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr) {
                err = -EMSGSIZE;
                goto err_genlmsg_free;
        }

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (!netdev || !netif_device_present(netdev)) {
                err = -ENODEV;
                goto err_unlock;
        }

        if (dev_xdp_prog_count(netdev)) {
                NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
                err = -EEXIST;
                goto err_unlock;
        }

        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock;
        }

        nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
                               genlmsg_data(info->genlhdr),
                               genlmsg_len(info->genlhdr), rem) {
                err = nla_parse_nested(
                        tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
                        netdev_queue_id_nl_policy, info->extack);
                if (err < 0)
                        goto err_unbind;

                if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
                    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
                        err = -EINVAL;
                        goto err_unbind;
                }

                if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
                        NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
                        err = -EINVAL;
                        goto err_unbind;
                }

                rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

                err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
                                                      info->extack);
                if (err)
                        goto err_unbind;
        }

        list_add(&binding->list, sock_binding_list);

        nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
        genlmsg_end(rsp, hdr);

        err = genlmsg_reply(rsp, info);
        if (err)
                goto err_unbind;

        rtnl_unlock();

        return 0;

err_unbind:
        net_devmem_unbind_dmabuf(binding);
err_unlock:
        rtnl_unlock();
err_genlmsg_free:
        nlmsg_free(rsp);
        return err;
}
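/* Per-socket private data: the list of dmabuf bindings owned by this
 * netlink socket, created on first use and destroyed with the socket.
 */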
void netdev_nl_sock_priv_init(struct list_head *priv)
{
        INIT_LIST_HEAD(priv);
}
void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
        struct net_devmem_dmabuf_binding *binding;
        struct net_devmem_dmabuf_binding *temp;

        list_for_each_entry_safe(binding, temp, priv, list) {
                rtnl_lock();
                net_devmem_unbind_dmabuf(binding);
                rtnl_unlock();
        }
}
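/* Translate netdevice notifier events into netdev family notifications
 * on the management multicast group.
 */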
static int netdev_genl_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
                break;
        case NETDEV_UNREGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
                break;
        case NETDEV_XDP_FEAT_CHANGE:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block netdev_genl_nb = {
        .notifier_call  = netdev_genl_netdevice_event,
};
static int __init netdev_genl_init(void)
{
        int err;

        err = register_netdevice_notifier(&netdev_genl_nb);
        if (err)
                return err;

        err = genl_register_family(&netdev_nl_family);
        if (err)
                goto err_unreg_ntf;

        return 0;

err_unreg_ntf:
        unregister_netdevice_notifier(&netdev_genl_nb);
        return err;
}

subsys_initcall(netdev_genl_init);