// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/busy_poll.h>
#include <net/net_namespace.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/xdp_sock.h>

#include "dev.h"
#include "devmem.h"
#include "netdev-genl-gen.h"
struct netdev_nl_dump_ctx {
        unsigned long   ifindex;
        unsigned int    rxq_idx;
        unsigned int    txq_idx;
        unsigned int    napi_id;
};
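
/* The context above lives in cb->ctx (see netdev_dump_ctx() below) and
 * records where the previous dump pass stopped, so multi-message dumps
 * resume rather than restart from the beginning.
 */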
static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb)
{
        NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx);

        return (struct netdev_nl_dump_ctx *)cb->ctx;
}
static int
netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp,
                   const struct genl_info *info)
{
        u64 xsk_features = 0;
        u64 xdp_rx_meta = 0;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

#define XDP_METADATA_KFUNC(_, flag, __, xmo) \
        if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \
                xdp_rx_meta |= flag;
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

        if (netdev->xsk_tx_metadata_ops) {
                if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP;
                if (netdev->xsk_tx_metadata_ops->tmo_request_checksum)
                        xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM;
        }

        if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES,
                              netdev->xdp_features, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
                              xdp_rx_meta, NETDEV_A_DEV_PAD) ||
            nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
                              xsk_features, NETDEV_A_DEV_PAD))
                goto err_cancel_msg;

        if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
                if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
                                netdev->xdp_zc_max_segs))
                        goto err_cancel_msg;
        }

        genlmsg_end(rsp, hdr);

        return 0;

err_cancel_msg:
        genlmsg_cancel(rsp, hdr);

        return -EMSGSIZE;
}
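
/* Notifications reuse netdev_nl_dev_fill() for the message body; the
 * allocation and fill are skipped entirely when nobody listens on the
 * "mgmt" multicast group.
 */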
static void netdev_genl_dev_notify(struct net_device *netdev, int cmd)
{
        struct genl_info info;
        struct sk_buff *ntf;

        if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev),
                                NETDEV_NLGRP_MGMT))
                return;

        genl_info_init_ntf(&info, &netdev_nl_family, cmd);

        ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!ntf)
                return;

        if (netdev_nl_dev_fill(netdev, ntf, &info)) {
                nlmsg_free(ntf);
                return;
        }

        genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf,
                                0, NETDEV_NLGRP_MGMT, GFP_KERNEL);
}
int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct net_device *netdev;
        struct sk_buff *rsp;
        u32 ifindex;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (netdev)
                err = netdev_nl_dev_fill(netdev, rsp, info);
        else
                err = -ENODEV;

        rtnl_unlock();

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
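
/* Illustrative user-space query via the YNL CLI (hypothetical invocation,
 * paths and flags may differ by tree):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *           --do dev-get --json '{"ifindex": 1}'
 */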
int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        int err = 0;

        rtnl_lock();
        for_each_netdev_dump(net, netdev, ctx->ifindex) {
                err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb));
                if (err < 0)
                        break;
        }
        rtnl_unlock();

        return err;
}
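
/* The dump above iterates with for_each_netdev_dump(), which restarts from
 * ctx->ifindex, so a dump spanning several skbs stays consistent even if
 * devices come and go between passes.
 */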
static int
netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi,
                        const struct genl_info *info)
{
        unsigned long irq_suspend_timeout;
        unsigned long gro_flush_timeout;
        u32 napi_defer_hard_irqs;
        void *hdr;
        pid_t pid;

        if (!napi->dev->up)
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id))
                goto nla_put_failure;

        if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex))
                goto nla_put_failure;

        if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq))
                goto nla_put_failure;

        if (napi->thread) {
                pid = task_pid_nr(napi->thread);
                if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid))
                        goto nla_put_failure;
        }

        napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi);
        if (nla_put_s32(rsp, NETDEV_A_NAPI_DEFER_HARD_IRQS,
                        napi_defer_hard_irqs))
                goto nla_put_failure;

        irq_suspend_timeout = napi_get_irq_suspend_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT,
                         irq_suspend_timeout))
                goto nla_put_failure;

        gro_flush_timeout = napi_get_gro_flush_timeout(napi);
        if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
                         gro_flush_timeout))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        struct sk_buff *rsp;
        u32 napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
        if (napi) {
                err = netdev_nl_napi_fill_one(rsp, napi, info);
                netdev_unlock(napi->dev);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        if (err) {
                goto err_free_msg;
        } else if (!rsp->len) {
                err = -ENOENT;
                goto err_free_msg;
        }

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
static int
netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                        const struct genl_info *info,
                        struct netdev_nl_dump_ctx *ctx)
{
        struct napi_struct *napi;
        unsigned int prev_id;
        int err = 0;

        if (!netdev->up)
                return err;

        prev_id = UINT_MAX;
        list_for_each_entry(napi, &netdev->napi_list, dev_list) {
                if (napi->napi_id < MIN_NAPI_ID)
                        continue;

                /* Dump continuation below depends on the list being sorted */
                WARN_ON_ONCE(napi->napi_id >= prev_id);
                prev_id = napi->napi_id;

                if (ctx->napi_id && napi->napi_id >= ctx->napi_id)
                        continue;

                err = netdev_nl_napi_fill_one(rsp, napi, info);
                if (err)
                        return err;
                ctx->napi_id = napi->napi_id;
        }
        return err;
}
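
/* napi_list is sorted by descending napi_id (newest first), which the
 * WARN_ON_ONCE() above asserts; ctx->napi_id remembers the last ID dumped,
 * so a continuation skips everything at or above it.
 */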
int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_NAPI_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]);

        if (ifindex) {
                netdev = netdev_get_by_index_lock(net, ifindex);
                if (netdev) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        netdev_unlock(netdev);
                } else {
                        err = -ENODEV;
                }
        } else {
                for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
                        err = netdev_nl_napi_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->napi_id = 0;
                }
        }

        return err;
}
static int
netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
{
        u64 irq_suspend_timeout = 0;
        u64 gro_flush_timeout = 0;
        u32 defer = 0;

        if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
                defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
                napi_set_defer_hard_irqs(napi, defer);
        }

        if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) {
                irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]);
                napi_set_irq_suspend_timeout(napi, irq_suspend_timeout);
        }

        if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
                gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
                napi_set_gro_flush_timeout(napi, gro_flush_timeout);
        }

        return 0;
}
int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct napi_struct *napi;
        unsigned int napi_id;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
                return -EINVAL;

        napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);

        napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id);
        if (napi) {
                err = netdev_nl_napi_set_config(napi, info);
                netdev_unlock(napi->dev);
        } else {
                NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
                err = -ENOENT;
        }

        return err;
}
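
/* Illustrative configuration from user space (hypothetical values; the
 * napi-set command carries the same attributes parsed above):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *           --do napi-set \
 *           --json '{"id": 345, "defer-hard-irqs": 111,
 *                    "gro-flush-timeout": 11111}'
 */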
static int
netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
                         u32 q_idx, u32 q_type, const struct genl_info *info)
{
        struct net_devmem_dmabuf_binding *binding;
        struct netdev_rx_queue *rxq;
        struct netdev_queue *txq;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;

        if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                rxq = __netif_get_rx_queue(netdev, q_idx);
                if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             rxq->napi->napi_id))
                        goto nla_put_failure;

                binding = rxq->mp_params.mp_priv;
                if (binding &&
                    nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id))
                        goto nla_put_failure;

                break;
        case NETDEV_QUEUE_TYPE_TX:
                txq = netdev_get_tx_queue(netdev, q_idx);
                if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID,
                                             txq->napi->napi_id))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);

        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,
                                    u32 q_type)
{
        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                if (q_id >= netdev->real_num_rx_queues)
                        return -EINVAL;
                return 0;
        case NETDEV_QUEUE_TYPE_TX:
                if (q_id >= netdev->real_num_tx_queues)
                        return -EINVAL;
        }
        return 0;
}
static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
                     u32 q_type, const struct genl_info *info)
{
        int err;

        if (!netdev->up)
                return -ENOENT;

        err = netdev_nl_queue_validate(netdev, q_idx, q_type);
        if (err)
                return err;

        return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info);
}
int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info)
{
        u32 q_id, q_type, ifindex;
        struct net_device *netdev;
        struct sk_buff *rsp;
        int err;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX))
                return -EINVAL;

        q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);
        q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]);
        ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex);
        if (netdev) {
                err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);
                netdev_unlock(netdev);
        } else {
                err = -ENODEV;
        }

        if (err)
                goto err_free_msg;

        return genlmsg_reply(rsp, info);

err_free_msg:
        nlmsg_free(rsp);
        return err;
}
static int
netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        int err = 0;

        if (!netdev->up)
                return err;

        for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
                err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
                                               NETDEV_QUEUE_TYPE_RX, info);
                if (err)
                        return err;
        }
        for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
                err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
                                               NETDEV_QUEUE_TYPE_TX, info);
                if (err)
                        return err;
        }

        return err;
}
int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        u32 ifindex = 0;
        int err = 0;

        if (info->attrs[NETDEV_A_QUEUE_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]);

        if (ifindex) {
                netdev = netdev_get_by_index_lock(net, ifindex);
                if (netdev) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        netdev_unlock(netdev);
                } else {
                        err = -ENODEV;
                }
        } else {
                for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) {
                        err = netdev_nl_queue_dump_one(netdev, skb, info, ctx);
                        if (err < 0)
                                break;
                        ctx->rxq_idx = 0;
                        ctx->txq_idx = 0;
                }
        }

        return err;
}
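
/* Queue stats convention used below: the stats structs are memset to 0xff
 * before calling into the driver, drivers overwrite only the counters they
 * support, and any field still equal to NETDEV_STAT_NOT_SET (~0) is treated
 * as "not reported" and omitted from the response.
 */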
#define NETDEV_STAT_NOT_SET             (~0ULL)

static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size)
{
        const u64 *add = _add;
        u64 *sum = _sum;

        while (size) {
                if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET)
                        *sum += *add;
                sum++;
                add++;
                size -= 8;
        }
}
static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value)
{
        if (value == NETDEV_STAT_NOT_SET)
                return 0;
        return nla_put_uint(rsp, attr_id, value);
}
static int
netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits))
                return -EMSGSIZE;
        return 0;
}
static int
netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx)
{
        if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) ||
            netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake))
                return -EMSGSIZE;
        return 0;
}
static int
netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp,
                      u32 q_type, int i, const struct genl_info *info)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        struct netdev_queue_stats_rx rx;
        struct netdev_queue_stats_tx tx;
        void *hdr;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) ||
            nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i))
                goto nla_put_failure;

        switch (q_type) {
        case NETDEV_QUEUE_TYPE_RX:
                memset(&rx, 0xff, sizeof(rx));
                ops->get_queue_stats_rx(netdev, i, &rx);
                if (!memchr_inv(&rx, 0xff, sizeof(rx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_rx(rsp, &rx))
                        goto nla_put_failure;
                break;
        case NETDEV_QUEUE_TYPE_TX:
                memset(&tx, 0xff, sizeof(tx));
                ops->get_queue_stats_tx(netdev, i, &tx);
                if (!memchr_inv(&tx, 0xff, sizeof(tx)))
                        goto nla_cancel;
                if (netdev_nl_stats_write_tx(rsp, &tx))
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(rsp, hdr);
        return 0;

nla_cancel:
        genlmsg_cancel(rsp, hdr);
        return 0;
nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
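
/* A queue whose stats come back untouched (still all 0xff) gets its
 * half-built message cancelled above and is silently skipped rather than
 * reported as zeroes.
 */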
static int
netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
                         const struct genl_info *info,
                         struct netdev_nl_dump_ctx *ctx)
{
        const struct netdev_stat_ops *ops = netdev->stat_ops;
        int i, err;

        if (!(netdev->flags & IFF_UP))
                return 0;

        i = ctx->rxq_idx;
        while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX,
                                            i, info);
                if (err)
                        return err;
                ctx->rxq_idx = ++i;
        }
        i = ctx->txq_idx;
        while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
                err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX,
                                            i, info);
                if (err)
                        return err;
                ctx->txq_idx = ++i;
        }

        ctx->rxq_idx = 0;
        ctx->txq_idx = 0;

        return 0;
}
static int
netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
                          const struct genl_info *info)
{
        struct netdev_queue_stats_rx rx_sum, rx;
        struct netdev_queue_stats_tx tx_sum, tx;
        const struct netdev_stat_ops *ops;
        void *hdr;
        int i;

        ops = netdev->stat_ops;
        /* Netdev can't guarantee any complete counters */
        if (!ops->get_base_stats)
                return 0;

        memset(&rx_sum, 0xff, sizeof(rx_sum));
        memset(&tx_sum, 0xff, sizeof(tx_sum));

        ops->get_base_stats(netdev, &rx_sum, &tx_sum);

        /* The op was there, but nothing reported, don't bother */
        if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
            !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum)))
                return 0;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr)
                return -EMSGSIZE;
        if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
                goto nla_put_failure;

        for (i = 0; i < netdev->real_num_rx_queues; i++) {
                memset(&rx, 0xff, sizeof(rx));
                if (ops->get_queue_stats_rx)
                        ops->get_queue_stats_rx(netdev, i, &rx);
                netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
        }
        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                memset(&tx, 0xff, sizeof(tx));
                if (ops->get_queue_stats_tx)
                        ops->get_queue_stats_tx(netdev, i, &tx);
                netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
        }

        if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
            netdev_nl_stats_write_tx(rsp, &tx_sum))
                goto nla_put_failure;

        genlmsg_end(rsp, hdr);
        return 0;

nla_put_failure:
        genlmsg_cancel(rsp, hdr);
        return -EMSGSIZE;
}
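
/* Device-scoped totals above sum the driver's base stats with every
 * per-queue counter; netdev_nl_stats_add() leaves a sum untouched when
 * either side is NETDEV_STAT_NOT_SET, so unsupported counters stay
 * unreported instead of being folded in as garbage.
 */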
static int
netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
                              struct sk_buff *skb, const struct genl_info *info,
                              struct netdev_nl_dump_ctx *ctx)
{
        if (!netdev->stat_ops)
                return 0;

        switch (scope) {
        case 0:
                return netdev_nl_stats_by_netdev(netdev, skb, info);
        case NETDEV_QSTATS_SCOPE_QUEUE:
                return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
        }

        return -EINVAL; /* Should not happen, per netlink policy */
}
int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
                                struct netlink_callback *cb)
{
        struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb);
        const struct genl_info *info = genl_info_dump(cb);
        struct net *net = sock_net(skb->sk);
        struct net_device *netdev;
        unsigned int ifindex;
        unsigned int scope;
        int err = 0;

        scope = 0;
        if (info->attrs[NETDEV_A_QSTATS_SCOPE])
                scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);

        ifindex = 0;
        if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
                ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);

        rtnl_lock();
        if (ifindex) {
                netdev = __dev_get_by_index(net, ifindex);
                if (netdev && netdev->stat_ops) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                } else {
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[NETDEV_A_QSTATS_IFINDEX]);
                        err = netdev ? -EOPNOTSUPP : -ENODEV;
                }
        } else {
                for_each_netdev_dump(net, netdev, ctx->ifindex) {
                        err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
                                                            info, ctx);
                        if (err < 0)
                                break;
                }
        }
        rtnl_unlock();

        return err;
}
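
/* Illustrative per-queue stats dump from user space (hypothetical
 * invocation):
 *
 *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/netdev.yaml \
 *           --dump qstats-get --json '{"scope": "queue"}'
 */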
int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)];
        struct net_devmem_dmabuf_binding *binding;
        struct list_head *sock_binding_list;
        u32 ifindex, dmabuf_fd, rxq_idx;
        struct net_device *netdev;
        struct sk_buff *rsp;
        struct nlattr *attr;
        int rem, err = 0;
        void *hdr;

        if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) ||
            GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES))
                return -EINVAL;

        ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]);
        dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]);

        sock_binding_list = genl_sk_priv_get(&netdev_nl_family,
                                             NETLINK_CB(skb).sk);
        if (IS_ERR(sock_binding_list))
                return PTR_ERR(sock_binding_list);

        rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!rsp)
                return -ENOMEM;

        hdr = genlmsg_iput(rsp, info);
        if (!hdr) {
                err = -EMSGSIZE;
                goto err_genlmsg_free;
        }

        rtnl_lock();

        netdev = __dev_get_by_index(genl_info_net(info), ifindex);
        if (!netdev || !netif_device_present(netdev)) {
                err = -ENODEV;
                goto err_unlock;
        }

        if (dev_xdp_prog_count(netdev)) {
                NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
                err = -EEXIST;
                goto err_unlock;
        }

        binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
        if (IS_ERR(binding)) {
                err = PTR_ERR(binding);
                goto err_unlock;
        }

        nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES,
                               genlmsg_data(info->genlhdr),
                               genlmsg_len(info->genlhdr), rem) {
                err = nla_parse_nested(
                        tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr,
                        netdev_queue_id_nl_policy, info->extack);
                if (err < 0)
                        goto err_unbind;

                if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) ||
                    NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) {
                        err = -EINVAL;
                        goto err_unbind;
                }

                if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) {
                        NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]);
                        err = -EINVAL;
                        goto err_unbind;
                }

                rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]);

                err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding,
                                                      info->extack);
                if (err)
                        goto err_unbind;
        }

        list_add(&binding->list, sock_binding_list);

        nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id);
        genlmsg_end(rsp, hdr);

        err = genlmsg_reply(rsp, info);
        if (err)
                goto err_unbind;

        rtnl_unlock();

        return 0;

err_unbind:
        net_devmem_unbind_dmabuf(binding);
err_unlock:
        rtnl_unlock();
err_genlmsg_free:
        nlmsg_free(rsp);
        return err;
}
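
/* Successful bindings are tracked on the requesting socket's private list,
 * so netdev_nl_sock_priv_destroy() below can unbind them automatically when
 * the socket goes away.
 */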
void netdev_nl_sock_priv_init(struct list_head *priv)
{
        INIT_LIST_HEAD(priv);
}

void netdev_nl_sock_priv_destroy(struct list_head *priv)
{
        struct net_devmem_dmabuf_binding *binding;
        struct net_devmem_dmabuf_binding *temp;

        list_for_each_entry_safe(binding, temp, priv, list) {
                rtnl_lock();
                net_devmem_unbind_dmabuf(binding);
                rtnl_unlock();
        }
}
static int netdev_genl_netdevice_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF);
                break;
        case NETDEV_UNREGISTER:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF);
                break;
        case NETDEV_XDP_FEAT_CHANGE:
                netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF);
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block netdev_genl_nb = {
        .notifier_call  = netdev_genl_netdevice_event,
};

static int __init netdev_genl_init(void)
{
        int err;

        err = register_netdevice_notifier(&netdev_genl_nb);
        if (err)
                return err;

        err = genl_register_family(&netdev_nl_family);
        if (err)
                goto err_unreg_ntf;

        return 0;

err_unreg_ntf:
        unregister_netdevice_notifier(&netdev_genl_nb);
        return err;
}

subsys_initcall(netdev_genl_init);