// SPDX-License-Identifier: GPL-2.0-only
/*
 * Monitoring code for network dropped packet alerts
 *
 * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/netlink.h>
#include <linux/net_dropmon.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>

#include <trace/events/skb.h>
#include <trace/events/napi.h>

#include <asm/unaligned.h>

/*
 * Globals, our netlink socket pointer
 * and the work handle that will send up
 * netlink alerts
 */
static int trace_state = TRACE_OFF;
static bool monitor_hw;

/*
 * An overall lock guarding every operation coming from userspace.
 * It also guards the global 'hw_stats_list' list.
 */
static DEFINE_MUTEX(net_dm_mutex);

struct net_dm_stats {
	u64 dropped;
	struct u64_stats_sync syncp;
};

#define NET_DM_MAX_HW_TRAP_NAME_LEN 40

struct net_dm_hw_entry {
	char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN];
	u32 count;
};

struct net_dm_hw_entries {
	u32 num_entries;
	struct net_dm_hw_entry entries[0];
};

struct per_cpu_dm_data {
	spinlock_t		lock;	/* Protects 'skb', 'hw_entries' and
					 * 'send_timer'
					 */
	union {
		struct sk_buff			*skb;
		struct net_dm_hw_entries	*hw_entries;
	};
	struct sk_buff_head	drop_queue;
	struct work_struct	dm_alert_work;
	struct timer_list	send_timer;
	struct net_dm_stats	stats;
};

struct dm_hw_stat_delta {
	struct net_device *dev;
	unsigned long last_rx;
	struct list_head list;
	struct rcu_head rcu;
	unsigned long last_drop_val;
};

static struct genl_family net_drop_monitor_family;

static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data);

static int dm_hit_limit = 64;
static int dm_delay = 1;
static unsigned long dm_hw_check_delta = 2*HZ;
static LIST_HEAD(hw_stats_list);

static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
static u32 net_dm_trunc_len;
static u32 net_dm_queue_len = 1000;

struct net_dm_alert_ops {
	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
				void *location);
	void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
				int work, int budget);
	void (*work_item_func)(struct work_struct *work);
	void (*hw_work_item_func)(struct work_struct *work);
	void (*hw_probe)(struct sk_buff *skb,
			 const struct net_dm_hw_metadata *hw_metadata);
};

struct net_dm_skb_cb {
	union {
		struct net_dm_hw_metadata *hw_metadata;
		void *pc;
	};
};

#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))

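/* Allocate a fresh, pre-sized summary alert skb for one CPU and swap it into
 * place under the per-CPU lock; the previous skb (if any) is returned to the
 * caller so it can be sent or freed. On allocation failure the send timer is
 * re-armed to retry shortly.
 */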
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
	size_t al;
	struct net_dm_alert_msg *msg;
	struct nlattr *nla;
	struct sk_buff *skb;
	unsigned long flags;
	void *msg_header;

	al = sizeof(struct net_dm_alert_msg);
	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
	al += sizeof(struct nlattr);

	skb = genlmsg_new(al, GFP_KERNEL);

	if (!skb)
		goto err;

	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
				 0, NET_DM_CMD_ALERT);
	if (!msg_header) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	nla = nla_reserve(skb, NLA_UNSPEC,
			  sizeof(struct net_dm_alert_msg));
	if (!nla) {
		nlmsg_free(skb);
		skb = NULL;
		goto err;
	}
	msg = nla_data(nla);
	memset(msg, 0, al);
	goto out;

err:
	mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
	spin_lock_irqsave(&data->lock, flags);
	swap(data->skb, skb);
	spin_unlock_irqrestore(&data->lock, flags);

	if (skb) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

		genlmsg_end(skb, genlmsg_data(gnlh));
	}

	return skb;
}

static const struct genl_multicast_group dropmon_mcgrps[] = {
	{ .name = "events", },
};

static void send_dm_alert(struct work_struct *work)
{
	struct sk_buff *skb;
	struct per_cpu_dm_data *data;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	skb = reset_per_cpu_data(data);

	if (skb)
		genlmsg_multicast(&net_drop_monitor_family, skb, 0,
				  0, GFP_KERNEL);
}

/*
 * This is the timer function to delay the sending of an alert
 * in the event that more drops will arrive during the
 * hysteresis period.
 */
static void sched_send_work(struct timer_list *t)
{
	struct per_cpu_dm_data *data = from_timer(data, t, send_timer);

	schedule_work(&data->dm_alert_work);
}

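/* Record one software drop in the current per-CPU summary alert: either bump
 * the counter of an existing drop point with the same program counter or
 * append a new drop point, and arm the delayed send timer if needed.
 */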
static void trace_drop_common(struct sk_buff *skb, void *location)
{
	struct net_dm_alert_msg *msg;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
	int i;
	struct sk_buff *dskb;
	struct per_cpu_dm_data *data;
	unsigned long flags;

	local_irq_save(flags);
	data = this_cpu_ptr(&dm_cpu_data);
	spin_lock(&data->lock);
	dskb = data->skb;

	if (!dskb)
		goto out;

	nlh = (struct nlmsghdr *)dskb->data;
	nla = genlmsg_data(nlmsg_data(nlh));
	msg = nla_data(nla);
	for (i = 0; i < msg->entries; i++) {
		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
			msg->points[i].count++;
			goto out;
		}
	}
	if (msg->entries == dm_hit_limit)
		goto out;
	/*
	 * We need to create a new entry
	 */
	__nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
	nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
	memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
	msg->points[msg->entries].count = 1;
	msg->entries++;

	if (!timer_pending(&data->send_timer)) {
		data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&data->send_timer);
	}

out:
	spin_unlock_irqrestore(&data->lock, flags);
}

static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{
	trace_drop_common(skb, location);
}

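/* Probe attached to the napi_poll tracepoint: detect increases in a device's
 * rx_dropped counter and account them as drops with an unknown location.
 */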
static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
				int work, int budget)
{
	struct dm_hw_stat_delta *new_stat;

	/*
	 * Don't check napi structures with no associated device
	 */
	if (!napi->dev)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
		/*
		 * only add a note to our monitor buffer if:
		 * 1) this is the dev we received on
		 * 2) it's after the last_rx delta
		 * 3) our rx_dropped count has gone up
		 */
		if ((new_stat->dev == napi->dev) &&
		    (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
		    (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
			trace_drop_common(NULL, NULL);
			new_stat->last_drop_val = napi->dev->stats.rx_dropped;
			new_stat->last_rx = jiffies;
			break;
		}
	}
	rcu_read_unlock();
}

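/* Replace the per-CPU array of hardware trap entries with a newly allocated
 * (empty) one and hand the old array back to the caller for reporting.
 */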
static struct net_dm_hw_entries *
net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
{
	struct net_dm_hw_entries *hw_entries;
	unsigned long flags;

	hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),
			     GFP_KERNEL);
	if (!hw_entries) {
		/* If the memory allocation failed, we try to perform another
		 * allocation in 1/10 second. Otherwise, the probe function
		 * will constantly bail out.
		 */
		mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
	}

	spin_lock_irqsave(&hw_data->lock, flags);
	swap(hw_data->hw_entries, hw_entries);
	spin_unlock_irqrestore(&hw_data->lock, flags);

	return hw_entries;
}

static int net_dm_hw_entry_put(struct sk_buff *msg,
			       const struct net_dm_hw_entry *hw_entry)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_hw_entries_put(struct sk_buff *msg,
				 const struct net_dm_hw_entries *hw_entries)
{
	struct nlattr *attr;
	int i;

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES);
	if (!attr)
		return -EMSGSIZE;

	for (i = 0; i < hw_entries->num_entries; i++) {
		int rc;

		rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);
		if (rc)
			goto nla_put_failure;
	}

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int
net_dm_hw_summary_report_fill(struct sk_buff *msg,
			      const struct net_dm_hw_entries *hw_entries)
{
	struct net_dm_alert_msg anc_hdr = { 0 };
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	/* We need to put the ancillary header in order not to break user
	 * space.
	 */
	if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr))
		goto nla_put_failure;

	rc = net_dm_hw_entries_put(msg, hw_entries);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static void net_dm_hw_summary_work(struct work_struct *work)
{
	struct net_dm_hw_entries *hw_entries;
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *msg;
	int rc;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
	if (!hw_entries)
		return;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_summary_report_fill(msg, hw_entries);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	kfree(hw_entries);
}

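/* Summary-mode probe for hardware drops: aggregate counts per trap name in
 * the per-CPU entries array and schedule a delayed summary alert.
 */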
static void
net_dm_hw_summary_probe(struct sk_buff *skb,
			const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_entries *hw_entries;
	struct net_dm_hw_entry *hw_entry;
	struct per_cpu_dm_data *hw_data;
	unsigned long flags;
	int i;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);
	spin_lock_irqsave(&hw_data->lock, flags);
	hw_entries = hw_data->hw_entries;

	if (!hw_entries)
		goto out;

	for (i = 0; i < hw_entries->num_entries; i++) {
		hw_entry = &hw_entries->entries[i];
		if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
			     NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
			hw_entry->count++;
			goto out;
		}
	}
	if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit))
		goto out;

	hw_entry = &hw_entries->entries[hw_entries->num_entries];
	strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
		NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
	hw_entry->count = 1;
	hw_entries->num_entries++;

	if (!timer_pending(&hw_data->send_timer)) {
		hw_data->send_timer.expires = jiffies + dm_delay * HZ;
		add_timer(&hw_data->send_timer);
	}

out:
	spin_unlock_irqrestore(&hw_data->lock, flags);
}

static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
	.kfree_skb_probe	= trace_kfree_skb_hit,
	.napi_poll_probe	= trace_napi_poll_hit,
	.work_item_func		= send_dm_alert,
	.hw_work_item_func	= net_dm_hw_summary_work,
	.hw_probe		= net_dm_hw_summary_probe,
};

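/* Packet-mode probe for software drops: clone the dropped skb, tag it with
 * the drop location and timestamp, and queue it for reporting to user space.
 * If the queue is full the clone is freed and the 'dropped' statistic is
 * bumped instead.
 */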
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
					      struct sk_buff *skb,
					      void *location)
{
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	NET_DM_SKB_CB(nskb)->pc = location;
	/* Override the timestamp because we care about the time when the
	 * packet was dropped.
	 */
	nskb->tstamp = tstamp;

	data = this_cpu_ptr(&dm_cpu_data);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	schedule_work(&data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
	u64_stats_update_begin(&data->stats.syncp);
	data->stats.dropped++;
	u64_stats_update_end(&data->stats.syncp);
	consume_skb(nskb);
}

static void net_dm_packet_trace_napi_poll_hit(void *ignore,
					      struct napi_struct *napi,
					      int work, int budget)
{
}

static size_t net_dm_in_port_size(void)
{
	       /* NET_DM_ATTR_IN_PORT nest */
	return nla_total_size(0) +
	       /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PORT_NETDEV_NAME */
	       nla_total_size(IFNAMSIZ + 1);
}

#define NET_DM_MAX_SYMBOL_LEN 40

static size_t net_dm_packet_report_size(size_t payload_len)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PC */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_SYMBOL */
	       nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}

static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex,
					    const char *name)
{
	struct nlattr *attr;

	attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
	if (!attr)
		return -EMSGSIZE;

	if (ifindex &&
	    nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
		goto nla_put_failure;

	if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
				     size_t payload_len)
{
	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
	char buf[NET_DM_MAX_SYMBOL_LEN];
	struct nlattr *attr;
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW))
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
	if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
		goto nla_put_failure;

	rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL);
	if (rc)
		goto nla_put_failure;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)

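/* Build and multicast a NET_DM_CMD_PACKET_ALERT message for one queued skb,
 * truncating the payload to the configured length. Consumes the skb.
 */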
static void net_dm_packet_report(struct sk_buff *skb)
{
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	/* Make sure we start copying the packet from the MAC header */
	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	/* Ensure packet fits inside a single netlink attribute */
	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	consume_skb(skb);
}

static void net_dm_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&data->drop_queue, &list);
	spin_unlock_irqrestore(&data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_packet_report(skb);
}

static size_t
net_dm_hw_packet_report_size(size_t payload_len,
			     const struct net_dm_hw_metadata *hw_metadata)
{
	size_t size;

	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);

	return NLMSG_ALIGN(size) +
	       /* NET_DM_ATTR_ORIGIN */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_group_name) + 1) +
	       /* NET_DM_ATTR_HW_TRAP_NAME */
	       nla_total_size(strlen(hw_metadata->trap_name) + 1) +
	       /* NET_DM_ATTR_IN_PORT */
	       net_dm_in_port_size() +
	       /* NET_DM_ATTR_TIMESTAMP */
	       nla_total_size(sizeof(u64)) +
	       /* NET_DM_ATTR_ORIG_LEN */
	       nla_total_size(sizeof(u32)) +
	       /* NET_DM_ATTR_PROTO */
	       nla_total_size(sizeof(u16)) +
	       /* NET_DM_ATTR_PAYLOAD */
	       nla_total_size(payload_len);
}

static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
					struct sk_buff *skb, size_t payload_len)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct nlattr *attr;
	void *hdr;

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;

	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
			  NET_DM_CMD_PACKET_ALERT);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME,
			   hw_metadata->trap_group_name))
		goto nla_put_failure;

	if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME,
			   hw_metadata->trap_name))
		goto nla_put_failure;

	if (hw_metadata->input_dev) {
		struct net_device *dev = hw_metadata->input_dev;
		int rc;

		rc = net_dm_packet_report_in_port_put(msg, dev->ifindex,
						      dev->name);
		if (rc)
			goto nla_put_failure;
	}

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP,
			      ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
		goto nla_put_failure;

	if (!payload_len)
		goto out;

	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
		goto nla_put_failure;

	attr = skb_put(msg, nla_total_size(payload_len));
	attr->nla_type = NET_DM_ATTR_PAYLOAD;
	attr->nla_len = nla_attr_size(payload_len);
	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
		goto nla_put_failure;

out:
	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

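/* Duplicate the hardware drop metadata (trap and group names, input device)
 * with GFP_ATOMIC so it can outlive the caller's context; the input device
 * reference is held until net_dm_hw_metadata_free().
 */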
static struct net_dm_hw_metadata *
net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_metadata *n_hw_metadata;
	const char *trap_group_name;
	const char *trap_name;

	n_hw_metadata = kmalloc(sizeof(*hw_metadata), GFP_ATOMIC);
	if (!n_hw_metadata)
		return NULL;

	trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
	if (!trap_group_name)
		goto free_hw_metadata;
	n_hw_metadata->trap_group_name = trap_group_name;

	trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
	if (!trap_name)
		goto free_trap_group;
	n_hw_metadata->trap_name = trap_name;

	n_hw_metadata->input_dev = hw_metadata->input_dev;
	if (n_hw_metadata->input_dev)
		dev_hold(n_hw_metadata->input_dev);

	return n_hw_metadata;

free_trap_group:
	kfree(trap_group_name);
free_hw_metadata:
	kfree(n_hw_metadata);
	return NULL;
}

static void
net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
{
	if (hw_metadata->input_dev)
		dev_put(hw_metadata->input_dev);
	kfree(hw_metadata->trap_name);
	kfree(hw_metadata->trap_group_name);
	kfree(hw_metadata);
}

static void net_dm_hw_packet_report(struct sk_buff *skb)
{
	struct net_dm_hw_metadata *hw_metadata;
	struct sk_buff *msg;
	size_t payload_len;
	int rc;

	if (skb->data > skb_mac_header(skb))
		skb_push(skb, skb->data - skb_mac_header(skb));
	else
		skb_pull(skb, skb_mac_header(skb) - skb->data);

	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
	if (net_dm_trunc_len)
		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);

	hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
	msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata),
			GFP_KERNEL);
	if (!msg)
		goto out;

	rc = net_dm_hw_packet_report_fill(msg, skb, payload_len);
	if (rc) {
		nlmsg_free(msg);
		goto out;
	}

	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);

out:
	net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata);
	consume_skb(skb);
}

static void net_dm_hw_packet_work(struct work_struct *work)
{
	struct per_cpu_dm_data *hw_data;
	struct sk_buff_head list;
	struct sk_buff *skb;
	unsigned long flags;

	hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work);

	__skb_queue_head_init(&list);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	skb_queue_splice_tail_init(&hw_data->drop_queue, &list);
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	while ((skb = __skb_dequeue(&list)))
		net_dm_hw_packet_report(skb);
}

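/* Packet-mode probe for hardware drops: clone the skb, attach a copy of the
 * trap metadata and queue it for reporting, accounting a drop instead if the
 * queue is already full.
 */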
static void
net_dm_hw_packet_probe(struct sk_buff *skb,
		       const struct net_dm_hw_metadata *hw_metadata)
{
	struct net_dm_hw_metadata *n_hw_metadata;
	ktime_t tstamp = ktime_get_real();
	struct per_cpu_dm_data *hw_data;
	struct sk_buff *nskb;
	unsigned long flags;

	if (!skb_mac_header_was_set(skb))
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return;

	n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
	if (!n_hw_metadata)
		goto free;

	NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata;
	nskb->tstamp = tstamp;

	hw_data = this_cpu_ptr(&dm_hw_cpu_data);

	spin_lock_irqsave(&hw_data->drop_queue.lock, flags);
	if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len)
		__skb_queue_tail(&hw_data->drop_queue, nskb);
	else
		goto unlock_free;
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);

	schedule_work(&hw_data->dm_alert_work);

	return;

unlock_free:
	spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags);
	u64_stats_update_begin(&hw_data->stats.syncp);
	hw_data->stats.dropped++;
	u64_stats_update_end(&hw_data->stats.syncp);
	net_dm_hw_metadata_free(n_hw_metadata);
free:
	consume_skb(nskb);
}

static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
	.kfree_skb_probe	= net_dm_packet_trace_kfree_skb_hit,
	.napi_poll_probe	= net_dm_packet_trace_napi_poll_hit,
	.work_item_func		= net_dm_packet_work,
	.hw_work_item_func	= net_dm_hw_packet_work,
	.hw_probe		= net_dm_hw_packet_probe,
};

static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
	[NET_DM_ALERT_MODE_SUMMARY]	= &net_dm_alert_summary_ops,
	[NET_DM_ALERT_MODE_PACKET]	= &net_dm_alert_packet_ops,
};

void net_dm_hw_report(struct sk_buff *skb,
		      const struct net_dm_hw_metadata *hw_metadata)
{
	rcu_read_lock();

	if (!monitor_hw)
		goto out;

	net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(net_dm_hw_report);

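/* Enable hardware drop monitoring: pin the module, set up the per-CPU work
 * items and timers and reset the per-CPU state for the current alert mode.
 */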
static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu;

	if (monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
		return -EAGAIN;
	}

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_hw_entries *hw_entries;

		INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func);
		timer_setup(&hw_data->send_timer, sched_send_work, 0);
		hw_entries = net_dm_hw_reset_per_cpu_data(hw_data);
		kfree(hw_entries);
	}

	monitor_hw = true;

	return 0;
}

static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
	int cpu;

	if (!monitor_hw) {
		NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
		return;
	}

	monitor_hw = false;

	/* After this call returns we are guaranteed that no CPU is processing
	 * any hardware drops.
	 */
	synchronize_rcu();

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&hw_data->send_timer);
		cancel_work_sync(&hw_data->dm_alert_work);
		while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
			struct net_dm_hw_metadata *hw_metadata;

			hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
			net_dm_hw_metadata_free(hw_metadata);
			consume_skb(skb);
		}
	}

	module_put(THIS_MODULE);
}

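/* Enable software drop monitoring by attaching the current alert mode's
 * probes to the kfree_skb and napi_poll tracepoints.
 */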
static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
{
	const struct net_dm_alert_ops *ops;
	int cpu, rc;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	if (!try_module_get(THIS_MODULE)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		INIT_WORK(&data->dm_alert_work, ops->work_item_func);
		timer_setup(&data->send_timer, sched_send_work, 0);
		/* Allocate a new per-CPU skb for the summary alert message and
		 * free the old one which might contain stale data from
		 * previous tracing.
		 */
		skb = reset_per_cpu_data(data);
		consume_skb(skb);
	}

	rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint");
		goto err_module_put;
	}

	rc = register_trace_napi_poll(ops->napi_poll_probe, NULL);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint");
		goto err_unregister_trace;
	}

	return 0;

err_unregister_trace:
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
err_module_put:
	module_put(THIS_MODULE);
	return rc;
}

static void net_dm_trace_off_set(void)
{
	struct dm_hw_stat_delta *new_stat, *temp;
	const struct net_dm_alert_ops *ops;
	int cpu;

	ops = net_dm_alert_ops_arr[net_dm_alert_mode];

	unregister_trace_napi_poll(ops->napi_poll_probe, NULL);
	unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);

	tracepoint_synchronize_unregister();

	/* Make sure we do not send notifications to user space after request
	 * to stop tracing returns.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct sk_buff *skb;

		del_timer_sync(&data->send_timer);
		cancel_work_sync(&data->dm_alert_work);
		while ((skb = __skb_dequeue(&data->drop_queue)))
			consume_skb(skb);
	}

	list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
		if (new_stat->dev == NULL) {
			list_del_rcu(&new_stat->list);
			kfree_rcu(new_stat, rcu);
		}
	}

	module_put(THIS_MODULE);
}

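/* Switch software drop tracing on or off; called with net_dm_mutex held via
 * the generic netlink pre_doit hook.
 */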
static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
{
	int rc = 0;

	if (state == trace_state) {
		NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state");
		return -EAGAIN;
	}

	switch (state) {
	case TRACE_ON:
		rc = net_dm_trace_on_set(extack);
		break;
	case TRACE_OFF:
		net_dm_trace_off_set();
		break;
	default:
		rc = 1;
		break;
	}

	if (!rc)
		trace_state = state;
	else
		rc = -EINPROGRESS;

	return rc;
}

static bool net_dm_is_monitoring(void)
{
	return trace_state == TRACE_ON || monitor_hw;
}

static int net_dm_alert_mode_get_from_info(struct genl_info *info,
					   enum net_dm_alert_mode *p_alert_mode)
{
	u8 val;

	val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);

	switch (val) {
	case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
	case NET_DM_ALERT_MODE_PACKET:
		*p_alert_mode = val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int net_dm_alert_mode_set(struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	enum net_dm_alert_mode alert_mode;
	int rc;

	if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
		return 0;

	rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
		return rc;
	}

	net_dm_alert_mode = alert_mode;

	return 0;
}

static void net_dm_trunc_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
		return;

	net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
}

static void net_dm_queue_len_set(struct genl_info *info)
{
	if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
		return;

	net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
}

static int net_dm_cmd_config(struct sk_buff *skb,
			     struct genl_info *info)
{
	struct netlink_ext_ack *extack = info->extack;
	int rc;

	if (net_dm_is_monitoring()) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring");
		return -EBUSY;
	}

	rc = net_dm_alert_mode_set(info);
	if (rc)
		return rc;

	net_dm_trunc_len_set(info);

	net_dm_queue_len_set(info);

	return 0;
}

static int net_dm_monitor_start(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	bool sw_set = false;
	int rc;

	if (set_sw) {
		rc = set_all_monitor_traces(TRACE_ON, extack);
		if (rc)
			return rc;
		sw_set = true;
	}

	if (set_hw) {
		rc = net_dm_hw_monitor_start(extack);
		if (rc)
			goto err_monitor_hw;
	}

	return 0;

err_monitor_hw:
	if (sw_set)
		set_all_monitor_traces(TRACE_OFF, extack);
	return rc;
}

static void net_dm_monitor_stop(bool set_sw, bool set_hw,
				struct netlink_ext_ack *extack)
{
	if (set_hw)
		net_dm_hw_monitor_stop(extack);
	if (set_sw)
		set_all_monitor_traces(TRACE_OFF, extack);
}

static int net_dm_cmd_trace(struct sk_buff *skb,
			    struct genl_info *info)
{
	bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS];
	bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS];
	struct netlink_ext_ack *extack = info->extack;

	/* To maintain backward compatibility, we start / stop monitoring of
	 * software drops if no flag is specified.
	 */
	if (!set_sw && !set_hw)
		set_sw = true;

	switch (info->genlhdr->cmd) {
	case NET_DM_CMD_START:
		return net_dm_monitor_start(set_sw, set_hw, extack);
	case NET_DM_CMD_STOP:
		net_dm_monitor_stop(set_sw, set_hw, extack);
		return 0;
	}

	return -EOPNOTSUPP;
}

static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
		goto nla_put_failure;

	if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_config_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}

static void net_dm_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

static int net_dm_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static void net_dm_hw_stats_read(struct net_dm_stats *stats)
{
	int cpu;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(cpu) {
		struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
		struct net_dm_stats *cpu_stats = &hw_data->stats;
		unsigned int start;
		u64 dropped;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			dropped = cpu_stats->dropped;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->dropped += dropped;
	}
}

static int net_dm_hw_stats_put(struct sk_buff *msg)
{
	struct net_dm_stats stats;
	struct nlattr *attr;

	net_dm_hw_stats_read(&stats);

	attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS);
	if (!attr)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
			      stats.dropped, NET_DM_ATTR_PAD))
		goto nla_put_failure;

	nla_nest_end(msg, attr);

	return 0;

nla_put_failure:
	nla_nest_cancel(msg, attr);
	return -EMSGSIZE;
}

static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
{
	void *hdr;
	int rc;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
	if (!hdr)
		return -EMSGSIZE;

	rc = net_dm_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	rc = net_dm_hw_stats_put(msg);
	if (rc)
		goto nla_put_failure;

	genlmsg_end(msg, hdr);

	return 0;

nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	int rc;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	rc = net_dm_stats_fill(msg, info);
	if (rc)
		goto free_msg;

	return genlmsg_reply(msg, info);

free_msg:
	nlmsg_free(msg);
	return rc;
}

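/* Netdevice notifier: maintain the hw_stats_list entries used by the summary
 * mode napi_poll probe as devices are registered and unregistered.
 */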
static int dropmon_net_event(struct notifier_block *ev_block,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dm_hw_stat_delta *new_stat = NULL;
	struct dm_hw_stat_delta *tmp;

	switch (event) {
	case NETDEV_REGISTER:
		new_stat = kzalloc(sizeof(struct dm_hw_stat_delta), GFP_KERNEL);

		if (!new_stat)
			goto out;

		new_stat->dev = dev;
		new_stat->last_rx = jiffies;
		mutex_lock(&net_dm_mutex);
		list_add_rcu(&new_stat->list, &hw_stats_list);
		mutex_unlock(&net_dm_mutex);
		break;
	case NETDEV_UNREGISTER:
		mutex_lock(&net_dm_mutex);
		list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
			if (new_stat->dev == dev) {
				new_stat->dev = NULL;
				if (trace_state == TRACE_OFF) {
					list_del_rcu(&new_stat->list);
					kfree_rcu(new_stat, rcu);
					break;
				}
			}
		}
		mutex_unlock(&net_dm_mutex);
		break;
	}
out:
	return NOTIFY_DONE;
}

static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
	[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
	[NET_DM_ATTR_SW_DROPS]	= { .type = NLA_FLAG },
	[NET_DM_ATTR_HW_DROPS]	= { .type = NLA_FLAG },
};

static const struct genl_ops dropmon_ops[] = {
	{
		.cmd = NET_DM_CMD_CONFIG,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_config,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = NET_DM_CMD_START,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_STOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = net_dm_cmd_trace,
	},
	{
		.cmd = NET_DM_CMD_CONFIG_GET,
		.doit = net_dm_cmd_config_get,
	},
	{
		.cmd = NET_DM_CMD_STATS_GET,
		.doit = net_dm_cmd_stats_get,
	},
};

static int net_dm_nl_pre_doit(const struct genl_ops *ops,
			      struct sk_buff *skb, struct genl_info *info)
{
	mutex_lock(&net_dm_mutex);

	return 0;
}

static void net_dm_nl_post_doit(const struct genl_ops *ops,
				struct sk_buff *skb, struct genl_info *info)
{
	mutex_unlock(&net_dm_mutex);
}

static struct genl_family net_drop_monitor_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= "NET_DM",
	.version	= 2,
	.maxattr	= NET_DM_ATTR_MAX,
	.policy		= net_dm_nl_policy,
	.pre_doit	= net_dm_nl_pre_doit,
	.post_doit	= net_dm_nl_post_doit,
	.module		= THIS_MODULE,
	.ops		= dropmon_ops,
	.n_ops		= ARRAY_SIZE(dropmon_ops),
	.mcgrps		= dropmon_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(dropmon_mcgrps),
};

static struct notifier_block dropmon_net_notifier = {
	.notifier_call = dropmon_net_event
};

static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
	spin_lock_init(&data->lock);
	skb_queue_head_init(&data->drop_queue);
	u64_stats_init(&data->stats.syncp);
}

static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data)
{
	WARN_ON(!skb_queue_empty(&data->drop_queue));
}

static void net_dm_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	__net_dm_cpu_data_init(data);
}

static void net_dm_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *data;

	data = &per_cpu(dm_cpu_data, cpu);
	/* At this point, we should have exclusive access
	 * to this struct and can free the skb inside it.
	 */
	consume_skb(data->skb);
	__net_dm_cpu_data_fini(data);
}

static void net_dm_hw_cpu_data_init(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	__net_dm_cpu_data_init(hw_data);
}

static void net_dm_hw_cpu_data_fini(int cpu)
{
	struct per_cpu_dm_data *hw_data;

	hw_data = &per_cpu(dm_hw_cpu_data, cpu);
	kfree(hw_data->hw_entries);
	__net_dm_cpu_data_fini(hw_data);
}

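/* Module init: register the generic netlink family and the netdevice
 * notifier and initialize the per-CPU state. Monitoring itself starts only
 * when requested over netlink.
 */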
static int __init init_net_drop_monitor(void)
{
	int cpu, rc;

	pr_info("Initializing network drop monitor service\n");

	if (sizeof(void *) > 8) {
		pr_err("Unable to store program counters on this arch, Drop monitor failed\n");
		return -ENOSPC;
	}

	rc = genl_register_family(&net_drop_monitor_family);
	if (rc) {
		pr_err("Could not create drop monitor netlink family\n");
		return rc;
	}
	WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT);

	rc = register_netdevice_notifier(&dropmon_net_notifier);
	if (rc < 0) {
		pr_crit("Failed to register netdevice notifier\n");
		goto out_unreg;
	}

	rc = 0;

	for_each_possible_cpu(cpu) {
		net_dm_cpu_data_init(cpu);
		net_dm_hw_cpu_data_init(cpu);
	}

	goto out;

out_unreg:
	genl_unregister_family(&net_drop_monitor_family);
out:
	return rc;
}

static void exit_net_drop_monitor(void)
{
	int cpu;

	BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier));

	/*
	 * Because of the module_get/put we do in the trace state change path
	 * we are guaranteed not to have any current users when we get here
	 */

	for_each_possible_cpu(cpu) {
		net_dm_hw_cpu_data_fini(cpu);
		net_dm_cpu_data_fini(cpu);
	}

	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
}

module_init(init_net_drop_monitor);
module_exit(exit_net_drop_monitor);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");