// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2019 Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 *	James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */
#include <linux/version.h>
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <uapi/linux/filter.h>
#include "mconsole_kern.h"
#include "vector_user.h"
#include "vector_kern.h"
/*
 * Adapted from network devices with the following major changes:
 * All transports are static - simplifies the code significantly
 * Multiple FDs/IRQs per device
 * Vector IO optionally used for read/write, falling back to legacy
 *	based on configuration and/or availability
 * Configuration is no longer positional - L2TPv3 and GRE require up to
 *	10 parameters, passing this as positional is not fit for purpose.
 *	Only socket transports are supported
 */
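/* An illustrative command line, assuming the "raw" transport and only the
 * option names handled by the accessors in this file (transport specific
 * options such as interface names or tunnel endpoints are parsed by the
 * userspace helpers, not here):
 *
 *	vec0:transport=raw,mtu=1500,depth=128,gro=1,vec=64
 */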
#define DRIVER_NAME "uml-vector"
#define DRIVER_VERSION "01"
struct vector_cmd_line_arg {
	struct list_head list;
	int unit;
	char *arguments;
};
struct vector_device {
	struct list_head list;
	struct net_device *dev;
	struct platform_device pdev;
	int unit;
	int opened;
};
static LIST_HEAD(vec_cmd_line);

static DEFINE_SPINLOCK(vector_devices_lock);
static LIST_HEAD(vector_devices);

static int driver_registered;
static void vector_eth_configure(int n, struct arglist *def);
/* Argument accessors to set variables (and/or set default values)
 * mtu, buffer sizing, default headroom, etc
 */
#define DEFAULT_HEADROOM 2
#define SAFETY_MARGIN 32
#define DEFAULT_VECTOR_SIZE 64
#define TX_SMALL_PACKET 128
#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
#define MAX_ITERATIONS 64
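/* MAX_IOV_SIZE allows one iovec per possible page fragment plus one for the
 * linear part of an skb; MAX_ITERATIONS bounds the receive loop in
 * vector_rx() so a babbling remote end cannot monopolise the IRQ path.
 */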
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_queue_max" },
	{ "rx_queue_running_average" },
	{ "tx_queue_max" },
	{ "tx_queue_running_average" },
	{ "rx_encaps_errors" },
	{ "tx_timeout_count" },
	{ "tx_restart_queue" },
	{ "tx_kicks" },
	{ "tx_flow_control_xon" },
	{ "tx_flow_control_xoff" },
	{ "rx_csum_offload_good" },
	{ "rx_csum_offload_errors" },
	{ "sg_ok" },
	{ "sg_linearized" },
};

#define VECTOR_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static void vector_reset_stats(struct vector_private *vp)
{
	vp->estats.rx_queue_max = 0;
	vp->estats.rx_queue_running_average = 0;
	vp->estats.tx_queue_max = 0;
	vp->estats.tx_queue_running_average = 0;
	vp->estats.rx_encaps_errors = 0;
	vp->estats.tx_timeout_count = 0;
	vp->estats.tx_restart_queue = 0;
	vp->estats.tx_kicks = 0;
	vp->estats.tx_flow_control_xon = 0;
	vp->estats.tx_flow_control_xoff = 0;
	vp->estats.sg_ok = 0;
	vp->estats.sg_linearized = 0;
}
static int get_mtu(struct arglist *def)
{
	char *mtu = uml_vector_fetch_arg(def, "mtu");
	unsigned long result;

	if (mtu != NULL) {
		if (kstrtoul(mtu, 10, &result) == 0)
			if ((result < (1 << 16) - 1) && (result >= 576))
				return result;
	}
	return ETH_MAX_PACKET;
}
static char *get_bpf_file(struct arglist *def)
{
	return uml_vector_fetch_arg(def, "bpffile");
}
static bool get_bpf_flash(struct arglist *def)
{
	char *allow = uml_vector_fetch_arg(def, "bpfflash");
	unsigned long result;

	if (allow != NULL) {
		if (kstrtoul(allow, 10, &result) == 0)
			return result > 0;
	}
	return false;
}
static int get_depth(struct arglist *def)
{
	char *mtu = uml_vector_fetch_arg(def, "depth");
	unsigned long result;

	if (mtu != NULL) {
		if (kstrtoul(mtu, 10, &result) == 0)
			return result;
	}
	return DEFAULT_VECTOR_SIZE;
}
static int get_headroom(struct arglist *def)
{
	char *mtu = uml_vector_fetch_arg(def, "headroom");
	unsigned long result;

	if (mtu != NULL) {
		if (kstrtoul(mtu, 10, &result) == 0)
			return result;
	}
	return DEFAULT_HEADROOM;
}
static int get_req_size(struct arglist *def)
{
	char *gro = uml_vector_fetch_arg(def, "gro");
	unsigned long result;

	if (gro != NULL) {
		if (kstrtoul(gro, 10, &result) == 0) {
			if (result > 0)
				return 65536;
		}
	}
	return get_mtu(def) + ETH_HEADER_OTHER +
		get_headroom(def) + SAFETY_MARGIN;
}
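/* Note: this is only the initial buffer request size. vector_set_features()
 * below switches vp->req_size between this default and 64K when GRO is
 * toggled, so freshly prepped RX buffers follow the current feature set.
 */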
static int get_transport_options(struct arglist *def)
{
	char *transport = uml_vector_fetch_arg(def, "transport");
	char *vector = uml_vector_fetch_arg(def, "vec");

	int vec_rx = VECTOR_RX;
	int vec_tx = VECTOR_TX;
	unsigned long parsed;
	int result = 0;

	if (vector != NULL) {
		if (kstrtoul(vector, 10, &parsed) == 0) {
			if (parsed == 0) {
				vec_rx = 0;
				vec_tx = 0;
			}
		}
	}

	if (get_bpf_flash(def))
		result = VECTOR_BPF_FLASH;

	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return result;
	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
		return (result | vec_rx | VECTOR_BPF);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
	return (result | vec_rx | vec_tx);
}
/* A mini-buffer for packet drop read
 * All of our supported transports are datagram oriented and we always
 * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
 * than the packet size it still counts as a full packet read and will
 * clean the incoming stream to keep sigio/epoll happy
 */

#define DROP_BUFFER_SIZE 32

static char *drop_buffer;
/* Array backed queues optimized for bulk enqueue/dequeue and
 * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
 * For more details and full design rationale see
 * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
 */
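/* Rough layout, summarised here for reference: mmsg_vector and skbuff_vector
 * are parallel arrays of max_depth elements. head and tail are plain indexes
 * advanced modulo max_depth; queue_depth is the number of occupied slots,
 * guarded by head_lock (dequeue side) and tail_lock (enqueue side). When the
 * queue drains to empty, head and tail are reset to 0 so a full-size
 * contiguous burst can be enqueued or sent again without wrapping.
 */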
/*
 * Advance the mmsg queue head by n = advance. Resets the queue to
 * maximum enqueue/dequeue-at-once capacity if possible. Called by
 * dequeuers. Caller must hold the head_lock!
 */
static int vector_advancehead(struct vector_queue *qi, int advance)
{
	int queue_depth;

	qi->head = (qi->head + advance) % qi->max_depth;

	spin_lock(&qi->tail_lock);
	qi->queue_depth -= advance;

	/* we are at 0, use this to
	 * reset head and tail so we can use max size vectors
	 */

	if (qi->queue_depth == 0) {
		qi->head = 0;
		qi->tail = 0;
	}
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}
/* Advance the queue tail by n = advance.
 * This is called by enqueuers which should hold the
 * tail lock.
 */
static int vector_advancetail(struct vector_queue *qi, int advance)
{
	int queue_depth;

	qi->tail = (qi->tail + advance) % qi->max_depth;
	spin_lock(&qi->head_lock);
	qi->queue_depth += advance;
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);
	return queue_depth;
}
static int prep_msg(struct vector_private *vp,
	struct sk_buff *skb,
	struct iovec *iov)
{
	int iov_index = 0;
	int nr_frags, frag;
	skb_frag_t *skb_frag;

	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > MAX_IOV_SIZE) {
		if (skb_linearize(skb) != 0)
			goto drop;
	}
	if (vp->header_size > 0) {
		iov[iov_index].iov_len = vp->header_size;
		vp->form_header(iov[iov_index].iov_base, skb, vp);
		iov_index++;
	}
	iov[iov_index].iov_base = skb->data;
	if (nr_frags > 0) {
		iov[iov_index].iov_len = skb->len - skb->data_len;
		vp->estats.sg_ok++;
	} else
		iov[iov_index].iov_len = skb->len;
	iov_index++;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(skb)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		iov[iov_index].iov_len = skb_frag_size(skb_frag);
		iov_index++;
	}
	return iov_index;
drop:
	return -1;
}
/*
 * Generic vector enqueue with support for forming headers using transport
 * specific callback. Allows GRE, L2TPv3, RAW and other transports
 * to use a common enqueue procedure in vector mode
 */
static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	int queue_depth;
	int packet_len;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	int iov_count;

	spin_lock(&qi->tail_lock);
	spin_lock(&qi->head_lock);
	queue_depth = qi->queue_depth;
	spin_unlock(&qi->head_lock);

	if (skb)
		packet_len = skb->len;

	if (queue_depth < qi->max_depth) {
		*(qi->skbuff_vector + qi->tail) = skb;
		mmsg_vector += qi->tail;
		iov_count = prep_msg(
			vp,
			skb,
			mmsg_vector->msg_hdr.msg_iov
		);
		if (iov_count < 1)
			goto drop;
		mmsg_vector->msg_hdr.msg_iovlen = iov_count;
		mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
		mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
		queue_depth = vector_advancetail(qi, 1);
	} else
		goto drop;
	spin_unlock(&qi->tail_lock);
	return queue_depth;
drop:
	qi->dev->stats.tx_dropped++;
	if (skb != NULL) {
		packet_len = skb->len;
		dev_consume_skb_any(skb);
		netdev_completed_queue(qi->dev, 1, packet_len);
	}
	spin_unlock(&qi->tail_lock);
	return queue_depth;
}
static int consume_vector_skbs(struct vector_queue *qi, int count)
{
	struct sk_buff *skb;
	int skb_index;
	int bytes_compl = 0;

	for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
		skb = *(qi->skbuff_vector + skb_index);
		/* mark as empty to ensure correct destruction if
		 * needed
		 */
		bytes_compl += skb->len;
		*(qi->skbuff_vector + skb_index) = NULL;
		dev_consume_skb_any(skb);
	}
	qi->dev->stats.tx_bytes += bytes_compl;
	qi->dev->stats.tx_packets += count;
	netdev_completed_queue(qi->dev, count, bytes_compl);
	return vector_advancehead(qi, count);
}
/*
 * Generic vector dequeue via sendmmsg with support for forming headers
 * using transport specific callback. Allows GRE, L2TPv3, RAW and
 * other transports to use a common dequeue procedure in vector mode
 */
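/* vector_send() only takes the queue locks with spin_trylock(): if they are
 * contended (the timer, the xmit path and the write IRQ can race here), it
 * reschedules the tx_poll tasklet instead of spinning.
 */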
static int vector_send(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *send_from;
	int result = 0, send_len, queue_depth = qi->max_depth;

	if (spin_trylock(&qi->head_lock)) {
		if (spin_trylock(&qi->tail_lock)) {
			/* update queue_depth to current value */
			queue_depth = qi->queue_depth;
			spin_unlock(&qi->tail_lock);
			while (queue_depth > 0) {
				/* Calculate the start of the vector */
				send_len = queue_depth;
				send_from = qi->mmsg_vector;
				send_from += qi->head;
				/* Adjust vector size if wraparound */
				if (send_len + qi->head > qi->max_depth)
					send_len = qi->max_depth - qi->head;
				/* Try to TX as many packets as possible */
				if (send_len > 0) {
					result = uml_vector_sendmmsg(
						vp->fds->tx_fd,
						send_from,
						send_len,
						0
					);
					vp->in_write_poll =
						(result != send_len);
				}
				/* For some of the sendmmsg error scenarios
				 * we may end up being unsure of the TX success
				 * for all packets. It is safer to declare
				 * them all TX-ed and blame the network.
				 */
				if (result < 0) {
					if (net_ratelimit())
						netdev_err(vp->dev, "sendmmsg err=%i\n",
							result);
					vp->in_error = true;
					result = send_len;
				}
				if (result > 0) {
					queue_depth =
						consume_vector_skbs(qi, result);
					/* This is equivalent to a TX IRQ.
					 * Restart the upper layers to feed us
					 * more packets.
					 */
					if (result > vp->estats.tx_queue_max)
						vp->estats.tx_queue_max = result;
					vp->estats.tx_queue_running_average =
						(vp->estats.tx_queue_running_average + result) >> 1;
				}
				netif_trans_update(qi->dev);
				netif_wake_queue(qi->dev);
				/* if TX is busy, break out of the send loop,
				 * poll write IRQ will reschedule xmit for us
				 */
				if (result != send_len) {
					vp->estats.tx_restart_queue++;
					break;
				}
			}
		}
		spin_unlock(&qi->head_lock);
	} else {
		tasklet_schedule(&vp->tx_poll);
	}
	return queue_depth;
}
/* Queue destructor. Deliberately stateless so we can use
 * it in queue cleanup if initialization fails.
 */
static void destroy_queue(struct vector_queue *qi)
{
	int i;
	struct iovec *iov;
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *mmsg_vector;

	/* deallocate any skbuffs - we rely on any unused to be
	 * set to NULL.
	 */
	if (qi->skbuff_vector != NULL) {
		for (i = 0; i < qi->max_depth; i++) {
			if (*(qi->skbuff_vector + i) != NULL)
				dev_kfree_skb_any(*(qi->skbuff_vector + i));
		}
		kfree(qi->skbuff_vector);
	}
	/* deallocate matching IOV structures including header buffs */
	if (qi->mmsg_vector != NULL) {
		mmsg_vector = qi->mmsg_vector;
		for (i = 0; i < qi->max_depth; i++) {
			iov = mmsg_vector->msg_hdr.msg_iov;
			if (iov != NULL) {
				if ((vp->header_size > 0) &&
					(iov->iov_base != NULL))
					kfree(iov->iov_base);
				kfree(iov);
			}
			mmsg_vector++;
		}
		kfree(qi->mmsg_vector);
	}
	kfree(qi);
}
/*
 * Queue constructor. Create a queue with a given size.
 */
static struct vector_queue *create_queue(
	struct vector_private *vp,
	int max_size,
	int header_size,
	int num_extra_frags)
{
	struct vector_queue *result;
	int i;
	struct iovec *iov;
	struct mmsghdr *mmsg_vector;

	result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
	if (result == NULL)
		return NULL;
	result->max_depth = max_size;
	result->dev = vp->dev;
	result->mmsg_vector = kmalloc(
		(sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
	if (result->mmsg_vector == NULL)
		goto out_mmsg_fail;
	result->skbuff_vector = kmalloc(
		(sizeof(void *) * max_size), GFP_KERNEL);
	if (result->skbuff_vector == NULL)
		goto out_skb_fail;

	/* further failures can be handled safely by destroy_queue */

	mmsg_vector = result->mmsg_vector;
	for (i = 0; i < max_size; i++) {
		/* Clear all pointers - we use non-NULL as marking on
		 * what to free on destruction
		 */
		*(result->skbuff_vector + i) = NULL;
		mmsg_vector->msg_hdr.msg_iov = NULL;
		mmsg_vector++;
	}
	mmsg_vector = result->mmsg_vector;
	result->max_iov_frags = num_extra_frags;
	for (i = 0; i < max_size; i++) {
		if (vp->header_size > 0)
			iov = kmalloc_array(3 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL);
		else
			iov = kmalloc_array(2 + num_extra_frags,
					    sizeof(struct iovec),
					    GFP_KERNEL);
		if (iov == NULL)
			goto out_fail;
		mmsg_vector->msg_hdr.msg_iov = iov;
		mmsg_vector->msg_hdr.msg_iovlen = 1;
		mmsg_vector->msg_hdr.msg_control = NULL;
		mmsg_vector->msg_hdr.msg_controllen = 0;
		mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
		mmsg_vector->msg_hdr.msg_name = NULL;
		mmsg_vector->msg_hdr.msg_namelen = 0;
		if (vp->header_size > 0) {
			iov->iov_base = kmalloc(header_size, GFP_KERNEL);
			if (iov->iov_base == NULL)
				goto out_fail;
			iov->iov_len = header_size;
			mmsg_vector->msg_hdr.msg_iovlen = 2;
			iov++;
		} else
			iov->iov_base = NULL;
		mmsg_vector++;
	}
	spin_lock_init(&result->head_lock);
	spin_lock_init(&result->tail_lock);
	result->queue_depth = 0;
	result->head = 0;
	result->tail = 0;
	return result;
out_skb_fail:
	kfree(result->mmsg_vector);
out_mmsg_fail:
	kfree(result);
	return NULL;
out_fail:
	destroy_queue(result);
	return NULL;
}
/*
 * We do not use the RX queue as a proper wraparound queue for now
 * This is not necessary because the consumption via netif_rx()
 * happens in-line. While we can try using the return code of
 * netif_rx() for flow control there are no drivers doing this today.
 * For this RX specific use we ignore the tail/head locks and
 * just read into a prepared queue filled with skbuffs.
 */
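/* prep_skb() below fills msg->msg_iov in the same layout prep_msg() uses on
 * TX: slot 0 is left to the encapsulation header buffer when header_size > 0,
 * then the linear part of the skb, then one slot per page fragment.
 */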
static struct sk_buff *prep_skb(
	struct vector_private *vp,
	struct user_msghdr *msg)
{
	int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	struct sk_buff *result;
	int iov_index = 0, len;
	struct iovec *iov = msg->msg_iov;
	int err, nr_frags, frag;
	skb_frag_t *skb_frag;

	if (vp->req_size <= linear)
		len = linear;
	else
		len = vp->req_size;
	result = alloc_skb_with_frags(
		linear,
		len - vp->max_packet,
		3,
		&err,
		GFP_ATOMIC
	);
	if (vp->header_size > 0)
		iov_index++;
	if (result == NULL) {
		iov[iov_index].iov_base = NULL;
		iov[iov_index].iov_len = 0;
		goto done;
	}
	skb_reserve(result, vp->headroom);
	result->dev = vp->dev;
	skb_put(result, vp->max_packet);
	result->data_len = len - vp->max_packet;
	result->len += len - vp->max_packet;
	skb_reset_mac_header(result);
	result->ip_summed = CHECKSUM_NONE;
	iov[iov_index].iov_base = result->data;
	iov[iov_index].iov_len = vp->max_packet;
	iov_index++;

	nr_frags = skb_shinfo(result)->nr_frags;
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag = &skb_shinfo(result)->frags[frag];
		iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
		if (iov[iov_index].iov_base != NULL)
			iov[iov_index].iov_len = skb_frag_size(skb_frag);
		else
			iov[iov_index].iov_len = 0;
		iov_index++;
	}
done:
	msg->msg_iovlen = iov_index;
	return result;
}
/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */
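/* The RX queue_depth doubles as a "buffers consumed, please refill" counter:
 * vector_mmsg_rx() sets it to the number of packets received, and this
 * function replaces exactly that many skbs on the next pass.
 */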
static void prep_queue_for_rx(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	void **skbuff_vector = qi->skbuff_vector;
	int i;

	if (qi->queue_depth == 0)
		return;
	for (i = 0; i < qi->queue_depth; i++) {
		/* it is OK if allocation fails - recvmmsg with NULL data in
		 * iov argument still performs an RX, just drops the packet
		 * This allows us to stop faffing around with a "drop buffer"
		 */

		*skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
		skbuff_vector++;
		mmsg_vector++;
	}
	qi->queue_depth = 0;
}
static struct vector_device *find_device(int n)
{
	struct vector_device *device;
	struct list_head *ele;

	spin_lock(&vector_devices_lock);
	list_for_each(ele, &vector_devices) {
		device = list_entry(ele, struct vector_device, list);
		if (device->unit == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&vector_devices_lock);
	return device;
}
static int vector_parse(char *str, int *index_out, char **str_out,
			char **error_out)
{
	int err;
	unsigned int n;
	char *start = str;

	while ((*str != ':') && (strlen(str) > 1))
		str++;
	if (*str != ':') {
		*error_out = "Expected ':' after device number";
		return -EINVAL;
	}
	*str = '\0';

	err = kstrtouint(start, 0, &n);
	if (err < 0) {
		*error_out = "Bad device number";
		return err;
	}

	str++;
	if (find_device(n)) {
		*error_out = "Device already configured";
		return -EINVAL;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}
static int vector_config(char *str, char **error_out)
{
	int err, n;
	char *params;
	struct arglist *parsed;

	err = vector_parse(str, &n, &params, error_out);
	if (err != 0)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver. We should copy it to make sure things do not go wrong
	 * later.
	 */

	params = kstrdup(params, GFP_KERNEL);
	if (params == NULL) {
		*error_out = "vector_config failed to strdup string";
		return -ENOMEM;
	}

	parsed = uml_parse_vector_ifspec(params);

	if (parsed == NULL) {
		*error_out = "vector_config failed to parse parameters";
		return -EINVAL;
	}

	vector_eth_configure(n, parsed);
	return 0;
}
static int vector_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}
static int vector_remove(int n, char **error_out)
{
	struct vector_device *vec_d;
	struct net_device *dev;
	struct vector_private *vp;

	vec_d = find_device(n);
	if (vec_d == NULL)
		return -ENODEV;
	dev = vec_d->dev;
	vp = netdev_priv(dev);
	if (vp->fds != NULL)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&vec_d->pdev);
	return 0;
}
/*
 * There is no shared per-transport initialization code, so
 * we will just initialize each interface one by one and
 * add them to a list
 */

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};
static void vector_device_release(struct device *dev)
{
	struct vector_device *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;

	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}
/* Bog standard recv using recvmsg - not used normally unless the user
 * explicitly specifies not to use recvmmsg vector RX.
 */
static int vector_legacy_rx(struct vector_private *vp)
{
	int pkt_len;
	struct user_msghdr hdr;
	struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */
	int iovpos = 0;
	struct sk_buff *skb;
	int header_check;

	hdr.msg_name = NULL;
	hdr.msg_namelen = 0;
	hdr.msg_iov = (struct iovec *) &iov;
	hdr.msg_control = NULL;
	hdr.msg_controllen = 0;
	hdr.msg_flags = 0;

	if (vp->header_size > 0) {
		iov[0].iov_base = vp->header_rxbuffer;
		iov[0].iov_len = vp->header_size;
	}

	skb = prep_skb(vp, &hdr);

	if (skb == NULL) {
		/* Read a packet into drop_buffer and don't do
		 * anything else with it.
		 */
		iov[iovpos].iov_base = drop_buffer;
		iov[iovpos].iov_len = DROP_BUFFER_SIZE;
		hdr.msg_iovlen = 1;
		vp->dev->stats.rx_dropped++;
	}

	pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);
	if (pkt_len < 0) {
		vp->in_error = true;
		return pkt_len;
	}

	if (skb != NULL) {
		if (pkt_len > vp->header_size) {
			if (vp->header_size > 0) {
				header_check = vp->verify_header(
					vp->header_rxbuffer, skb, vp);
				if (header_check < 0) {
					dev_kfree_skb_irq(skb);
					vp->dev->stats.rx_dropped++;
					vp->estats.rx_encaps_errors++;
					return 0;
				}
				if (header_check > 0) {
					vp->estats.rx_csum_offload_good++;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}
			pskb_trim(skb, pkt_len - vp->rx_header_size);
			skb->protocol = eth_type_trans(skb, skb->dev);
			vp->dev->stats.rx_bytes += skb->len;
			vp->dev->stats.rx_packets++;
			netif_rx(skb);
		} else {
			dev_kfree_skb_irq(skb);
		}
	}
	return pkt_len;
}
/*
 * Packet at a time TX which falls back to vector TX if the
 * underlying transport is busy.
 */
static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
{
	struct iovec iov[3 + MAX_IOV_SIZE];
	int iov_count, pkt_len = 0;

	iov[0].iov_base = vp->header_txbuffer;
	iov_count = prep_msg(vp, skb, (struct iovec *) &iov);

	if (iov_count < 1)
		goto drop;

	pkt_len = uml_vector_writev(
		vp->fds->tx_fd,
		(struct iovec *) &iov,
		iov_count
	);

	if (pkt_len < 0)
		goto drop;

	netif_trans_update(vp->dev);
	netif_wake_queue(vp->dev);

	if (pkt_len > 0) {
		vp->dev->stats.tx_bytes += skb->len;
		vp->dev->stats.tx_packets++;
	} else {
		vp->dev->stats.tx_dropped++;
	}
	consume_skb(skb);
	return pkt_len;
drop:
	vp->dev->stats.tx_dropped++;
	consume_skb(skb);
	if (pkt_len < 0)
		vp->in_error = true;
	return pkt_len;
}
/*
 * Receive as many messages as we can in one call using the special
 * mmsg vector matched to an skb vector which we prepared earlier.
 */
static int vector_mmsg_rx(struct vector_private *vp)
{
	int packet_count, i;
	struct vector_queue *qi = vp->rx_queue;
	struct sk_buff *skb;
	struct mmsghdr *mmsg_vector = qi->mmsg_vector;
	void **skbuff_vector = qi->skbuff_vector;
	int header_check;

	/* Refresh the vector and make sure it is with new skbs and the
	 * iovs are updated to point to them.
	 */

	prep_queue_for_rx(qi);

	/* Fire the Lazy Gun - get as many packets as we can in one go. */

	packet_count = uml_vector_recvmmsg(
		vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);

	if (packet_count < 0)
		vp->in_error = true;

	if (packet_count <= 0)
		return packet_count;

	/* We treat packet processing as enqueue, buffer refresh as dequeue
	 * The queue_depth tells us how many buffers have been used and how
	 * many do we need to prep the next time prep_queue_for_rx() is called.
	 */

	qi->queue_depth = packet_count;

	for (i = 0; i < packet_count; i++) {
		skb = (*skbuff_vector);
		if (mmsg_vector->msg_len > vp->header_size) {
			if (vp->header_size > 0) {
				header_check = vp->verify_header(
					mmsg_vector->msg_hdr.msg_iov->iov_base,
					skb,
					vp
				);
				if (header_check < 0) {
				/* Overlay header failed to verify - discard.
				 * We can actually keep this skb and reuse it,
				 * but that will make the prep logic too
				 * complex.
				 */
					dev_kfree_skb_irq(skb);
					vp->estats.rx_encaps_errors++;
					continue;
				}
				if (header_check > 0) {
					vp->estats.rx_csum_offload_good++;
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}
			pskb_trim(skb,
				mmsg_vector->msg_len - vp->rx_header_size);
			skb->protocol = eth_type_trans(skb, skb->dev);
			/*
			 * We do not need to lock on updating stats here
			 * The interrupt loop is non-reentrant.
			 */
			vp->dev->stats.rx_bytes += skb->len;
			vp->dev->stats.rx_packets++;
			netif_rx(skb);
		} else {
			/* Overlay header too short to do anything - discard.
			 * We can actually keep this skb and reuse it,
			 * but that will make the prep logic too complex.
			 */
			dev_kfree_skb_irq(skb);
		}
		(*skbuff_vector) = NULL;
		/* Move to the next buffer element */
		mmsg_vector++;
		skbuff_vector++;
	}
	if (packet_count > 0) {
		if (vp->estats.rx_queue_max < packet_count)
			vp->estats.rx_queue_max = packet_count;
		vp->estats.rx_queue_running_average =
			(vp->estats.rx_queue_running_average + packet_count) >> 1;
	}
	return packet_count;
}
static void vector_rx(struct vector_private *vp)
{
	int err;
	int iter = 0;

	if ((vp->options & VECTOR_RX) > 0)
		while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
			iter++;
	else
		while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
			iter++;
	if ((err != 0) && net_ratelimit())
		netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
	if (iter == MAX_ITERATIONS)
		netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
}
static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	int queue_depth = 0;

	if (vp->in_error) {
		deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
		if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
			deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
		return NETDEV_TX_BUSY;
	}

	if ((vp->options & VECTOR_TX) == 0) {
		writev_tx(vp, skb);
		return NETDEV_TX_OK;
	}

	/* We do BQL only in the vector path, no point doing it in
	 * packet at a time mode as there is no device queue
	 */

	netdev_sent_queue(vp->dev, skb->len);
	queue_depth = vector_enqueue(vp->tx_queue, skb);

	/* if the device queue is full, stop the upper layers and
	 * flush it.
	 */

	if (queue_depth >= vp->tx_queue->max_depth - 1) {
		vp->estats.tx_kicks++;
		netif_stop_queue(dev);
		vector_send(vp->tx_queue);
		return NETDEV_TX_OK;
	}
	if (netdev_xmit_more()) {
		mod_timer(&vp->tl, vp->coalesce);
		return NETDEV_TX_OK;
	}
	if (skb->len < TX_SMALL_PACKET) {
		vp->estats.tx_kicks++;
		vector_send(vp->tx_queue);
	} else
		tasklet_schedule(&vp->tx_poll);
	return NETDEV_TX_OK;
}
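/* IRQ handlers. The RX handler drains the socket; the TX handler only matters
 * when a previous sendmmsg could not transmit the whole vector (in_write_poll
 * is set), in which case it kicks the tx_poll tasklet.
 */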
static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vector_private *vp = netdev_priv(dev);

	if (!netif_running(dev))
		return IRQ_NONE;
	vector_rx(vp);
	return IRQ_HANDLED;
}
static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vector_private *vp = netdev_priv(dev);

	if (!netif_running(dev))
		return IRQ_NONE;
	/* We need to pay attention to it only if we got
	 * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
	 * we ignore it. In the future, it may be worth
	 * it to improve the IRQ controller a bit to make
	 * tweaking the IRQ mask less costly
	 */

	if (vp->in_write_poll)
		tasklet_schedule(&vp->tx_poll);
	return IRQ_HANDLED;
}
static int vector_net_close(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	del_timer(&vp->tl);

	if (vp->fds == NULL)
		return 0;

	/* Disable and free all IRQS */
	if (vp->rx_irq > 0) {
		um_free_irq(vp->rx_irq, dev);
		vp->rx_irq = 0;
	}
	if (vp->tx_irq > 0) {
		um_free_irq(vp->tx_irq, dev);
		vp->tx_irq = 0;
	}
	tasklet_kill(&vp->tx_poll);
	if (vp->fds->rx_fd > 0) {
		if (vp->bpf != NULL)
			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
		os_close_file(vp->fds->rx_fd);
		vp->fds->rx_fd = -1;
	}
	if (vp->fds->tx_fd > 0) {
		os_close_file(vp->fds->tx_fd);
		vp->fds->tx_fd = -1;
	}
	if (vp->bpf != NULL)
		kfree(vp->bpf->filter);
	kfree(vp->bpf);
	vp->bpf = NULL;
	kfree(vp->fds->remote_addr);
	kfree(vp->transport_data);
	kfree(vp->header_rxbuffer);
	kfree(vp->header_txbuffer);
	if (vp->rx_queue != NULL)
		destroy_queue(vp->rx_queue);
	if (vp->tx_queue != NULL)
		destroy_queue(vp->tx_queue);
	kfree(vp->fds);
	vp->fds = NULL;
	spin_lock_irqsave(&vp->lock, flags);
	vp->opened = false;
	vp->in_error = false;
	spin_unlock_irqrestore(&vp->lock, flags);
	return 0;
}
static void vector_tx_poll(unsigned long data)
{
	struct vector_private *vp = (struct vector_private *)data;

	vp->estats.tx_kicks++;
	vector_send(vp->tx_queue);
}
static void vector_reset_tx(struct work_struct *work)
{
	struct vector_private *vp =
		container_of(work, struct vector_private, reset_tx);
	netdev_reset_queue(vp->dev);
	netif_start_queue(vp->dev);
	netif_wake_queue(vp->dev);
}
static int irq_rr;

static int vector_net_open(struct net_device *dev)
{
	struct vector_private *vp = netdev_priv(dev);
	unsigned long flags;
	int err = -EINVAL;
	struct vector_device *vdevice;

	spin_lock_irqsave(&vp->lock, flags);
	if (vp->opened) {
		spin_unlock_irqrestore(&vp->lock, flags);
		return -ENXIO;
	}
	vp->opened = true;
	spin_unlock_irqrestore(&vp->lock, flags);

	vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));

	vp->fds = uml_vector_user_open(vp->unit, vp->parsed);

	if (vp->fds == NULL)
		goto out_close;

	if (build_transport_data(vp) < 0)
		goto out_close;

	if ((vp->options & VECTOR_RX) > 0) {
		vp->rx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->rx_header_size,
			MAX_IOV_SIZE
		);
		vp->rx_queue->queue_depth = get_depth(vp->parsed);
	} else {
		vp->header_rxbuffer = kmalloc(
			vp->rx_header_size,
			GFP_KERNEL
		);
		if (vp->header_rxbuffer == NULL)
			goto out_close;
	}
	if ((vp->options & VECTOR_TX) > 0) {
		vp->tx_queue = create_queue(
			vp,
			get_depth(vp->parsed),
			vp->header_size,
			MAX_IOV_SIZE
		);
	} else {
		vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
		if (vp->header_txbuffer == NULL)
			goto out_close;
	}

	/* READ IRQ */
	err = um_request_irq(
		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
			IRQ_READ, vector_rx_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (err != 0) {
		netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}
	vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
	dev->irq = irq_rr + VECTOR_BASE_IRQ;
	irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;

	/* WRITE IRQ - we need it only if we have vector TX */
	if ((vp->options & VECTOR_TX) > 0) {
		err = um_request_irq(
			irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
				IRQ_WRITE, vector_tx_interrupt,
				IRQF_SHARED, dev->name, dev);
		if (err != 0) {
			netdev_err(dev,
				"vector_open: failed to get tx irq(%d)\n", err);
			err = -ENETUNREACH;
			goto out_close;
		}
		vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
		irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;
	}

	if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
		if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
			vp->options |= VECTOR_BPF;
	}
	if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
		vp->bpf = uml_vector_default_bpf(dev->dev_addr);

	if (vp->bpf != NULL)
		uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */

	vector_rx(vp);

	vector_reset_stats(vp);
	vdevice = find_device(vp->unit);
	vdevice->opened = 1;

	if ((vp->options & VECTOR_TX) != 0)
		add_timer(&vp->tl);
	return 0;
out_close:
	vector_net_close(dev);
	return err;
}
static void vector_net_set_multicast_list(struct net_device *dev)
{
	/* TODO: - we can do some BPF games here */
	return;
}
static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct vector_private *vp = netdev_priv(dev);

	vp->estats.tx_timeout_count++;
	netif_trans_update(dev);
	schedule_work(&vp->reset_tx);
}
static netdev_features_t vector_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	return features;
}
static int vector_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct vector_private *vp = netdev_priv(dev);
	/* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
	 * no way to negotiate it on raw sockets, so we can change
	 * only our side.
	 */
	if (features & NETIF_F_GRO)
		/* All new frame buffers will be GRO-sized */
		vp->req_size = 65536;
	else
		/* All new frame buffers will be normal sized */
		vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vector_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	vector_rx_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
static void vector_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static int vector_net_load_bpf_flash(struct net_device *dev,
				struct ethtool_flash *efl)
{
	struct vector_private *vp = netdev_priv(dev);
	struct vector_device *vdevice;
	const struct firmware *fw;
	int result = 0;

	if (!(vp->options & VECTOR_BPF_FLASH)) {
		netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);
		return -1;
	}

	spin_lock(&vp->lock);

	if (vp->bpf != NULL) {
		if (vp->opened)
			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
		kfree(vp->bpf->filter);
		vp->bpf->filter = NULL;
	} else {
		vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
		if (vp->bpf == NULL) {
			netdev_err(dev, "failed to allocate memory for firmware\n");
			goto flash_fail;
		}
	}

	vdevice = find_device(vp->unit);

	if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))
		goto flash_fail;

	vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!vp->bpf->filter)
		goto free_buffer;

	vp->bpf->len = fw->size / sizeof(struct sock_filter);
	release_firmware(fw);

	if (vp->opened)
		result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

	spin_unlock(&vp->lock);

	return result;

free_buffer:
	release_firmware(fw);

flash_fail:
	spin_unlock(&vp->lock);
	if (vp->bpf != NULL)
		kfree(vp->bpf->filter);
	kfree(vp->bpf);
	vp->bpf = NULL;
	return -1;
}
static void vector_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct vector_private *vp = netdev_priv(netdev);

	ring->rx_max_pending = vp->rx_queue->max_depth;
	ring->tx_max_pending = vp->tx_queue->max_depth;
	ring->rx_pending = vp->rx_queue->max_depth;
	ring->tx_pending = vp->tx_queue->max_depth;
}
static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_TEST:
		*buf = '\0';
		break;
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	default:
		WARN_ON(1);
		break;
	}
}
static int vector_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return 0;
	case ETH_SS_STATS:
		return VECTOR_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void vector_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *estats,
	u64 *tmp_stats)
{
	struct vector_private *vp = netdev_priv(dev);

	memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
}
static int vector_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct vector_private *vp = netdev_priv(netdev);

	ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
	return 0;
}
static int vector_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct vector_private *vp = netdev_priv(netdev);

	vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
	if (vp->coalesce == 0)
		vp->coalesce = 1;
	return 0;
}
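/* Worked example, assuming HZ=100: tx_coalesce_usecs=20000 gives
 * vp->coalesce = 20000 * 100 / 1000000 = 2 jiffies; anything under one
 * jiffy is rounded up to 1 so the flush timer always fires.
 */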
static const struct ethtool_ops vector_net_ethtool_ops = {
	.get_drvinfo	= vector_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_ringparam	= vector_get_ringparam,
	.get_strings	= vector_get_strings,
	.get_sset_count	= vector_get_sset_count,
	.get_ethtool_stats = vector_get_ethtool_stats,
	.get_coalesce	= vector_get_coalesce,
	.set_coalesce	= vector_set_coalesce,
	.flash_device	= vector_net_load_bpf_flash,
};
static const struct net_device_ops vector_netdev_ops = {
	.ndo_open		= vector_net_open,
	.ndo_stop		= vector_net_close,
	.ndo_start_xmit		= vector_net_start_xmit,
	.ndo_set_rx_mode	= vector_net_set_multicast_list,
	.ndo_tx_timeout		= vector_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_fix_features	= vector_fix_features,
	.ndo_set_features	= vector_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= vector_net_poll_controller,
#endif
};
static void vector_timer_expire(struct timer_list *t)
{
	struct vector_private *vp = from_timer(vp, t, tl);

	vp->estats.tx_kicks++;
	vector_send(vp->tx_queue);
}
static void vector_eth_configure(
		int n,
		struct arglist *def
	)
{
	struct vector_device *device;
	struct net_device *dev;
	struct vector_private *vp;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
				 "vector_device\n");
		return;
	}
	dev = alloc_etherdev(sizeof(struct vector_private));
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
				 "net_device for vec%d\n", n);
		goto out_free_device;
	}

	dev->mtu = get_mtu(def);

	INIT_LIST_HEAD(&device->list);
	device->unit = n;
	device->dev = dev;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "vec%d", n);
	uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
	vp = netdev_priv(dev);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = vector_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	*vp = ((struct vector_private)
		{
		.list			= LIST_HEAD_INIT(vp->list),
		.dev			= dev,
		.unit			= n,
		.options		= get_transport_options(def),
		.rx_irq			= 0,
		.tx_irq			= 0,
		.parsed			= def,
		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
		/* TODO - we need to calculate headroom so that ip header
		 * is 16 byte aligned all the time
		 */
		.headroom		= get_headroom(def),
		.form_header		= NULL,
		.verify_header		= NULL,
		.header_rxbuffer	= NULL,
		.header_txbuffer	= NULL,
		.header_size		= 0,
		.rx_header_size		= 0,
		.rexmit_scheduled	= false,
		.opened			= false,
		.transport_data		= NULL,
		.in_write_poll		= false,
		.coalesce		= 2,
		.req_size		= get_req_size(def),
		.in_error		= false,
		.bpf			= NULL
	});

	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
	tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);
	INIT_WORK(&vp->reset_tx, vector_reset_tx);

	timer_setup(&vp->tl, vector_timer_expire, 0);
	spin_lock_init(&vp->lock);

	dev->netdev_ops = &vector_netdev_ops;
	dev->ethtool_ops = &vector_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	/* primary IRQ - fixme */
	dev->irq = 0; /* we will adjust this once opened */

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&vector_devices_lock);
	list_add(&device->list, &vector_devices);
	spin_unlock(&vector_devices_lock);

	return;

out_undo_user_init:
	return;
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}
/*
 * Invoked late in the init
 */
static int __init vector_init(void)
{
	struct list_head *ele;
	struct vector_cmd_line_arg *def;
	struct arglist *parsed;

	list_for_each(ele, &vec_cmd_line) {
		def = list_entry(ele, struct vector_cmd_line_arg, list);
		parsed = uml_parse_vector_ifspec(def->arguments);
		if (parsed != NULL)
			vector_eth_configure(def->unit, parsed);
	}
	return 0;
}
/* Invoked at initial argument parsing, only stores
 * arguments until a proper vector_init is called
 */
static int __init vector_setup(char *str)
{
	char *error;
	int n, err;
	struct vector_cmd_line_arg *new;

	err = vector_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
				 str, error);
		return 1;
	}
	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));
	INIT_LIST_HEAD(&new->list);
	new->unit = n;
	new->arguments = str;
	list_add_tail(&new->list, &vec_cmd_line);
	return 1;
}

__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
"	 Configure a vector io network device.\n\n"
);

late_initcall(vector_init);
static struct mc_device vector_mc = {
	.list		= LIST_HEAD_INIT(vector_mc.list),
	.name		= "vec",
	.config		= vector_config,
	.get_config	= NULL,
	.id		= vector_id,
	.remove		= vector_remove,
};
#ifdef CONFIG_INET
static int vector_inetaddr_event(
	struct notifier_block *this,
	unsigned long event,
	void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block vector_inetaddr_notifier = {
	.notifier_call		= vector_inetaddr_event,
};
static void inet_register(void)
{
	register_inetaddr_notifier(&vector_inetaddr_notifier);
}
#else
static inline void inet_register(void)
{
}
#endif
static int vector_net_init(void)
{
	mconsole_register_dev(&vector_mc);
	inet_register();
	return 0;
}

__initcall(vector_net_init);