/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */
19 #include <linux/config.h>
20 #include <linux/errno.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/kernel.h>
25 #include <linux/major.h>
26 #include <linux/sched.h>
27 #include <linux/timer.h>
28 #include <linux/string.h>
29 #include <linux/sockios.h>
30 #include <linux/net.h>
31 #include <linux/fcntl.h>
33 #include <linux/slab.h>
34 #include <linux/interrupt.h>
35 #include <linux/capability.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/security.h>
40 #include <asm/uaccess.h>
41 #include <asm/system.h>
42 #include <asm/string.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
47 #include <net/protocol.h>
49 #include <net/route.h>
52 #include <net/pkt_sched.h>
/* The rtnetlink semaphore ("the RTNL"): serializes rtnetlink requests,
 * taken via rtnl_lock_interruptible() and friends. */
DECLARE_MUTEX(rtnl_sem);
61 int rtnl_lock_interruptible(void)
63 return down_interruptible(&rtnl_sem
);
/*
 * Release the rtnl semaphore and flush deferred netdevice teardown work.
 *
 * NOTE(review): the body was reconstructed from the upstream source
 * (rtnl_shunlock() + netdev_run_todo()) -- confirm against the original
 * tree, as the original lines are not visible here.
 */
void rtnl_unlock(void)
{
	rtnl_shunlock();

	netdev_run_todo();
}
73 int rtattr_parse(struct rtattr
*tb
[], int maxattr
, struct rtattr
*rta
, int len
)
75 memset(tb
, 0, sizeof(struct rtattr
*)*maxattr
);
77 while (RTA_OK(rta
, len
)) {
78 unsigned flavor
= rta
->rta_type
;
79 if (flavor
&& flavor
<= maxattr
)
81 rta
= RTA_NEXT(rta
, len
);
88 struct rtnetlink_link
* rtnetlink_links
[NPROTO
];
90 static const int rtm_min
[(RTM_MAX
+1-RTM_BASE
)/4] =
92 NLMSG_LENGTH(sizeof(struct ifinfomsg
)),
93 NLMSG_LENGTH(sizeof(struct ifaddrmsg
)),
94 NLMSG_LENGTH(sizeof(struct rtmsg
)),
95 NLMSG_LENGTH(sizeof(struct ndmsg
)),
96 NLMSG_LENGTH(sizeof(struct rtmsg
)),
97 NLMSG_LENGTH(sizeof(struct tcmsg
)),
98 NLMSG_LENGTH(sizeof(struct tcmsg
)),
99 NLMSG_LENGTH(sizeof(struct tcmsg
)),
100 NLMSG_LENGTH(sizeof(struct tcamsg
))
103 static const int rta_max
[(RTM_MAX
+1-RTM_BASE
)/4] =
116 void __rta_fill(struct sk_buff
*skb
, int attrtype
, int attrlen
, const void *data
)
119 int size
= RTA_LENGTH(attrlen
);
121 rta
= (struct rtattr
*)skb_put(skb
, RTA_ALIGN(size
));
122 rta
->rta_type
= attrtype
;
124 memcpy(RTA_DATA(rta
), data
, attrlen
);
127 size_t rtattr_strlcpy(char *dest
, const struct rtattr
*rta
, size_t size
)
129 size_t ret
= RTA_PAYLOAD(rta
);
130 char *src
= RTA_DATA(rta
);
132 if (ret
> 0 && src
[ret
- 1] == '\0')
135 size_t len
= (ret
>= size
) ? size
- 1 : ret
;
136 memset(dest
, 0, size
);
137 memcpy(dest
, src
, len
);
142 int rtnetlink_send(struct sk_buff
*skb
, u32 pid
, unsigned group
, int echo
)
146 NETLINK_CB(skb
).dst_groups
= group
;
148 atomic_inc(&skb
->users
);
149 netlink_broadcast(rtnl
, skb
, pid
, group
, GFP_KERNEL
);
151 err
= netlink_unicast(rtnl
, skb
, pid
, MSG_DONTWAIT
);
155 int rtnetlink_put_metrics(struct sk_buff
*skb
, u32
*metrics
)
157 struct rtattr
*mx
= (struct rtattr
*)skb
->tail
;
160 RTA_PUT(skb
, RTA_METRICS
, 0, NULL
);
161 for (i
=0; i
<RTAX_MAX
; i
++) {
163 RTA_PUT(skb
, i
+1, sizeof(u32
), metrics
+i
);
165 mx
->rta_len
= skb
->tail
- (u8
*)mx
;
166 if (mx
->rta_len
== RTA_LENGTH(0))
167 skb_trim(skb
, (u8
*)mx
- skb
->data
);
171 skb_trim(skb
, (u8
*)mx
- skb
->data
);
176 static int rtnetlink_fill_ifinfo(struct sk_buff
*skb
, struct net_device
*dev
,
177 int type
, u32 pid
, u32 seq
, u32 change
)
180 struct nlmsghdr
*nlh
;
181 unsigned char *b
= skb
->tail
;
183 nlh
= NLMSG_PUT(skb
, pid
, seq
, type
, sizeof(*r
));
184 if (pid
) nlh
->nlmsg_flags
|= NLM_F_MULTI
;
186 r
->ifi_family
= AF_UNSPEC
;
187 r
->ifi_type
= dev
->type
;
188 r
->ifi_index
= dev
->ifindex
;
189 r
->ifi_flags
= dev_get_flags(dev
);
190 r
->ifi_change
= change
;
192 RTA_PUT(skb
, IFLA_IFNAME
, strlen(dev
->name
)+1, dev
->name
);
195 u32 txqlen
= dev
->tx_queue_len
;
196 RTA_PUT(skb
, IFLA_TXQLEN
, sizeof(txqlen
), &txqlen
);
200 u32 weight
= dev
->weight
;
201 RTA_PUT(skb
, IFLA_WEIGHT
, sizeof(weight
), &weight
);
205 struct rtnl_link_ifmap map
= {
206 .mem_start
= dev
->mem_start
,
207 .mem_end
= dev
->mem_end
,
208 .base_addr
= dev
->base_addr
,
211 .port
= dev
->if_port
,
213 RTA_PUT(skb
, IFLA_MAP
, sizeof(map
), &map
);
217 RTA_PUT(skb
, IFLA_ADDRESS
, dev
->addr_len
, dev
->dev_addr
);
218 RTA_PUT(skb
, IFLA_BROADCAST
, dev
->addr_len
, dev
->broadcast
);
223 RTA_PUT(skb
, IFLA_MTU
, sizeof(mtu
), &mtu
);
226 if (dev
->ifindex
!= dev
->iflink
) {
227 u32 iflink
= dev
->iflink
;
228 RTA_PUT(skb
, IFLA_LINK
, sizeof(iflink
), &iflink
);
231 if (dev
->qdisc_sleeping
)
232 RTA_PUT(skb
, IFLA_QDISC
,
233 strlen(dev
->qdisc_sleeping
->ops
->id
) + 1,
234 dev
->qdisc_sleeping
->ops
->id
);
237 u32 master
= dev
->master
->ifindex
;
238 RTA_PUT(skb
, IFLA_MASTER
, sizeof(master
), &master
);
241 if (dev
->get_stats
) {
242 unsigned long *stats
= (unsigned long*)dev
->get_stats(dev
);
247 int n
= sizeof(struct rtnl_link_stats
)/4;
249 a
= __RTA_PUT(skb
, IFLA_STATS
, n
*4);
255 nlh
->nlmsg_len
= skb
->tail
- b
;
260 skb_trim(skb
, b
- skb
->data
);
264 static int rtnetlink_dump_ifinfo(struct sk_buff
*skb
, struct netlink_callback
*cb
)
267 int s_idx
= cb
->args
[0];
268 struct net_device
*dev
;
270 read_lock(&dev_base_lock
);
271 for (dev
=dev_base
, idx
=0; dev
; dev
= dev
->next
, idx
++) {
274 if (rtnetlink_fill_ifinfo(skb
, dev
, RTM_NEWLINK
, NETLINK_CB(cb
->skb
).pid
, cb
->nlh
->nlmsg_seq
, 0) <= 0)
277 read_unlock(&dev_base_lock
);
283 static int do_setlink(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, void *arg
)
285 struct ifinfomsg
*ifm
= NLMSG_DATA(nlh
);
286 struct rtattr
**ida
= arg
;
287 struct net_device
*dev
;
288 int err
, send_addr_notify
= 0;
290 if (ifm
->ifi_index
>= 0)
291 dev
= dev_get_by_index(ifm
->ifi_index
);
292 else if (ida
[IFLA_IFNAME
- 1]) {
293 char ifname
[IFNAMSIZ
];
295 if (rtattr_strlcpy(ifname
, ida
[IFLA_IFNAME
- 1],
296 IFNAMSIZ
) >= IFNAMSIZ
)
298 dev
= dev_get_by_name(ifname
);
308 dev_change_flags(dev
, ifm
->ifi_flags
);
310 if (ida
[IFLA_MAP
- 1]) {
311 struct rtnl_link_ifmap
*u_map
;
314 if (!dev
->set_config
) {
319 if (!netif_device_present(dev
)) {
324 if (ida
[IFLA_MAP
- 1]->rta_len
!= RTA_LENGTH(sizeof(*u_map
)))
327 u_map
= RTA_DATA(ida
[IFLA_MAP
- 1]);
329 k_map
.mem_start
= (unsigned long) u_map
->mem_start
;
330 k_map
.mem_end
= (unsigned long) u_map
->mem_end
;
331 k_map
.base_addr
= (unsigned short) u_map
->base_addr
;
332 k_map
.irq
= (unsigned char) u_map
->irq
;
333 k_map
.dma
= (unsigned char) u_map
->dma
;
334 k_map
.port
= (unsigned char) u_map
->port
;
336 err
= dev
->set_config(dev
, &k_map
);
342 if (ida
[IFLA_ADDRESS
- 1]) {
343 if (!dev
->set_mac_address
) {
347 if (!netif_device_present(dev
)) {
351 if (ida
[IFLA_ADDRESS
- 1]->rta_len
!= RTA_LENGTH(dev
->addr_len
))
354 err
= dev
->set_mac_address(dev
, RTA_DATA(ida
[IFLA_ADDRESS
- 1]));
357 send_addr_notify
= 1;
360 if (ida
[IFLA_BROADCAST
- 1]) {
361 if (ida
[IFLA_BROADCAST
- 1]->rta_len
!= RTA_LENGTH(dev
->addr_len
))
363 memcpy(dev
->broadcast
, RTA_DATA(ida
[IFLA_BROADCAST
- 1]),
365 send_addr_notify
= 1;
368 if (ida
[IFLA_MTU
- 1]) {
369 if (ida
[IFLA_MTU
- 1]->rta_len
!= RTA_LENGTH(sizeof(u32
)))
371 err
= dev_set_mtu(dev
, *((u32
*) RTA_DATA(ida
[IFLA_MTU
- 1])));
378 if (ida
[IFLA_TXQLEN
- 1]) {
379 if (ida
[IFLA_TXQLEN
- 1]->rta_len
!= RTA_LENGTH(sizeof(u32
)))
382 dev
->tx_queue_len
= *((u32
*) RTA_DATA(ida
[IFLA_TXQLEN
- 1]));
385 if (ida
[IFLA_WEIGHT
- 1]) {
386 if (ida
[IFLA_WEIGHT
- 1]->rta_len
!= RTA_LENGTH(sizeof(u32
)))
389 dev
->weight
= *((u32
*) RTA_DATA(ida
[IFLA_WEIGHT
- 1]));
392 if (ifm
->ifi_index
>= 0 && ida
[IFLA_IFNAME
- 1]) {
393 char ifname
[IFNAMSIZ
];
395 if (rtattr_strlcpy(ifname
, ida
[IFLA_IFNAME
- 1],
396 IFNAMSIZ
) >= IFNAMSIZ
)
398 err
= dev_change_name(dev
, ifname
);
406 if (send_addr_notify
)
407 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
413 static int rtnetlink_dump_all(struct sk_buff
*skb
, struct netlink_callback
*cb
)
416 int s_idx
= cb
->family
;
420 for (idx
=1; idx
<NPROTO
; idx
++) {
421 int type
= cb
->nlh
->nlmsg_type
-RTM_BASE
;
422 if (idx
< s_idx
|| idx
== PF_PACKET
)
424 if (rtnetlink_links
[idx
] == NULL
||
425 rtnetlink_links
[idx
][type
].dumpit
== NULL
)
428 memset(&cb
->args
[0], 0, sizeof(cb
->args
));
429 if (rtnetlink_links
[idx
][type
].dumpit(skb
, cb
))
437 void rtmsg_ifinfo(int type
, struct net_device
*dev
, unsigned change
)
440 int size
= NLMSG_SPACE(sizeof(struct ifinfomsg
) +
441 sizeof(struct rtnl_link_ifmap
) +
442 sizeof(struct rtnl_link_stats
) + 128);
444 skb
= alloc_skb(size
, GFP_KERNEL
);
448 if (rtnetlink_fill_ifinfo(skb
, dev
, type
, 0, 0, change
) < 0) {
452 NETLINK_CB(skb
).dst_groups
= RTMGRP_LINK
;
453 netlink_broadcast(rtnl
, skb
, 0, RTMGRP_LINK
, GFP_KERNEL
);
/* Dump-finished callback passed to netlink_dump_start(): no state to
 * release, so simply report success. */
static int rtnetlink_done(struct netlink_callback *cb)
{
	return 0;
}
/* Scratch attribute table shared by all rtnetlink doit handlers.
 * Protected by the RTNL semaphore (handlers run with it held). */
static struct rtattr **rta_buf;
static int rtattr_max;
465 /* Process one rtnetlink message. */
467 static __inline__
int
468 rtnetlink_rcv_msg(struct sk_buff
*skb
, struct nlmsghdr
*nlh
, int *errp
)
470 struct rtnetlink_link
*link
;
471 struct rtnetlink_link
*link_tab
;
478 /* Only requests are handled by kernel now */
479 if (!(nlh
->nlmsg_flags
&NLM_F_REQUEST
))
482 type
= nlh
->nlmsg_type
;
484 /* A control message: ignore them */
488 /* Unknown message: reply with EINVAL */
494 /* All the messages must have at least 1 byte length */
495 if (nlh
->nlmsg_len
< NLMSG_LENGTH(sizeof(struct rtgenmsg
)))
498 family
= ((struct rtgenmsg
*)NLMSG_DATA(nlh
))->rtgen_family
;
499 if (family
>= NPROTO
) {
500 *errp
= -EAFNOSUPPORT
;
504 link_tab
= rtnetlink_links
[family
];
505 if (link_tab
== NULL
)
506 link_tab
= rtnetlink_links
[PF_UNSPEC
];
507 link
= &link_tab
[type
];
512 if (kind
!= 2 && security_netlink_recv(skb
)) {
517 if (kind
== 2 && nlh
->nlmsg_flags
&NLM_F_DUMP
) {
520 if (link
->dumpit
== NULL
)
521 link
= &(rtnetlink_links
[PF_UNSPEC
][type
]);
523 if (link
->dumpit
== NULL
)
526 if ((*errp
= netlink_dump_start(rtnl
, skb
, nlh
,
528 rtnetlink_done
)) != 0) {
531 rlen
= NLMSG_ALIGN(nlh
->nlmsg_len
);
538 memset(rta_buf
, 0, (rtattr_max
* sizeof(struct rtattr
*)));
540 min_len
= rtm_min
[sz_idx
];
541 if (nlh
->nlmsg_len
< min_len
)
544 if (nlh
->nlmsg_len
> min_len
) {
545 int attrlen
= nlh
->nlmsg_len
- NLMSG_ALIGN(min_len
);
546 struct rtattr
*attr
= (void*)nlh
+ NLMSG_ALIGN(min_len
);
548 while (RTA_OK(attr
, attrlen
)) {
549 unsigned flavor
= attr
->rta_type
;
551 if (flavor
> rta_max
[sz_idx
])
553 rta_buf
[flavor
-1] = attr
;
555 attr
= RTA_NEXT(attr
, attrlen
);
559 if (link
->doit
== NULL
)
560 link
= &(rtnetlink_links
[PF_UNSPEC
][type
]);
561 if (link
->doit
== NULL
)
563 err
= link
->doit(skb
, nlh
, (void *)&rta_buf
[0]);
574 * Process one packet of messages.
575 * Malformed skbs with wrong lengths of messages are discarded silently.
578 static inline int rtnetlink_rcv_skb(struct sk_buff
*skb
)
581 struct nlmsghdr
* nlh
;
583 while (skb
->len
>= NLMSG_SPACE(0)) {
586 nlh
= (struct nlmsghdr
*)skb
->data
;
587 if (nlh
->nlmsg_len
< sizeof(*nlh
) || skb
->len
< nlh
->nlmsg_len
)
589 rlen
= NLMSG_ALIGN(nlh
->nlmsg_len
);
592 if (rtnetlink_rcv_msg(skb
, nlh
, &err
)) {
593 /* Not error, but we must interrupt processing here:
594 * Note, that in this case we do not pull message
595 * from skb, it will be processed later.
599 netlink_ack(skb
, nlh
, err
);
600 } else if (nlh
->nlmsg_flags
&NLM_F_ACK
)
601 netlink_ack(skb
, nlh
, 0);
609 * rtnetlink input queue processing routine:
610 * - try to acquire shared lock. If it is failed, defer processing.
611 * - feed skbs to rtnetlink_rcv_skb, until it refuse a message,
612 * that will occur, when a dump started and/or acquisition of
613 * exclusive lock failed.
616 static void rtnetlink_rcv(struct sock
*sk
, int len
)
621 if (rtnl_shlock_nowait())
624 while ((skb
= skb_dequeue(&sk
->sk_receive_queue
)) != NULL
) {
625 if (rtnetlink_rcv_skb(skb
)) {
627 skb_queue_head(&sk
->sk_receive_queue
,
639 } while (rtnl
&& rtnl
->sk_receive_queue
.qlen
);
642 static struct rtnetlink_link link_rtnetlink_table
[RTM_MAX
-RTM_BASE
+1] =
644 [RTM_GETLINK
- RTM_BASE
] = { .dumpit
= rtnetlink_dump_ifinfo
},
645 [RTM_SETLINK
- RTM_BASE
] = { .doit
= do_setlink
},
646 [RTM_GETADDR
- RTM_BASE
] = { .dumpit
= rtnetlink_dump_all
},
647 [RTM_GETROUTE
- RTM_BASE
] = { .dumpit
= rtnetlink_dump_all
},
648 [RTM_NEWNEIGH
- RTM_BASE
] = { .doit
= neigh_add
},
649 [RTM_DELNEIGH
- RTM_BASE
] = { .doit
= neigh_delete
},
650 [RTM_GETNEIGH
- RTM_BASE
] = { .dumpit
= neigh_dump_info
}
653 static int rtnetlink_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
655 struct net_device
*dev
= ptr
;
657 case NETDEV_UNREGISTER
:
658 rtmsg_ifinfo(RTM_DELLINK
, dev
, ~0U);
660 case NETDEV_REGISTER
:
661 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U);
665 rtmsg_ifinfo(RTM_NEWLINK
, dev
, IFF_UP
|IFF_RUNNING
);
668 case NETDEV_GOING_DOWN
:
671 rtmsg_ifinfo(RTM_NEWLINK
, dev
, 0);
677 static struct notifier_block rtnetlink_dev_notifier
= {
678 .notifier_call
= rtnetlink_event
,
681 void __init
rtnetlink_init(void)
686 for (i
= 0; i
< ARRAY_SIZE(rta_max
); i
++)
687 if (rta_max
[i
] > rtattr_max
)
688 rtattr_max
= rta_max
[i
];
689 rta_buf
= kmalloc(rtattr_max
* sizeof(struct rtattr
*), GFP_KERNEL
);
691 panic("rtnetlink_init: cannot allocate rta_buf\n");
693 rtnl
= netlink_kernel_create(NETLINK_ROUTE
, rtnetlink_rcv
);
695 panic("rtnetlink_init: cannot initialize rtnetlink\n");
696 netlink_set_nonroot(NETLINK_ROUTE
, NL_NONROOT_RECV
);
697 register_netdevice_notifier(&rtnetlink_dev_notifier
);
698 rtnetlink_links
[PF_UNSPEC
] = link_rtnetlink_table
;
699 rtnetlink_links
[PF_PACKET
] = link_rtnetlink_table
;
EXPORT_SYMBOL(__rta_fill);
EXPORT_SYMBOL(rtattr_strlcpy);
EXPORT_SYMBOL(rtattr_parse);
EXPORT_SYMBOL(rtnetlink_links);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_lock_interruptible);
EXPORT_SYMBOL(rtnl_sem);
EXPORT_SYMBOL(rtnl_unlock);