1 #include <linux/types.h>
2 #include <linux/skbuff.h>
3 #include <linux/socket.h>
4 #include <linux/sysctl.h>
6 #include <linux/module.h>
7 #include <linux/if_arp.h>
8 #include <linux/ipv6.h>
9 #include <linux/mpls.h>
10 #include <linux/nospec.h>
11 #include <linux/vmalloc.h>
16 #include <net/ip_fib.h>
17 #include <net/netevent.h>
18 #include <net/netns/generic.h>
19 #if IS_ENABLED(CONFIG_IPV6)
21 #include <net/addrconf.h>
23 #include <net/nexthop.h>
/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4
31 #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
34 static int label_limit
= (1 << 20) - 1;
36 static void rtmsg_lfib(int event
, u32 label
, struct mpls_route
*rt
,
37 struct nlmsghdr
*nlh
, struct net
*net
, u32 portid
,
38 unsigned int nlm_flags
);
40 static struct mpls_route
*mpls_route_input_rcu(struct net
*net
, unsigned index
)
42 struct mpls_route
*rt
= NULL
;
44 if (index
< net
->mpls
.platform_labels
) {
45 struct mpls_route __rcu
**platform_label
=
46 rcu_dereference(net
->mpls
.platform_label
);
47 rt
= rcu_dereference(platform_label
[index
]);
52 static inline struct mpls_dev
*mpls_dev_get(const struct net_device
*dev
)
54 return rcu_dereference_rtnl(dev
->mpls_ptr
);
57 bool mpls_output_possible(const struct net_device
*dev
)
59 return dev
&& (dev
->flags
& IFF_UP
) && netif_carrier_ok(dev
);
61 EXPORT_SYMBOL_GPL(mpls_output_possible
);
63 static u8
*__mpls_nh_via(struct mpls_route
*rt
, struct mpls_nh
*nh
)
65 u8
*nh0_via
= PTR_ALIGN((u8
*)&rt
->rt_nh
[rt
->rt_nhn
], VIA_ALEN_ALIGN
);
66 int nh_index
= nh
- rt
->rt_nh
;
68 return nh0_via
+ rt
->rt_max_alen
* nh_index
;
71 static const u8
*mpls_nh_via(const struct mpls_route
*rt
,
72 const struct mpls_nh
*nh
)
74 return __mpls_nh_via((struct mpls_route
*)rt
, (struct mpls_nh
*)nh
);
77 static unsigned int mpls_nh_header_size(const struct mpls_nh
*nh
)
79 /* The size of the layer 2.5 labels to be added for this route */
80 return nh
->nh_labels
* sizeof(struct mpls_shim_hdr
);
83 unsigned int mpls_dev_mtu(const struct net_device
*dev
)
85 /* The amount of data the layer 2 frame can hold */
88 EXPORT_SYMBOL_GPL(mpls_dev_mtu
);
90 bool mpls_pkt_too_big(const struct sk_buff
*skb
, unsigned int mtu
)
95 if (skb_is_gso(skb
) && skb_gso_validate_mtu(skb
, mtu
))
100 EXPORT_SYMBOL_GPL(mpls_pkt_too_big
);
102 static u32
mpls_multipath_hash(struct mpls_route
*rt
, struct sk_buff
*skb
)
104 struct mpls_entry_decoded dec
;
105 unsigned int mpls_hdr_len
= 0;
106 struct mpls_shim_hdr
*hdr
;
107 bool eli_seen
= false;
111 for (label_index
= 0; label_index
< MAX_MP_SELECT_LABELS
;
113 mpls_hdr_len
+= sizeof(*hdr
);
114 if (!pskb_may_pull(skb
, mpls_hdr_len
))
117 /* Read and decode the current label */
118 hdr
= mpls_hdr(skb
) + label_index
;
119 dec
= mpls_entry_decode(hdr
);
121 /* RFC6790 - reserved labels MUST NOT be used as keys
122 * for the load-balancing function
124 if (likely(dec
.label
>= MPLS_LABEL_FIRST_UNRESERVED
)) {
125 hash
= jhash_1word(dec
.label
, hash
);
127 /* The entropy label follows the entropy label
128 * indicator, so this means that the entropy
129 * label was just added to the hash - no need to
130 * go any deeper either in the label stack or in the
135 } else if (dec
.label
== MPLS_LABEL_ENTROPY
) {
142 /* found bottom label; does skb have room for a header? */
143 if (pskb_may_pull(skb
, mpls_hdr_len
+ sizeof(struct iphdr
))) {
144 const struct iphdr
*v4hdr
;
146 v4hdr
= (const struct iphdr
*)(hdr
+ 1);
147 if (v4hdr
->version
== 4) {
148 hash
= jhash_3words(ntohl(v4hdr
->saddr
),
150 v4hdr
->protocol
, hash
);
151 } else if (v4hdr
->version
== 6 &&
152 pskb_may_pull(skb
, mpls_hdr_len
+
153 sizeof(struct ipv6hdr
))) {
154 const struct ipv6hdr
*v6hdr
;
156 v6hdr
= (const struct ipv6hdr
*)(hdr
+ 1);
157 hash
= __ipv6_addr_jhash(&v6hdr
->saddr
, hash
);
158 hash
= __ipv6_addr_jhash(&v6hdr
->daddr
, hash
);
159 hash
= jhash_1word(v6hdr
->nexthdr
, hash
);
169 static struct mpls_nh
*mpls_select_multipath(struct mpls_route
*rt
,
172 int alive
= ACCESS_ONCE(rt
->rt_nhn_alive
);
177 /* No need to look further into packet if there's only
186 hash
= mpls_multipath_hash(rt
, skb
);
187 nh_index
= hash
% alive
;
188 if (alive
== rt
->rt_nhn
)
191 if (nh
->nh_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
))
196 } endfor_nexthops(rt
);
199 return &rt
->rt_nh
[nh_index
];
202 static bool mpls_egress(struct mpls_route
*rt
, struct sk_buff
*skb
,
203 struct mpls_entry_decoded dec
)
205 enum mpls_payload_type payload_type
;
206 bool success
= false;
208 /* The IPv4 code below accesses through the IPv4 header
209 * checksum, which is 12 bytes into the packet.
210 * The IPv6 code below accesses through the IPv6 hop limit
211 * which is 8 bytes into the packet.
213 * For all supported cases there should always be at least 12
214 * bytes of packet data present. The IPv4 header is 20 bytes
215 * without options and the IPv6 header is always 40 bytes
218 if (!pskb_may_pull(skb
, 12))
221 payload_type
= rt
->rt_payload_type
;
222 if (payload_type
== MPT_UNSPEC
)
223 payload_type
= ip_hdr(skb
)->version
;
225 switch (payload_type
) {
227 struct iphdr
*hdr4
= ip_hdr(skb
);
228 skb
->protocol
= htons(ETH_P_IP
);
229 csum_replace2(&hdr4
->check
,
230 htons(hdr4
->ttl
<< 8),
231 htons(dec
.ttl
<< 8));
237 struct ipv6hdr
*hdr6
= ipv6_hdr(skb
);
238 skb
->protocol
= htons(ETH_P_IPV6
);
239 hdr6
->hop_limit
= dec
.ttl
;
250 static int mpls_forward(struct sk_buff
*skb
, struct net_device
*dev
,
251 struct packet_type
*pt
, struct net_device
*orig_dev
)
253 struct net
*net
= dev_net(dev
);
254 struct mpls_shim_hdr
*hdr
;
255 struct mpls_route
*rt
;
257 struct mpls_entry_decoded dec
;
258 struct net_device
*out_dev
;
259 struct mpls_dev
*mdev
;
261 unsigned int new_header_size
;
265 /* Careful this entire function runs inside of an rcu critical section */
267 mdev
= mpls_dev_get(dev
);
268 if (!mdev
|| !mdev
->input_enabled
)
271 if (skb
->pkt_type
!= PACKET_HOST
)
274 if ((skb
= skb_share_check(skb
, GFP_ATOMIC
)) == NULL
)
277 if (!pskb_may_pull(skb
, sizeof(*hdr
)))
280 /* Read and decode the label */
282 dec
= mpls_entry_decode(hdr
);
284 rt
= mpls_route_input_rcu(net
, dec
.label
);
288 nh
= mpls_select_multipath(rt
, skb
);
292 /* Find the output device */
293 out_dev
= rcu_dereference(nh
->nh_dev
);
294 if (!mpls_output_possible(out_dev
))
298 skb_pull(skb
, sizeof(*hdr
));
299 skb_reset_network_header(skb
);
303 if (skb_warn_if_lro(skb
))
306 skb_forward_csum(skb
);
308 /* Verify ttl is valid */
313 /* Verify the destination can hold the packet */
314 new_header_size
= mpls_nh_header_size(nh
);
315 mtu
= mpls_dev_mtu(out_dev
);
316 if (mpls_pkt_too_big(skb
, mtu
- new_header_size
))
319 hh_len
= LL_RESERVED_SPACE(out_dev
);
320 if (!out_dev
->header_ops
)
323 /* Ensure there is enough space for the headers in the skb */
324 if (skb_cow(skb
, hh_len
+ new_header_size
))
328 skb
->protocol
= htons(ETH_P_MPLS_UC
);
330 if (unlikely(!new_header_size
&& dec
.bos
)) {
331 /* Penultimate hop popping */
332 if (!mpls_egress(rt
, skb
, dec
))
337 skb_push(skb
, new_header_size
);
338 skb_reset_network_header(skb
);
339 /* Push the new labels */
342 for (i
= nh
->nh_labels
- 1; i
>= 0; i
--) {
343 hdr
[i
] = mpls_entry_encode(nh
->nh_label
[i
],
349 /* If via wasn't specified then send out using device address */
350 if (nh
->nh_via_table
== MPLS_NEIGH_TABLE_UNSPEC
)
351 err
= neigh_xmit(NEIGH_LINK_TABLE
, out_dev
,
352 out_dev
->dev_addr
, skb
);
354 err
= neigh_xmit(nh
->nh_via_table
, out_dev
,
355 mpls_nh_via(rt
, nh
), skb
);
357 net_dbg_ratelimited("%s: packet transmission failed: %d\n",
366 static struct packet_type mpls_packet_type __read_mostly
= {
367 .type
= cpu_to_be16(ETH_P_MPLS_UC
),
368 .func
= mpls_forward
,
371 static const struct nla_policy rtm_mpls_policy
[RTA_MAX
+1] = {
372 [RTA_DST
] = { .type
= NLA_U32
},
373 [RTA_OIF
] = { .type
= NLA_U32
},
376 struct mpls_route_config
{
381 u8 rc_via
[MAX_VIA_ALEN
];
384 u32 rc_output_label
[MAX_NEW_LABELS
];
386 enum mpls_payload_type rc_payload_type
;
387 struct nl_info rc_nlinfo
;
388 struct rtnexthop
*rc_mp
;
392 static struct mpls_route
*mpls_rt_alloc(int num_nh
, u8 max_alen
)
394 u8 max_alen_aligned
= ALIGN(max_alen
, VIA_ALEN_ALIGN
);
395 struct mpls_route
*rt
;
397 rt
= kzalloc(ALIGN(sizeof(*rt
) + num_nh
* sizeof(*rt
->rt_nh
),
399 num_nh
* max_alen_aligned
,
403 rt
->rt_nhn_alive
= num_nh
;
404 rt
->rt_max_alen
= max_alen_aligned
;
410 static void mpls_rt_free(struct mpls_route
*rt
)
413 kfree_rcu(rt
, rt_rcu
);
416 static void mpls_notify_route(struct net
*net
, unsigned index
,
417 struct mpls_route
*old
, struct mpls_route
*new,
418 const struct nl_info
*info
)
420 struct nlmsghdr
*nlh
= info
? info
->nlh
: NULL
;
421 unsigned portid
= info
? info
->portid
: 0;
422 int event
= new ? RTM_NEWROUTE
: RTM_DELROUTE
;
423 struct mpls_route
*rt
= new ? new : old
;
424 unsigned nlm_flags
= (old
&& new) ? NLM_F_REPLACE
: 0;
425 /* Ignore reserved labels for now */
426 if (rt
&& (index
>= MPLS_LABEL_FIRST_UNRESERVED
))
427 rtmsg_lfib(event
, index
, rt
, nlh
, net
, portid
, nlm_flags
);
430 static void mpls_route_update(struct net
*net
, unsigned index
,
431 struct mpls_route
*new,
432 const struct nl_info
*info
)
434 struct mpls_route __rcu
**platform_label
;
435 struct mpls_route
*rt
;
439 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
440 rt
= rtnl_dereference(platform_label
[index
]);
441 rcu_assign_pointer(platform_label
[index
], new);
443 mpls_notify_route(net
, index
, rt
, new, info
);
445 /* If we removed a route free it now */
449 static unsigned find_free_label(struct net
*net
)
451 struct mpls_route __rcu
**platform_label
;
452 size_t platform_labels
;
455 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
456 platform_labels
= net
->mpls
.platform_labels
;
457 for (index
= MPLS_LABEL_FIRST_UNRESERVED
; index
< platform_labels
;
459 if (!rtnl_dereference(platform_label
[index
]))
462 return LABEL_NOT_SPECIFIED
;
465 #if IS_ENABLED(CONFIG_INET)
466 static struct net_device
*inet_fib_lookup_dev(struct net
*net
,
469 struct net_device
*dev
;
471 struct in_addr daddr
;
473 memcpy(&daddr
, addr
, sizeof(struct in_addr
));
474 rt
= ip_route_output(net
, daddr
.s_addr
, 0, 0, 0);
486 static struct net_device
*inet_fib_lookup_dev(struct net
*net
,
489 return ERR_PTR(-EAFNOSUPPORT
);
493 #if IS_ENABLED(CONFIG_IPV6)
494 static struct net_device
*inet6_fib_lookup_dev(struct net
*net
,
497 struct net_device
*dev
;
498 struct dst_entry
*dst
;
503 return ERR_PTR(-EAFNOSUPPORT
);
505 memset(&fl6
, 0, sizeof(fl6
));
506 memcpy(&fl6
.daddr
, addr
, sizeof(struct in6_addr
));
507 err
= ipv6_stub
->ipv6_dst_lookup(net
, NULL
, &dst
, &fl6
);
518 static struct net_device
*inet6_fib_lookup_dev(struct net
*net
,
521 return ERR_PTR(-EAFNOSUPPORT
);
525 static struct net_device
*find_outdev(struct net
*net
,
526 struct mpls_route
*rt
,
527 struct mpls_nh
*nh
, int oif
)
529 struct net_device
*dev
= NULL
;
532 switch (nh
->nh_via_table
) {
533 case NEIGH_ARP_TABLE
:
534 dev
= inet_fib_lookup_dev(net
, mpls_nh_via(rt
, nh
));
537 dev
= inet6_fib_lookup_dev(net
, mpls_nh_via(rt
, nh
));
539 case NEIGH_LINK_TABLE
:
543 dev
= dev_get_by_index(net
, oif
);
547 return ERR_PTR(-ENODEV
);
552 /* The caller is holding rtnl anyways, so release the dev reference */
558 static int mpls_nh_assign_dev(struct net
*net
, struct mpls_route
*rt
,
559 struct mpls_nh
*nh
, int oif
)
561 struct net_device
*dev
= NULL
;
564 dev
= find_outdev(net
, rt
, nh
, oif
);
571 /* Ensure this is a supported device */
573 if (!mpls_dev_get(dev
))
576 if ((nh
->nh_via_table
== NEIGH_LINK_TABLE
) &&
577 (dev
->addr_len
!= nh
->nh_via_alen
))
580 RCU_INIT_POINTER(nh
->nh_dev
, dev
);
582 if (!(dev
->flags
& IFF_UP
)) {
583 nh
->nh_flags
|= RTNH_F_DEAD
;
587 flags
= dev_get_flags(dev
);
588 if (!(flags
& (IFF_RUNNING
| IFF_LOWER_UP
)))
589 nh
->nh_flags
|= RTNH_F_LINKDOWN
;
598 static int mpls_nh_build_from_cfg(struct mpls_route_config
*cfg
,
599 struct mpls_route
*rt
)
601 struct net
*net
= cfg
->rc_nlinfo
.nl_net
;
602 struct mpls_nh
*nh
= rt
->rt_nh
;
610 /* Ensure only a supported number of labels are present */
611 if (cfg
->rc_output_labels
> MAX_NEW_LABELS
)
614 nh
->nh_labels
= cfg
->rc_output_labels
;
615 for (i
= 0; i
< nh
->nh_labels
; i
++)
616 nh
->nh_label
[i
] = cfg
->rc_output_label
[i
];
618 nh
->nh_via_table
= cfg
->rc_via_table
;
619 memcpy(__mpls_nh_via(rt
, nh
), cfg
->rc_via
, cfg
->rc_via_alen
);
620 nh
->nh_via_alen
= cfg
->rc_via_alen
;
622 err
= mpls_nh_assign_dev(net
, rt
, nh
, cfg
->rc_ifindex
);
626 if (nh
->nh_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
))
635 static int mpls_nh_build(struct net
*net
, struct mpls_route
*rt
,
636 struct mpls_nh
*nh
, int oif
, struct nlattr
*via
,
637 struct nlattr
*newdst
)
645 err
= nla_get_labels(newdst
, MAX_NEW_LABELS
,
646 &nh
->nh_labels
, nh
->nh_label
);
652 err
= nla_get_via(via
, &nh
->nh_via_alen
, &nh
->nh_via_table
,
653 __mpls_nh_via(rt
, nh
));
657 nh
->nh_via_table
= MPLS_NEIGH_TABLE_UNSPEC
;
660 err
= mpls_nh_assign_dev(net
, rt
, nh
, oif
);
670 static int mpls_count_nexthops(struct rtnexthop
*rtnh
, int len
,
671 u8 cfg_via_alen
, u8
*max_via_alen
)
677 *max_via_alen
= cfg_via_alen
;
683 while (rtnh_ok(rtnh
, remaining
)) {
684 struct nlattr
*nla
, *attrs
= rtnh_attrs(rtnh
);
687 attrlen
= rtnh_attrlen(rtnh
);
688 nla
= nla_find(attrs
, attrlen
, RTA_VIA
);
689 if (nla
&& nla_len(nla
) >=
690 offsetof(struct rtvia
, rtvia_addr
)) {
691 int via_alen
= nla_len(nla
) -
692 offsetof(struct rtvia
, rtvia_addr
);
694 if (via_alen
<= MAX_VIA_ALEN
)
695 *max_via_alen
= max_t(u16
, *max_via_alen
,
700 rtnh
= rtnh_next(rtnh
, &remaining
);
703 /* leftover implies invalid nexthop configuration, discard it */
704 return remaining
> 0 ? 0 : nhs
;
707 static int mpls_nh_build_multi(struct mpls_route_config
*cfg
,
708 struct mpls_route
*rt
)
710 struct rtnexthop
*rtnh
= cfg
->rc_mp
;
711 struct nlattr
*nla_via
, *nla_newdst
;
712 int remaining
= cfg
->rc_mp_len
;
716 change_nexthops(rt
) {
723 if (!rtnh_ok(rtnh
, remaining
))
726 /* neither weighted multipath nor any flags
729 if (rtnh
->rtnh_hops
|| rtnh
->rtnh_flags
)
732 attrlen
= rtnh_attrlen(rtnh
);
734 struct nlattr
*attrs
= rtnh_attrs(rtnh
);
736 nla_via
= nla_find(attrs
, attrlen
, RTA_VIA
);
737 nla_newdst
= nla_find(attrs
, attrlen
, RTA_NEWDST
);
740 err
= mpls_nh_build(cfg
->rc_nlinfo
.nl_net
, rt
, nh
,
741 rtnh
->rtnh_ifindex
, nla_via
, nla_newdst
);
745 if (nh
->nh_flags
& (RTNH_F_DEAD
| RTNH_F_LINKDOWN
))
748 rtnh
= rtnh_next(rtnh
, &remaining
);
750 } endfor_nexthops(rt
);
760 static bool mpls_label_ok(struct net
*net
, unsigned int *index
)
764 /* Reserved labels may not be set */
765 if (*index
< MPLS_LABEL_FIRST_UNRESERVED
)
768 /* The full 20 bit range may not be supported. */
769 if (is_ok
&& *index
>= net
->mpls
.platform_labels
)
772 *index
= array_index_nospec(*index
, net
->mpls
.platform_labels
);
776 static int mpls_route_add(struct mpls_route_config
*cfg
)
778 struct mpls_route __rcu
**platform_label
;
779 struct net
*net
= cfg
->rc_nlinfo
.nl_net
;
780 struct mpls_route
*rt
, *old
;
786 index
= cfg
->rc_label
;
788 /* If a label was not specified during insert pick one */
789 if ((index
== LABEL_NOT_SPECIFIED
) &&
790 (cfg
->rc_nlflags
& NLM_F_CREATE
)) {
791 index
= find_free_label(net
);
794 if (!mpls_label_ok(net
, &index
))
797 /* Append makes no sense with mpls */
799 if (cfg
->rc_nlflags
& NLM_F_APPEND
)
803 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
804 old
= rtnl_dereference(platform_label
[index
]);
805 if ((cfg
->rc_nlflags
& NLM_F_EXCL
) && old
)
809 if (!(cfg
->rc_nlflags
& NLM_F_REPLACE
) && old
)
813 if (!(cfg
->rc_nlflags
& NLM_F_CREATE
) && !old
)
817 nhs
= mpls_count_nexthops(cfg
->rc_mp
, cfg
->rc_mp_len
,
818 cfg
->rc_via_alen
, &max_via_alen
);
823 rt
= mpls_rt_alloc(nhs
, max_via_alen
);
827 rt
->rt_protocol
= cfg
->rc_protocol
;
828 rt
->rt_payload_type
= cfg
->rc_payload_type
;
831 err
= mpls_nh_build_multi(cfg
, rt
);
833 err
= mpls_nh_build_from_cfg(cfg
, rt
);
837 mpls_route_update(net
, index
, rt
, &cfg
->rc_nlinfo
);
847 static int mpls_route_del(struct mpls_route_config
*cfg
)
849 struct net
*net
= cfg
->rc_nlinfo
.nl_net
;
853 index
= cfg
->rc_label
;
855 if (!mpls_label_ok(net
, &index
))
858 mpls_route_update(net
, index
, NULL
, &cfg
->rc_nlinfo
);
865 #define MPLS_PERDEV_SYSCTL_OFFSET(field) \
866 (&((struct mpls_dev *)0)->field)
868 static const struct ctl_table mpls_dev_table
[] = {
871 .maxlen
= sizeof(int),
873 .proc_handler
= proc_dointvec
,
874 .data
= MPLS_PERDEV_SYSCTL_OFFSET(input_enabled
),
879 static int mpls_dev_sysctl_register(struct net_device
*dev
,
880 struct mpls_dev
*mdev
)
882 char path
[sizeof("net/mpls/conf/") + IFNAMSIZ
];
883 struct ctl_table
*table
;
886 table
= kmemdup(&mpls_dev_table
, sizeof(mpls_dev_table
), GFP_KERNEL
);
890 /* Table data contains only offsets relative to the base of
891 * the mdev at this point, so make them absolute.
893 for (i
= 0; i
< ARRAY_SIZE(mpls_dev_table
); i
++)
894 table
[i
].data
= (char *)mdev
+ (uintptr_t)table
[i
].data
;
896 snprintf(path
, sizeof(path
), "net/mpls/conf/%s", dev
->name
);
898 mdev
->sysctl
= register_net_sysctl(dev_net(dev
), path
, table
);
910 static void mpls_dev_sysctl_unregister(struct mpls_dev
*mdev
)
912 struct ctl_table
*table
;
914 table
= mdev
->sysctl
->ctl_table_arg
;
915 unregister_net_sysctl_table(mdev
->sysctl
);
919 static struct mpls_dev
*mpls_add_dev(struct net_device
*dev
)
921 struct mpls_dev
*mdev
;
926 mdev
= kzalloc(sizeof(*mdev
), GFP_KERNEL
);
930 err
= mpls_dev_sysctl_register(dev
, mdev
);
934 rcu_assign_pointer(dev
->mpls_ptr
, mdev
);
943 static void mpls_ifdown(struct net_device
*dev
, int event
)
945 struct mpls_route __rcu
**platform_label
;
946 struct net
*net
= dev_net(dev
);
947 unsigned int nh_flags
= RTNH_F_DEAD
| RTNH_F_LINKDOWN
;
951 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
952 for (index
= 0; index
< net
->mpls
.platform_labels
; index
++) {
953 struct mpls_route
*rt
= rtnl_dereference(platform_label
[index
]);
959 change_nexthops(rt
) {
960 if (rtnl_dereference(nh
->nh_dev
) != dev
)
965 case NETDEV_UNREGISTER
:
966 nh
->nh_flags
|= RTNH_F_DEAD
;
969 nh
->nh_flags
|= RTNH_F_LINKDOWN
;
972 if (event
== NETDEV_UNREGISTER
)
973 RCU_INIT_POINTER(nh
->nh_dev
, NULL
);
975 if (!(nh
->nh_flags
& nh_flags
))
977 } endfor_nexthops(rt
);
979 WRITE_ONCE(rt
->rt_nhn_alive
, alive
);
983 static void mpls_ifup(struct net_device
*dev
, unsigned int nh_flags
)
985 struct mpls_route __rcu
**platform_label
;
986 struct net
*net
= dev_net(dev
);
990 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
991 for (index
= 0; index
< net
->mpls
.platform_labels
; index
++) {
992 struct mpls_route
*rt
= rtnl_dereference(platform_label
[index
]);
998 change_nexthops(rt
) {
999 struct net_device
*nh_dev
=
1000 rtnl_dereference(nh
->nh_dev
);
1002 if (!(nh
->nh_flags
& nh_flags
)) {
1009 nh
->nh_flags
&= ~nh_flags
;
1010 } endfor_nexthops(rt
);
1012 ACCESS_ONCE(rt
->rt_nhn_alive
) = alive
;
1016 static int mpls_dev_notify(struct notifier_block
*this, unsigned long event
,
1019 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1020 struct mpls_dev
*mdev
;
1023 if (event
== NETDEV_REGISTER
) {
1024 /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
1025 if (dev
->type
== ARPHRD_ETHER
||
1026 dev
->type
== ARPHRD_LOOPBACK
||
1027 dev
->type
== ARPHRD_IPGRE
||
1028 dev
->type
== ARPHRD_SIT
||
1029 dev
->type
== ARPHRD_TUNNEL
) {
1030 mdev
= mpls_add_dev(dev
);
1032 return notifier_from_errno(PTR_ERR(mdev
));
1037 mdev
= mpls_dev_get(dev
);
1043 mpls_ifdown(dev
, event
);
1046 flags
= dev_get_flags(dev
);
1047 if (flags
& (IFF_RUNNING
| IFF_LOWER_UP
))
1048 mpls_ifup(dev
, RTNH_F_DEAD
| RTNH_F_LINKDOWN
);
1050 mpls_ifup(dev
, RTNH_F_DEAD
);
1053 flags
= dev_get_flags(dev
);
1054 if (flags
& (IFF_RUNNING
| IFF_LOWER_UP
))
1055 mpls_ifup(dev
, RTNH_F_DEAD
| RTNH_F_LINKDOWN
);
1057 mpls_ifdown(dev
, event
);
1059 case NETDEV_UNREGISTER
:
1060 mpls_ifdown(dev
, event
);
1061 mdev
= mpls_dev_get(dev
);
1063 mpls_dev_sysctl_unregister(mdev
);
1064 RCU_INIT_POINTER(dev
->mpls_ptr
, NULL
);
1065 kfree_rcu(mdev
, rcu
);
1068 case NETDEV_CHANGENAME
:
1069 mdev
= mpls_dev_get(dev
);
1073 mpls_dev_sysctl_unregister(mdev
);
1074 err
= mpls_dev_sysctl_register(dev
, mdev
);
1076 return notifier_from_errno(err
);
1083 static struct notifier_block mpls_dev_notifier
= {
1084 .notifier_call
= mpls_dev_notify
,
1087 static int nla_put_via(struct sk_buff
*skb
,
1088 u8 table
, const void *addr
, int alen
)
1090 static const int table_to_family
[NEIGH_NR_TABLES
+ 1] = {
1091 AF_INET
, AF_INET6
, AF_DECnet
, AF_PACKET
,
1095 int family
= AF_UNSPEC
;
1097 nla
= nla_reserve(skb
, RTA_VIA
, alen
+ 2);
1101 if (table
<= NEIGH_NR_TABLES
)
1102 family
= table_to_family
[table
];
1104 via
= nla_data(nla
);
1105 via
->rtvia_family
= family
;
1106 memcpy(via
->rtvia_addr
, addr
, alen
);
1110 int nla_put_labels(struct sk_buff
*skb
, int attrtype
,
1111 u8 labels
, const u32 label
[])
1114 struct mpls_shim_hdr
*nla_label
;
1117 nla
= nla_reserve(skb
, attrtype
, labels
*4);
1121 nla_label
= nla_data(nla
);
1123 for (i
= labels
- 1; i
>= 0; i
--) {
1124 nla_label
[i
] = mpls_entry_encode(label
[i
], 0, 0, bos
);
1130 EXPORT_SYMBOL_GPL(nla_put_labels
);
1132 int nla_get_labels(const struct nlattr
*nla
,
1133 u32 max_labels
, u8
*labels
, u32 label
[])
1135 unsigned len
= nla_len(nla
);
1136 unsigned nla_labels
;
1137 struct mpls_shim_hdr
*nla_label
;
1141 /* len needs to be an even multiple of 4 (the label size) */
1145 /* Limit the number of new labels allowed */
1147 if (nla_labels
> max_labels
)
1150 nla_label
= nla_data(nla
);
1152 for (i
= nla_labels
- 1; i
>= 0; i
--, bos
= false) {
1153 struct mpls_entry_decoded dec
;
1154 dec
= mpls_entry_decode(nla_label
+ i
);
1156 /* Ensure the bottom of stack flag is properly set
1157 * and ttl and tc are both clear.
1159 if ((dec
.bos
!= bos
) || dec
.ttl
|| dec
.tc
)
1162 switch (dec
.label
) {
1163 case MPLS_LABEL_IMPLNULL
:
1164 /* RFC3032: This is a label that an LSR may
1165 * assign and distribute, but which never
1166 * actually appears in the encapsulation.
1171 label
[i
] = dec
.label
;
1173 *labels
= nla_labels
;
1176 EXPORT_SYMBOL_GPL(nla_get_labels
);
1178 int nla_get_via(const struct nlattr
*nla
, u8
*via_alen
,
1179 u8
*via_table
, u8 via_addr
[])
1181 struct rtvia
*via
= nla_data(nla
);
1185 if (nla_len(nla
) < offsetof(struct rtvia
, rtvia_addr
))
1187 alen
= nla_len(nla
) -
1188 offsetof(struct rtvia
, rtvia_addr
);
1189 if (alen
> MAX_VIA_ALEN
)
1192 /* Validate the address family */
1193 switch (via
->rtvia_family
) {
1195 *via_table
= NEIGH_LINK_TABLE
;
1198 *via_table
= NEIGH_ARP_TABLE
;
1203 *via_table
= NEIGH_ND_TABLE
;
1208 /* Unsupported address family */
1212 memcpy(via_addr
, via
->rtvia_addr
, alen
);
1220 static int rtm_to_route_config(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
1221 struct mpls_route_config
*cfg
)
1224 struct nlattr
*tb
[RTA_MAX
+1];
1228 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_mpls_policy
);
1233 rtm
= nlmsg_data(nlh
);
1234 memset(cfg
, 0, sizeof(*cfg
));
1236 if (rtm
->rtm_family
!= AF_MPLS
)
1238 if (rtm
->rtm_dst_len
!= 20)
1240 if (rtm
->rtm_src_len
!= 0)
1242 if (rtm
->rtm_tos
!= 0)
1244 if (rtm
->rtm_table
!= RT_TABLE_MAIN
)
1246 /* Any value is acceptable for rtm_protocol */
1248 /* As mpls uses destination specific addresses
1249 * (or source specific address in the case of multicast)
1250 * all addresses have universal scope.
1252 if (rtm
->rtm_scope
!= RT_SCOPE_UNIVERSE
)
1254 if (rtm
->rtm_type
!= RTN_UNICAST
)
1256 if (rtm
->rtm_flags
!= 0)
1259 cfg
->rc_label
= LABEL_NOT_SPECIFIED
;
1260 cfg
->rc_protocol
= rtm
->rtm_protocol
;
1261 cfg
->rc_via_table
= MPLS_NEIGH_TABLE_UNSPEC
;
1262 cfg
->rc_nlflags
= nlh
->nlmsg_flags
;
1263 cfg
->rc_nlinfo
.portid
= NETLINK_CB(skb
).portid
;
1264 cfg
->rc_nlinfo
.nlh
= nlh
;
1265 cfg
->rc_nlinfo
.nl_net
= sock_net(skb
->sk
);
1267 for (index
= 0; index
<= RTA_MAX
; index
++) {
1268 struct nlattr
*nla
= tb
[index
];
1274 cfg
->rc_ifindex
= nla_get_u32(nla
);
1277 if (nla_get_labels(nla
, MAX_NEW_LABELS
,
1278 &cfg
->rc_output_labels
,
1279 cfg
->rc_output_label
))
1285 if (nla_get_labels(nla
, 1, &label_count
,
1289 if (!mpls_label_ok(cfg
->rc_nlinfo
.nl_net
,
1296 if (nla_get_via(nla
, &cfg
->rc_via_alen
,
1297 &cfg
->rc_via_table
, cfg
->rc_via
))
1303 cfg
->rc_mp
= nla_data(nla
);
1304 cfg
->rc_mp_len
= nla_len(nla
);
1308 /* Unsupported attribute */
1318 static int mpls_rtm_delroute(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
1320 struct mpls_route_config cfg
;
1323 err
= rtm_to_route_config(skb
, nlh
, &cfg
);
1327 return mpls_route_del(&cfg
);
1331 static int mpls_rtm_newroute(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
1333 struct mpls_route_config cfg
;
1336 err
= rtm_to_route_config(skb
, nlh
, &cfg
);
1340 return mpls_route_add(&cfg
);
1343 static int mpls_dump_route(struct sk_buff
*skb
, u32 portid
, u32 seq
, int event
,
1344 u32 label
, struct mpls_route
*rt
, int flags
)
1346 struct net_device
*dev
;
1347 struct nlmsghdr
*nlh
;
1350 nlh
= nlmsg_put(skb
, portid
, seq
, event
, sizeof(*rtm
), flags
);
1354 rtm
= nlmsg_data(nlh
);
1355 rtm
->rtm_family
= AF_MPLS
;
1356 rtm
->rtm_dst_len
= 20;
1357 rtm
->rtm_src_len
= 0;
1359 rtm
->rtm_table
= RT_TABLE_MAIN
;
1360 rtm
->rtm_protocol
= rt
->rt_protocol
;
1361 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
1362 rtm
->rtm_type
= RTN_UNICAST
;
1365 if (nla_put_labels(skb
, RTA_DST
, 1, &label
))
1366 goto nla_put_failure
;
1367 if (rt
->rt_nhn
== 1) {
1368 const struct mpls_nh
*nh
= rt
->rt_nh
;
1370 if (nh
->nh_labels
&&
1371 nla_put_labels(skb
, RTA_NEWDST
, nh
->nh_labels
,
1373 goto nla_put_failure
;
1374 if (nh
->nh_via_table
!= MPLS_NEIGH_TABLE_UNSPEC
&&
1375 nla_put_via(skb
, nh
->nh_via_table
, mpls_nh_via(rt
, nh
),
1377 goto nla_put_failure
;
1378 dev
= rtnl_dereference(nh
->nh_dev
);
1379 if (dev
&& nla_put_u32(skb
, RTA_OIF
, dev
->ifindex
))
1380 goto nla_put_failure
;
1381 if (nh
->nh_flags
& RTNH_F_LINKDOWN
)
1382 rtm
->rtm_flags
|= RTNH_F_LINKDOWN
;
1383 if (nh
->nh_flags
& RTNH_F_DEAD
)
1384 rtm
->rtm_flags
|= RTNH_F_DEAD
;
1386 struct rtnexthop
*rtnh
;
1391 mp
= nla_nest_start(skb
, RTA_MULTIPATH
);
1393 goto nla_put_failure
;
1396 rtnh
= nla_reserve_nohdr(skb
, sizeof(*rtnh
));
1398 goto nla_put_failure
;
1400 dev
= rtnl_dereference(nh
->nh_dev
);
1402 rtnh
->rtnh_ifindex
= dev
->ifindex
;
1403 if (nh
->nh_flags
& RTNH_F_LINKDOWN
) {
1404 rtnh
->rtnh_flags
|= RTNH_F_LINKDOWN
;
1407 if (nh
->nh_flags
& RTNH_F_DEAD
) {
1408 rtnh
->rtnh_flags
|= RTNH_F_DEAD
;
1412 if (nh
->nh_labels
&& nla_put_labels(skb
, RTA_NEWDST
,
1415 goto nla_put_failure
;
1416 if (nh
->nh_via_table
!= MPLS_NEIGH_TABLE_UNSPEC
&&
1417 nla_put_via(skb
, nh
->nh_via_table
,
1418 mpls_nh_via(rt
, nh
),
1420 goto nla_put_failure
;
1422 /* length of rtnetlink header + attributes */
1423 rtnh
->rtnh_len
= nlmsg_get_pos(skb
) - (void *)rtnh
;
1424 } endfor_nexthops(rt
);
1426 if (linkdown
== rt
->rt_nhn
)
1427 rtm
->rtm_flags
|= RTNH_F_LINKDOWN
;
1428 if (dead
== rt
->rt_nhn
)
1429 rtm
->rtm_flags
|= RTNH_F_DEAD
;
1431 nla_nest_end(skb
, mp
);
1434 nlmsg_end(skb
, nlh
);
1438 nlmsg_cancel(skb
, nlh
);
1442 static int mpls_dump_routes(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1444 struct net
*net
= sock_net(skb
->sk
);
1445 struct mpls_route __rcu
**platform_label
;
1446 size_t platform_labels
;
1451 index
= cb
->args
[0];
1452 if (index
< MPLS_LABEL_FIRST_UNRESERVED
)
1453 index
= MPLS_LABEL_FIRST_UNRESERVED
;
1455 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
1456 platform_labels
= net
->mpls
.platform_labels
;
1457 for (; index
< platform_labels
; index
++) {
1458 struct mpls_route
*rt
;
1459 rt
= rtnl_dereference(platform_label
[index
]);
1463 if (mpls_dump_route(skb
, NETLINK_CB(cb
->skb
).portid
,
1464 cb
->nlh
->nlmsg_seq
, RTM_NEWROUTE
,
1465 index
, rt
, NLM_F_MULTI
) < 0)
1468 cb
->args
[0] = index
;
1473 static inline size_t lfib_nlmsg_size(struct mpls_route
*rt
)
1476 NLMSG_ALIGN(sizeof(struct rtmsg
))
1477 + nla_total_size(4); /* RTA_DST */
1479 if (rt
->rt_nhn
== 1) {
1480 struct mpls_nh
*nh
= rt
->rt_nh
;
1483 payload
+= nla_total_size(4); /* RTA_OIF */
1484 if (nh
->nh_via_table
!= MPLS_NEIGH_TABLE_UNSPEC
) /* RTA_VIA */
1485 payload
+= nla_total_size(2 + nh
->nh_via_alen
);
1486 if (nh
->nh_labels
) /* RTA_NEWDST */
1487 payload
+= nla_total_size(nh
->nh_labels
* 4);
1489 /* each nexthop is packed in an attribute */
1493 nhsize
+= nla_total_size(sizeof(struct rtnexthop
));
1495 if (nh
->nh_via_table
!= MPLS_NEIGH_TABLE_UNSPEC
)
1496 nhsize
+= nla_total_size(2 + nh
->nh_via_alen
);
1498 nhsize
+= nla_total_size(nh
->nh_labels
* 4);
1499 } endfor_nexthops(rt
);
1500 /* nested attribute */
1501 payload
+= nla_total_size(nhsize
);
1507 static void rtmsg_lfib(int event
, u32 label
, struct mpls_route
*rt
,
1508 struct nlmsghdr
*nlh
, struct net
*net
, u32 portid
,
1509 unsigned int nlm_flags
)
1511 struct sk_buff
*skb
;
1512 u32 seq
= nlh
? nlh
->nlmsg_seq
: 0;
1515 skb
= nlmsg_new(lfib_nlmsg_size(rt
), GFP_KERNEL
);
1519 err
= mpls_dump_route(skb
, portid
, seq
, event
, label
, rt
, nlm_flags
);
1521 /* -EMSGSIZE implies BUG in lfib_nlmsg_size */
1522 WARN_ON(err
== -EMSGSIZE
);
1526 rtnl_notify(skb
, net
, portid
, RTNLGRP_MPLS_ROUTE
, nlh
, GFP_KERNEL
);
1531 rtnl_set_sk_err(net
, RTNLGRP_MPLS_ROUTE
, err
);
1534 static int resize_platform_label_table(struct net
*net
, size_t limit
)
1536 size_t size
= sizeof(struct mpls_route
*) * limit
;
1539 struct mpls_route __rcu
**labels
= NULL
, **old
;
1540 struct mpls_route
*rt0
= NULL
, *rt2
= NULL
;
1544 labels
= kzalloc(size
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_NORETRY
);
1546 labels
= vzalloc(size
);
1552 /* In case the predefined labels need to be populated */
1553 if (limit
> MPLS_LABEL_IPV4NULL
) {
1554 struct net_device
*lo
= net
->loopback_dev
;
1555 rt0
= mpls_rt_alloc(1, lo
->addr_len
);
1558 RCU_INIT_POINTER(rt0
->rt_nh
->nh_dev
, lo
);
1559 rt0
->rt_protocol
= RTPROT_KERNEL
;
1560 rt0
->rt_payload_type
= MPT_IPV4
;
1561 rt0
->rt_nh
->nh_via_table
= NEIGH_LINK_TABLE
;
1562 rt0
->rt_nh
->nh_via_alen
= lo
->addr_len
;
1563 memcpy(__mpls_nh_via(rt0
, rt0
->rt_nh
), lo
->dev_addr
,
1566 if (limit
> MPLS_LABEL_IPV6NULL
) {
1567 struct net_device
*lo
= net
->loopback_dev
;
1568 rt2
= mpls_rt_alloc(1, lo
->addr_len
);
1571 RCU_INIT_POINTER(rt2
->rt_nh
->nh_dev
, lo
);
1572 rt2
->rt_protocol
= RTPROT_KERNEL
;
1573 rt2
->rt_payload_type
= MPT_IPV6
;
1574 rt2
->rt_nh
->nh_via_table
= NEIGH_LINK_TABLE
;
1575 rt2
->rt_nh
->nh_via_alen
= lo
->addr_len
;
1576 memcpy(__mpls_nh_via(rt2
, rt2
->rt_nh
), lo
->dev_addr
,
1581 /* Remember the original table */
1582 old
= rtnl_dereference(net
->mpls
.platform_label
);
1583 old_limit
= net
->mpls
.platform_labels
;
1585 /* Free any labels beyond the new table */
1586 for (index
= limit
; index
< old_limit
; index
++)
1587 mpls_route_update(net
, index
, NULL
, NULL
);
1589 /* Copy over the old labels */
1591 if (old_limit
< limit
)
1592 cp_size
= old_limit
* sizeof(struct mpls_route
*);
1594 memcpy(labels
, old
, cp_size
);
1596 /* If needed set the predefined labels */
1597 if ((old_limit
<= MPLS_LABEL_IPV6NULL
) &&
1598 (limit
> MPLS_LABEL_IPV6NULL
)) {
1599 RCU_INIT_POINTER(labels
[MPLS_LABEL_IPV6NULL
], rt2
);
1603 if ((old_limit
<= MPLS_LABEL_IPV4NULL
) &&
1604 (limit
> MPLS_LABEL_IPV4NULL
)) {
1605 RCU_INIT_POINTER(labels
[MPLS_LABEL_IPV4NULL
], rt0
);
1609 /* Update the global pointers */
1610 net
->mpls
.platform_labels
= limit
;
1611 rcu_assign_pointer(net
->mpls
.platform_label
, labels
);
1632 static int mpls_platform_labels(struct ctl_table
*table
, int write
,
1633 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
1635 struct net
*net
= table
->data
;
1636 int platform_labels
= net
->mpls
.platform_labels
;
1638 struct ctl_table tmp
= {
1639 .procname
= table
->procname
,
1640 .data
= &platform_labels
,
1641 .maxlen
= sizeof(int),
1642 .mode
= table
->mode
,
1644 .extra2
= &label_limit
,
1647 ret
= proc_dointvec_minmax(&tmp
, write
, buffer
, lenp
, ppos
);
1649 if (write
&& ret
== 0)
1650 ret
= resize_platform_label_table(net
, platform_labels
);
1655 static const struct ctl_table mpls_table
[] = {
1657 .procname
= "platform_labels",
1659 .maxlen
= sizeof(int),
1661 .proc_handler
= mpls_platform_labels
,
1666 static int mpls_net_init(struct net
*net
)
1668 struct ctl_table
*table
;
1670 net
->mpls
.platform_labels
= 0;
1671 net
->mpls
.platform_label
= NULL
;
1673 table
= kmemdup(mpls_table
, sizeof(mpls_table
), GFP_KERNEL
);
1677 table
[0].data
= net
;
1678 net
->mpls
.ctl
= register_net_sysctl(net
, "net/mpls", table
);
1679 if (net
->mpls
.ctl
== NULL
) {
1687 static void mpls_net_exit(struct net
*net
)
1689 struct mpls_route __rcu
**platform_label
;
1690 size_t platform_labels
;
1691 struct ctl_table
*table
;
1694 table
= net
->mpls
.ctl
->ctl_table_arg
;
1695 unregister_net_sysctl_table(net
->mpls
.ctl
);
1698 /* An rcu grace period has passed since there was a device in
1699 * the network namespace (and thus the last in flight packet)
1700 * left this network namespace. This is because
1701 * unregister_netdevice_many and netdev_run_todo has completed
1702 * for each network device that was in this network namespace.
1704 * As such no additional rcu synchronization is necessary when
1705 * freeing the platform_label table.
1708 platform_label
= rtnl_dereference(net
->mpls
.platform_label
);
1709 platform_labels
= net
->mpls
.platform_labels
;
1710 for (index
= 0; index
< platform_labels
; index
++) {
1711 struct mpls_route
*rt
= rtnl_dereference(platform_label
[index
]);
1712 RCU_INIT_POINTER(platform_label
[index
], NULL
);
1713 mpls_notify_route(net
, index
, rt
, NULL
, NULL
);
1718 kvfree(platform_label
);
1721 static struct pernet_operations mpls_net_ops
= {
1722 .init
= mpls_net_init
,
1723 .exit
= mpls_net_exit
,
1726 static int __init
mpls_init(void)
1730 BUILD_BUG_ON(sizeof(struct mpls_shim_hdr
) != 4);
1732 err
= register_pernet_subsys(&mpls_net_ops
);
1736 err
= register_netdevice_notifier(&mpls_dev_notifier
);
1738 goto out_unregister_pernet
;
1740 dev_add_pack(&mpls_packet_type
);
1742 rtnl_register(PF_MPLS
, RTM_NEWROUTE
, mpls_rtm_newroute
, NULL
, NULL
);
1743 rtnl_register(PF_MPLS
, RTM_DELROUTE
, mpls_rtm_delroute
, NULL
, NULL
);
1744 rtnl_register(PF_MPLS
, RTM_GETROUTE
, NULL
, mpls_dump_routes
, NULL
);
1749 out_unregister_pernet
:
1750 unregister_pernet_subsys(&mpls_net_ops
);
1753 module_init(mpls_init
);
1755 static void __exit
mpls_exit(void)
1757 rtnl_unregister_all(PF_MPLS
);
1758 dev_remove_pack(&mpls_packet_type
);
1759 unregister_netdevice_notifier(&mpls_dev_notifier
);
1760 unregister_pernet_subsys(&mpls_net_ops
);
1762 module_exit(mpls_exit
);
1764 MODULE_DESCRIPTION("MultiProtocol Label Switching");
1765 MODULE_LICENSE("GPL v2");
1766 MODULE_ALIAS_NETPROTO(PF_MPLS
);