#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/vmalloc.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#endif
#include <net/nexthop.h>
#include "internal.h"
/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

static int label_limit = (1 << 20) - 1;
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);
static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}
static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
{
	return rcu_dereference_rtnl(dev->mpls_ptr);
}
bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);
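/* Per-nexthop via addresses are stored as one packed array that starts right
 * after rt_nh[], aligned to VIA_ALEN_ALIGN; each nexthop owns a fixed
 * rt_max_alen sized slot, indexed by its position in rt_nh[].
 */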
static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
	int nh_index = nh - rt->rt_nh;

	return nh0_via + rt->rt_max_alen * nh_index;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}
static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
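/* Flow hash used for multipath selection: walk at most MAX_MP_SELECT_LABELS
 * shim headers, mixing every unreserved label into the hash.  An entropy
 * label (RFC 6790) ends the walk early, and at the bottom of the stack the
 * IPv4/IPv6 addresses and protocol/next-header are folded in as well.
 */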
static u32 mpls_multipath_hash(struct mpls_route *rt,
			       struct sk_buff *skb, bool bos)
{
	struct mpls_entry_decoded dec;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
	     label_index++) {
		if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		bos = dec.bos;
		if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
					 sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
						       label_index);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, sizeof(*hdr) * label_index +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
								 label_index);

				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}
	}

	return hash;
}
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
					     struct sk_buff *skb, bool bos)
{
	int alive = ACCESS_ONCE(rt->rt_nhn_alive);
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		goto out;

	if (alive <= 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb, bos);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index)
			return nh;
		n++;
	} endfor_nexthops(rt);

out:
	return &rt->rt_nh[nh_index];
}
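/* Called when the popped label was the bottom of stack: hand the packet back
 * to the IP layer by restoring skb->protocol from the route's payload type
 * (or from the IP version field) and copying the MPLS TTL into the IPv4/IPv6
 * header.
 */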
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
			struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);
		skb->protocol = htons(ETH_P_IP);
		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(dec.ttl << 8));
		hdr4->ttl = dec.ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);
		hdr6->hop_limit = dec.ttl;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		break;
	}

	return success;
}
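/* Receive handler for ETH_P_MPLS_UC packets: look up the incoming label in
 * the per-netns platform_label table, choose a nexthop, pop the shim header,
 * check and decrement the TTL, push the outgoing label stack (or hand the
 * payload to mpls_egress() on a bottom-of-stack pop) and transmit with
 * neigh_xmit() towards the configured via address.
 */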
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_nh *nh;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful this entire function runs inside of an rcu critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev || !mdev->input_enabled)
		goto drop;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt)
		goto drop;

	nh = mpls_select_multipath(rt, skb, dec.bos);
	if (!nh)
		goto drop;

	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto drop;
	dec.ttl -= 1;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto drop;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto drop;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(rt, skb, dec))
			goto drop;
	} else {
		bool bos;
		int i;
		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);

		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};
static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_OIF]	= { .type = NLA_U32 },
};
struct mpls_route_config {
	u32			rc_protocol;
	u32			rc_ifindex;
	u8			rc_via_table;
	u8			rc_via_alen;
	u8			rc_via[MAX_VIA_ALEN];
	u32			rc_label;
	u8			rc_output_labels;
	u32			rc_output_label[MAX_NEW_LABELS];
	u32			rc_nlflags;
	enum mpls_payload_type	rc_payload_type;
	struct nl_info		rc_nlinfo;
	struct rtnexthop	*rc_mp;
	int			rc_mp_len;
};
static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
{
	u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
	struct mpls_route *rt;

	rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
			   VIA_ALEN_ALIGN) +
		     num_nh * max_alen_aligned,
		     GFP_KERNEL);
	if (rt) {
		rt->rt_nhn = num_nh;
		rt->rt_nhn_alive = num_nh;
		rt->rt_max_alen = max_alen_aligned;
	}

	return rt;
}

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}
static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}
static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}
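/* Pick the lowest unused unreserved label, or LABEL_NOT_SPECIFIED when the
 * platform label table is full.
 */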
static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}
#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int err;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
	if (err)
		return ERR_PTR(err);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif
static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);

	return dev;
}
static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	RCU_INIT_POINTER(nh->nh_dev, dev);

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}
static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	err = -EINVAL;
	/* Ensure only a supported number of labels are present */
	if (cfg->rc_output_labels > MAX_NEW_LABELS)
		goto errout;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}
static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, MAX_NEW_LABELS,
				     &nh->nh_labels, nh->nh_label);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh));
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}
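/* Walk the RTA_MULTIPATH payload once up front to learn how many nexthops
 * there are and the largest RTA_VIA address length, so mpls_rt_alloc() can
 * size the whole route in a single allocation.
 */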
static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			       u8 cfg_via_alen, u8 *max_via_alen)
{
	int remaining = len;
	int nhs = 0;

	if (!rtnh) {
		*max_via_alen = cfg_via_alen;
		return (cfg_via_alen <= MAX_VIA_ALEN) ? 1 : 0;
	}

	*max_via_alen = 0;
	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}
static int mpls_nh_build_multi(struct mpls_route_config *cfg,
			       struct mpls_route *rt)
{
	struct rtnexthop *rtnh = cfg->rc_mp;
	struct nlattr *nla_via, *nla_newdst;
	int remaining = cfg->rc_mp_len;
	int nhs = 0;
	int err = 0;

	change_nexthops(rt) {
		int attrlen;

		nla_via = NULL;
		nla_newdst = NULL;

		err = -EINVAL;
		if (!rtnh_ok(rtnh, remaining))
			goto errout;

		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);

			nla_via = nla_find(attrs, attrlen, RTA_VIA);
			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
		}

		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
				    rtnh->rtnh_ifindex, nla_via, nla_newdst);
		if (err)
			goto errout;

		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			rt->rt_nhn_alive--;

		rtnh = rtnh_next(rtnh, &remaining);
		nhs++;
	} endfor_nexthops(rt);

	rt->rt_nhn = nhs;

	return 0;

errout:
	return err;
}
static int mpls_route_add(struct mpls_route_config *cfg)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_route *rt, *old;
	int err = -EINVAL;
	u8 max_via_alen;
	unsigned index;
	int nhs;

	index = cfg->rc_label;

	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
	    (cfg->rc_nlflags & NLM_F_CREATE)) {
		index = find_free_label(net);
	}

	/* Reserved labels may not be set */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported. */
	if (index >= net->mpls.platform_labels)
		goto errout;

	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
	if (cfg->rc_nlflags & NLM_F_APPEND)
		goto errout;

	err = -EEXIST;
	platform_label = rtnl_dereference(net->mpls.platform_label);
	old = rtnl_dereference(platform_label[index]);
	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
		goto errout;

	err = -EEXIST;
	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
		goto errout;

	err = -ENOENT;
	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
		goto errout;

	err = -EINVAL;
	nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
				  cfg->rc_via_alen, &max_via_alen);
	if (nhs == 0)
		goto errout;

	err = -ENOMEM;
	rt = mpls_rt_alloc(nhs, max_via_alen);
	if (!rt)
		goto errout;

	rt->rt_protocol = cfg->rc_protocol;
	rt->rt_payload_type = cfg->rc_payload_type;

	if (cfg->rc_mp)
		err = mpls_nh_build_multi(cfg, rt);
	else
		err = mpls_nh_build_from_cfg(cfg, rt);
	if (err)
		goto freert;

	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);

	return 0;

freert:
	mpls_rt_free(rt);
errout:
	return err;
}
static int mpls_route_del(struct mpls_route_config *cfg)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	unsigned index;
	int err = -EINVAL;

	index = cfg->rc_label;

	/* Reserved labels may not be removed */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported */
	if (index >= net->mpls.platform_labels)
		goto errout;

	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);

	err = 0;
errout:
	return err;
}
#define MPLS_PERDEV_SYSCTL_OFFSET(field)	\
	(&((struct mpls_dev *)0)->field)

static const struct ctl_table mpls_dev_table[] = {
	{
		.procname	= "input",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
		.data		= MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
	},
	{ }
};
static int mpls_dev_sysctl_register(struct net_device *dev,
				    struct mpls_dev *mdev)
{
	char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
	struct ctl_table *table;
	int i;

	table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
	if (!table)
		goto out;

	/* Table data contains only offsets relative to the base of
	 * the mdev at this point, so make them absolute.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
		table[i].data = (char *)mdev + (uintptr_t)table[i].data;

	snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);

	mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
	if (!mdev->sysctl)
		goto free;

	return 0;

free:
	kfree(table);
out:
	return -ENOBUFS;
}
static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
{
	struct ctl_table *table;

	table = mdev->sysctl->ctl_table_arg;
	unregister_net_sysctl_table(mdev->sysctl);
	kfree(table);
}
static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
	struct mpls_dev *mdev;
	int err = -ENOMEM;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(err);

	err = mpls_dev_sysctl_register(dev, mdev);
	if (err)
		goto free;

	rcu_assign_pointer(dev->mpls_ptr, mdev);

	return mdev;

free:
	kfree(mdev);
	return ERR_PTR(err);
}
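/* On device down/unregister, mark the affected nexthops dead or linkdown and
 * shrink rt_nhn_alive; mpls_ifup() is the inverse, clearing the flags and
 * recounting the alive nexthops once the device comes back.
 */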
static void mpls_ifdown(struct net_device *dev, int event)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		change_nexthops(rt) {
			if (rtnl_dereference(nh->nh_dev) != dev)
				continue;
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				nh->nh_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				nh->nh_flags |= RTNH_F_LINKDOWN;
				ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
				break;
			}
			if (event == NETDEV_UNREGISTER)
				RCU_INIT_POINTER(nh->nh_dev, NULL);
		} endfor_nexthops(rt);
	}
}
static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;
	int alive;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		change_nexthops(rt) {
			struct net_device *nh_dev =
				rtnl_dereference(nh->nh_dev);

			if (!(nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (nh_dev != dev)
				continue;
			alive++;
			nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(rt);

		ACCESS_ONCE(rt->rt_nhn_alive) = alive;
	}
}
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mpls_dev *mdev;
	unsigned int flags;

	if (event == NETDEV_REGISTER) {
		/* For now just support Ethernet, IPGRE, SIT and IPIP devices */
		if (dev->type == ARPHRD_ETHER ||
		    dev->type == ARPHRD_LOOPBACK ||
		    dev->type == ARPHRD_IPGRE ||
		    dev->type == ARPHRD_SIT ||
		    dev->type == ARPHRD_TUNNEL) {
			mdev = mpls_add_dev(dev);
			if (IS_ERR(mdev))
				return notifier_from_errno(PTR_ERR(mdev));
		}
		return NOTIFY_OK;
	}

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_DOWN:
		mpls_ifdown(dev, event);
		break;
	case NETDEV_UP:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifup(dev, RTNH_F_DEAD);
		break;
	case NETDEV_CHANGE:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifdown(dev, event);
		break;
	case NETDEV_UNREGISTER:
		mpls_ifdown(dev, event);
		mdev = mpls_dev_get(dev);
		if (mdev) {
			mpls_dev_sysctl_unregister(mdev);
			RCU_INIT_POINTER(dev->mpls_ptr, NULL);
			kfree_rcu(mdev, rcu);
		}
		break;
	case NETDEV_CHANGENAME:
		mdev = mpls_dev_get(dev);
		if (mdev) {
			int err;

			mpls_dev_sysctl_unregister(mdev);
			err = mpls_dev_sysctl_register(dev, mdev);
			if (err)
				return notifier_from_errno(err);
		}
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block mpls_dev_notifier = {
	.notifier_call = mpls_dev_notify,
};
static int nla_put_via(struct sk_buff *skb,
		       u8 table, const void *addr, int alen)
{
	static const int table_to_family[NEIGH_NR_TABLES + 1] = {
		AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
	};
	struct nlattr *nla;
	struct rtvia *via;
	int family = AF_UNSPEC;

	nla = nla_reserve(skb, RTA_VIA, alen + 2);
	if (!nla)
		return -EMSGSIZE;

	if (table <= NEIGH_NR_TABLES)
		family = table_to_family[table];

	via = nla_data(nla);
	via->rtvia_family = family;
	memcpy(via->rtvia_addr, addr, alen);
	return 0;
}
int nla_put_labels(struct sk_buff *skb, int attrtype,
		   u8 labels, const u32 label[])
{
	struct nlattr *nla;
	struct mpls_shim_hdr *nla_label;
	bool bos;
	int i;

	nla = nla_reserve(skb, attrtype, labels*4);
	if (!nla)
		return -EMSGSIZE;

	nla_label = nla_data(nla);
	bos = true;
	for (i = labels - 1; i >= 0; i--) {
		nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
		bos = false;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nla_put_labels);
int nla_get_labels(const struct nlattr *nla,
		   u32 max_labels, u8 *labels, u32 label[])
{
	unsigned len = nla_len(nla);
	unsigned nla_labels;
	struct mpls_shim_hdr *nla_label;
	bool bos;
	int i;

	/* len needs to be an even multiple of 4 (the label size) */
	if (len & 3)
		return -EINVAL;

	/* Limit the number of new labels allowed */
	nla_labels = len/4;
	if (nla_labels > max_labels)
		return -EINVAL;

	nla_label = nla_data(nla);
	bos = true;
	for (i = nla_labels - 1; i >= 0; i--, bos = false) {
		struct mpls_entry_decoded dec;
		dec = mpls_entry_decode(nla_label + i);

		/* Ensure the bottom of stack flag is properly set
		 * and ttl and tc are both clear.
		 */
		if ((dec.bos != bos) || dec.ttl || dec.tc)
			return -EINVAL;

		switch (dec.label) {
		case MPLS_LABEL_IMPLNULL:
			/* RFC3032: This is a label that an LSR may
			 * assign and distribute, but which never
			 * actually appears in the encapsulation.
			 */
			return -EINVAL;
		}

		label[i] = dec.label;
	}
	*labels = nla_labels;

	return 0;
}
EXPORT_SYMBOL_GPL(nla_get_labels);
int nla_get_via(const struct nlattr *nla, u8 *via_alen,
		u8 *via_table, u8 via_addr[])
{
	struct rtvia *via = nla_data(nla);
	int err = -EINVAL;
	int alen;

	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
		goto errout;
	alen = nla_len(nla) -
			offsetof(struct rtvia, rtvia_addr);
	if (alen > MAX_VIA_ALEN)
		goto errout;

	/* Validate the address family */
	switch (via->rtvia_family) {
	case AF_PACKET:
		*via_table = NEIGH_LINK_TABLE;
		break;
	case AF_INET:
		*via_table = NEIGH_ARP_TABLE;
		if (alen != 4)
			goto errout;
		break;
	case AF_INET6:
		*via_table = NEIGH_ND_TABLE;
		if (alen != 16)
			goto errout;
		break;
	default:
		/* Unsupported address family */
		goto errout;
	}

	memcpy(via_addr, via->rtvia_addr, alen);
	*via_alen = alen;
	err = 0;

errout:
	return err;
}
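/* rtm_to_route_config() translates an RTM_NEWROUTE/RTM_DELROUTE request for
 * the AF_MPLS family into a struct mpls_route_config.  As an illustration
 * (assuming an iproute2 build with MPLS support), a request such as
 * "ip -f mpls route add 100 as 200 via inet 10.1.1.2 dev eth0" would arrive
 * here with RTA_DST=100, RTA_NEWDST=200, RTA_VIA=10.1.1.2 and RTA_OIF set
 * to eth0's ifindex.
 */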
static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct mpls_route_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	int index;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	if (rtm->rtm_family != AF_MPLS)
		goto errout;
	if (rtm->rtm_dst_len != 20)
		goto errout;
	if (rtm->rtm_src_len != 0)
		goto errout;
	if (rtm->rtm_tos != 0)
		goto errout;
	if (rtm->rtm_table != RT_TABLE_MAIN)
		goto errout;
	/* Any value is acceptable for rtm_protocol */

	/* As mpls uses destination specific addresses
	 * (or source specific address in the case of multicast)
	 * all addresses have universal scope.
	 */
	if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
		goto errout;
	if (rtm->rtm_type != RTN_UNICAST)
		goto errout;
	if (rtm->rtm_flags != 0)
		goto errout;

	cfg->rc_label		= LABEL_NOT_SPECIFIED;
	cfg->rc_protocol	= rtm->rtm_protocol;
	cfg->rc_via_table	= MPLS_NEIGH_TABLE_UNSPEC;
	cfg->rc_nlflags		= nlh->nlmsg_flags;
	cfg->rc_nlinfo.portid	= NETLINK_CB(skb).portid;
	cfg->rc_nlinfo.nlh	= nlh;
	cfg->rc_nlinfo.nl_net	= sock_net(skb->sk);

	for (index = 0; index <= RTA_MAX; index++) {
		struct nlattr *nla = tb[index];
		if (!nla)
			continue;

		switch (index) {
		case RTA_OIF:
			cfg->rc_ifindex = nla_get_u32(nla);
			break;
		case RTA_NEWDST:
			if (nla_get_labels(nla, MAX_NEW_LABELS,
					   &cfg->rc_output_labels,
					   cfg->rc_output_label))
				goto errout;
			break;
		case RTA_DST:
		{
			u8 label_count;
			if (nla_get_labels(nla, 1, &label_count,
					   &cfg->rc_label))
				goto errout;

			/* Reserved labels may not be set */
			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
				goto errout;

			break;
		}
		case RTA_VIA:
		{
			if (nla_get_via(nla, &cfg->rc_via_alen,
					&cfg->rc_via_table, cfg->rc_via))
				goto errout;
			break;
		}
		case RTA_MULTIPATH:
		{
			cfg->rc_mp = nla_data(nla);
			cfg->rc_mp_len = nla_len(nla);
			break;
		}
		default:
			/* Unsupported attribute */
			goto errout;
		}
	}

	err = 0;
errout:
	return err;
}
static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_del(&cfg);
}

static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_add(&cfg);
}
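/* Fill one RTM_NEWROUTE message for a label: RTA_DST carries the label, a
 * single-nexthop route puts RTA_NEWDST/RTA_VIA/RTA_OIF at the top level,
 * while a multipath route nests one rtnexthop per path inside RTA_MULTIPATH.
 */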
static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
			   u32 label, struct mpls_route *rt, int flags)
{
	struct net_device *dev;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_MPLS;
	rtm->rtm_dst_len = 20;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = RT_TABLE_MAIN;
	rtm->rtm_protocol = rt->rt_protocol;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;

	if (nla_put_labels(skb, RTA_DST, 1, &label))
		goto nla_put_failure;
	if (rt->rt_nhn == 1) {
		const struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_labels &&
		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
				   nh->nh_label))
			goto nla_put_failure;
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
				nh->nh_via_alen))
			goto nla_put_failure;
		dev = rtnl_dereference(nh->nh_dev);
		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
			goto nla_put_failure;
		if (nh->nh_flags & RTNH_F_LINKDOWN)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (nh->nh_flags & RTNH_F_DEAD)
			rtm->rtm_flags |= RTNH_F_DEAD;
	} else {
		struct rtnexthop *rtnh;
		struct nlattr *mp;
		int dead = 0;
		int linkdown = 0;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(rt) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			dev = rtnl_dereference(nh->nh_dev);
			if (dev)
				rtnh->rtnh_ifindex = dev->ifindex;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
				linkdown++;
			}
			if (nh->nh_flags & RTNH_F_DEAD) {
				rtnh->rtnh_flags |= RTNH_F_DEAD;
				dead++;
			}

			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
							    nh->nh_labels,
							    nh->nh_label))
				goto nla_put_failure;
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
			    nla_put_via(skb, nh->nh_via_table,
					mpls_nh_via(rt, nh),
					nh->nh_via_alen))
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
		} endfor_nexthops(rt);

		if (linkdown == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (dead == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_DEAD;

		nla_nest_end(skb, mp);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned int index;

	ASSERT_RTNL();

	index = cb->args[0];
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		index = MPLS_LABEL_FIRST_UNRESERVED;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (; index < platform_labels; index++) {
		struct mpls_route *rt;
		rt = rtnl_dereference(platform_label[index]);
		if (!rt)
			continue;

		if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
				    index, rt, NLM_F_MULTI) < 0)
			break;
	}
	cb->args[0] = index;

	return skb->len;
}
static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
	size_t payload =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4);			/* RTA_DST */

	if (rt->rt_nhn == 1) {
		struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_dev)
			payload += nla_total_size(4); /* RTA_OIF */
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
			payload += nla_total_size(2 + nh->nh_via_alen);
		if (nh->nh_labels) /* RTA_NEWDST */
			payload += nla_total_size(nh->nh_labels * 4);
	} else {
		/* each nexthop is packed in an attribute */
		size_t nhsize = 0;

		for_nexthops(rt) {
			nhsize += nla_total_size(sizeof(struct rtnexthop));
			/* RTA_VIA */
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
				nhsize += nla_total_size(2 + nh->nh_via_alen);
			/* RTA_NEWDST */
			if (nh->nh_labels)
				nhsize += nla_total_size(nh->nh_labels * 4);
		} endfor_nexthops(rt);
		/* nested attribute */
		payload += nla_total_size(nhsize);
	}

	return payload;
}
static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = nlh ? nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);

	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}
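/* Resize the per-namespace platform_label table: allocate the new array
 * (falling back to vzalloc() for large limits), pre-populate the IPv4/IPv6
 * explicit-null routes when they newly fit, copy the old entries across,
 * publish the new table with rcu_assign_pointer() and free whatever no
 * longer fits.
 */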
static int resize_platform_label_table(struct net *net, size_t limit)
{
	size_t size = sizeof(struct mpls_route *) * limit;
	size_t old_limit;
	size_t cp_size;
	struct mpls_route __rcu **labels = NULL, **old;
	struct mpls_route *rt0 = NULL, *rt2 = NULL;
	unsigned index;

	if (size) {
		labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (!labels)
			labels = vzalloc(size);

		if (!labels)
			goto nolabels;
	}

	/* In case the predefined labels need to be populated */
	if (limit > MPLS_LABEL_IPV4NULL) {
		struct net_device *lo = net->loopback_dev;
		rt0 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt0)
			goto nort0;
		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
		rt0->rt_protocol = RTPROT_KERNEL;
		rt0->rt_payload_type = MPT_IPV4;
		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt0->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}
	if (limit > MPLS_LABEL_IPV6NULL) {
		struct net_device *lo = net->loopback_dev;
		rt2 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt2)
			goto nort2;
		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
		rt2->rt_protocol = RTPROT_KERNEL;
		rt2->rt_payload_type = MPT_IPV6;
		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt2->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}

	rtnl_lock();
	/* Remember the original table */
	old = rtnl_dereference(net->mpls.platform_label);
	old_limit = net->mpls.platform_labels;

	/* Free any labels beyond the new table */
	for (index = limit; index < old_limit; index++)
		mpls_route_update(net, index, NULL, NULL);

	/* Copy over the old labels */
	cp_size = size;
	if (old_limit < limit)
		cp_size = old_limit * sizeof(struct mpls_route *);

	memcpy(labels, old, cp_size);

	/* If needed set the predefined labels */
	if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
	    (limit > MPLS_LABEL_IPV6NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
		rt2 = NULL;
	}

	if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
	    (limit > MPLS_LABEL_IPV4NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
		rt0 = NULL;
	}

	/* Update the global pointers */
	net->mpls.platform_labels = limit;
	rcu_assign_pointer(net->mpls.platform_label, labels);

	rtnl_unlock();

	mpls_rt_free(rt2);
	mpls_rt_free(rt0);

	if (old) {
		synchronize_rcu();
		kvfree(old);
	}
	return 0;

nort2:
	mpls_rt_free(rt0);
nort0:
	kvfree(labels);
nolabels:
	return -ENOMEM;
}
static int mpls_platform_labels(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = table->data;
	int platform_labels = net->mpls.platform_labels;
	int ret;
	struct ctl_table tmp = {
		.procname	= table->procname,
		.data		= &platform_labels,
		.maxlen		= sizeof(int),
		.mode		= table->mode,
		.extra2		= &label_limit,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = resize_platform_label_table(net, platform_labels);

	return ret;
}
static const struct ctl_table mpls_table[] = {
	{
		.procname	= "platform_labels",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= mpls_platform_labels,
	},
	{ }
};
static int mpls_net_init(struct net *net)
{
	struct ctl_table *table;

	net->mpls.platform_labels = 0;
	net->mpls.platform_label = NULL;

	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	table[0].data = net;
	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
	if (net->mpls.ctl == NULL) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}
static void mpls_net_exit(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	struct ctl_table *table;
	unsigned int index;

	table = net->mpls.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->mpls.ctl);
	kfree(table);

	/* An rcu grace period has passed since there was a device in
	 * the network namespace (and thus the last in flight packet)
	 * left this network namespace.  This is because
	 * unregister_netdevice_many and netdev_run_todo has completed
	 * for each network device that was in this network namespace.
	 *
	 * As such no additional rcu synchronization is necessary when
	 * freeing the platform_label table.
	 */
	rtnl_lock();
	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = 0; index < platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
		RCU_INIT_POINTER(platform_label[index], NULL);
		mpls_rt_free(rt);
	}
	rtnl_unlock();

	kvfree(platform_label);
}
static struct pernet_operations mpls_net_ops = {
	.init = mpls_net_init,
	.exit = mpls_net_exit,
};
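/* Module init: register the per-netns state, the netdevice notifier that
 * tracks MPLS-capable devices, the ETH_P_MPLS_UC packet handler and the
 * AF_MPLS rtnetlink route operations; mpls_exit() tears them down in
 * reverse order.
 */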
static int __init mpls_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);

	err = register_pernet_subsys(&mpls_net_ops);
	if (err)
		goto out;

	err = register_netdevice_notifier(&mpls_dev_notifier);
	if (err)
		goto out_unregister_pernet;

	dev_add_pack(&mpls_packet_type);

	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
	err = 0;
out:
	return err;

out_unregister_pernet:
	unregister_pernet_subsys(&mpls_net_ops);
	goto out;
}
module_init(mpls_init);
static void __exit mpls_exit(void)
{
	rtnl_unregister_all(PF_MPLS);
	dev_remove_pack(&mpls_packet_type);
	unregister_netdevice_notifier(&mpls_dev_notifier);
	unregister_pernet_subsys(&mpls_net_ops);
}
module_exit(mpls_exit);

MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);