#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/addrconf.h>
#include <net/nexthop.h>
#include "internal.h"

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

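/* A via table value one past the last real neighbour table marks a
 * nexthop with no explicit via address; mpls_forward() then transmits
 * using the output device's own link-layer address.
 */
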
static int zero;
static int label_limit = (1 << 20) - 1;

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);

static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}

bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);

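/* The via address for a nexthop is not embedded in struct mpls_nh;
 * mpls_rt_alloc() places all via addresses in one block after the
 * rt_nh[] array, one rt_max_alen sized slot per nexthop, aligned to
 * VIA_ALEN_ALIGN. __mpls_nh_via() computes a pointer into that block.
 */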
static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
	int nh_index = nh - rt->rt_nh;

	return nh0_via + rt->rt_max_alen * nh_index;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}

static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);

bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb)
{
	struct mpls_dev *mdev;

	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
		mdev = mpls_dev_get(dev);
		if (mdev)
			MPLS_INC_STATS_LEN(mdev, skb->len,
					   tx_packets,
					   tx_bytes);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct inet6_dev *in6dev = __in6_dev_get(dev);

		if (in6dev)
			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
					 IPSTATS_MIB_OUT, skb->len);
#endif
	}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);

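/* Compute a flow hash for multipath selection: mix in up to
 * MAX_MP_SELECT_LABELS non-reserved labels, stop early once the
 * entropy label (RFC 6790) has been hashed, and at the bottom of the
 * stack fold in the IPv4/IPv6 addresses and protocol when the payload
 * looks like an IP header.
 */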
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
	struct mpls_entry_decoded dec;
	unsigned int mpls_hdr_len = 0;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
	     label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* found bottom label; does skb have room for a header? */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(hdr + 1);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, mpls_hdr_len +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(hdr + 1);
				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}

		break;
	}

	return hash;
}

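/* Pick a nexthop for skb: trivial when the route has only one path,
 * otherwise index the alive paths with mpls_multipath_hash() % alive,
 * skipping nexthops marked RTNH_F_DEAD or RTNH_F_LINKDOWN when some
 * paths are down.
 */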
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
					     struct sk_buff *skb)
{
	int alive = READ_ONCE(rt->rt_nhn_alive);
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		goto out;

	if (alive <= 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index) {
			/* translate the index among alive nexthops into
			 * the nexthop's position in rt_nh[]
			 */
			nh_index = nhsel;
			break;
		}
		n++;
	} endfor_nexthops(rt);

out:
	return &rt->rt_nh[nh_index];
}

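/* Bottom of stack on egress: restore skb->protocol for the payload and
 * propagate the (already decremented) MPLS TTL into the IPv4 TTL,
 * fixing the header checksum, or into the IPv6 hop limit.
 */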
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
			struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 *
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present.  The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);
		skb->protocol = htons(ETH_P_IP);
		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(dec.ttl << 8));
		hdr4->ttl = dec.ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);
		hdr6->hop_limit = dec.ttl;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		break;
	}

	return success;
}

static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_nh *nh;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *out_mdev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful this entire function runs inside of an rcu critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto drop;

	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
			   rx_bytes);

	if (!mdev->input_enabled) {
		MPLS_INC_STATS(mdev, rx_dropped);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto err;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto err;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt) {
		MPLS_INC_STATS(mdev, rx_noroute);
		goto drop;
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh)
		goto err;

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto err;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;
	dec.ttl -= 1;

	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
		goto tx_err;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto tx_err;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto tx_err;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(rt, skb, dec))
			goto err;
	} else {
		bool bos;
		int i;
		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);
		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	mpls_stats_inc_outucastpkts(out_dev, skb);

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

tx_err:
	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
	if (out_mdev)
		MPLS_INC_STATS(out_mdev, tx_errors);
	goto drop;
err:
	MPLS_INC_STATS(mdev, rx_errors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

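/* mpls_forward() is hooked up as the packet_type handler for
 * ETH_P_MPLS_UC frames via dev_add_pack() in mpls_init().
 */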
static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};

static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_OIF]	= { .type = NLA_U32 },
};

struct mpls_route_config {
	u32			rc_protocol;
	u32			rc_ifindex;
	u8			rc_via_table;
	u8			rc_via_alen;
	u8			rc_via[MAX_VIA_ALEN];
	u32			rc_label;
	u8			rc_output_labels;
	u32			rc_output_label[MAX_NEW_LABELS];
	u32			rc_nlflags;
	enum mpls_payload_type	rc_payload_type;
	struct nl_info		rc_nlinfo;
	struct rtnexthop	*rc_mp;
	int			rc_mp_len;
};

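/* The rc_* fields mirror the netlink route attributes filled in by
 * rtm_to_route_config(); with iproute2 an illustrative request would
 * look something like:
 *   ip -f mpls route add 100 as 200 via inet 10.1.1.2 dev eth0
 * which populates rc_label, rc_output_label[], rc_via* and rc_ifindex.
 */
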
static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
{
	u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
	struct mpls_route *rt;

	rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
			   VIA_ALEN_ALIGN) +
		     num_nh * max_alen_aligned,
		     GFP_KERNEL);
	if (rt) {
		rt->rt_nhn = num_nh;
		rt->rt_nhn_alive = num_nh;
		rt->rt_max_alen = max_alen_aligned;
	}

	return rt;
}

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}

static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}

static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}

static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}

#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int err;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
	if (err)
		return ERR_PTR(err);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyways, so release the dev reference */
	dev_put(dev);

	return dev;
}

static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	RCU_INIT_POINTER(nh->nh_dev, dev);

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}

static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	err = -EINVAL;
	/* Ensure only a supported number of labels are present */
	if (cfg->rc_output_labels > MAX_NEW_LABELS)
		goto errout;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}

static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, MAX_NEW_LABELS,
				     &nh->nh_labels, nh->nh_label);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh));
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}

static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			       u8 cfg_via_alen, u8 *max_via_alen)
{
	int remaining = len;
	int nhs = 0;

	*max_via_alen = 0;

	if (!rtnh) {
		*max_via_alen = cfg_via_alen;
		return 1;
	}

	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int mpls_nh_build_multi(struct mpls_route_config *cfg,
			       struct mpls_route *rt)
{
	struct rtnexthop *rtnh = cfg->rc_mp;
	struct nlattr *nla_via, *nla_newdst;
	int remaining = cfg->rc_mp_len;
	int err = 0;

	change_nexthops(rt) {
		int attrlen;

		nla_via = NULL;
		nla_newdst = NULL;

		err = -EINVAL;
		if (!rtnh_ok(rtnh, remaining))
			goto errout;

		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);

			nla_via = nla_find(attrs, attrlen, RTA_VIA);
			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
		}

		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
				    rtnh->rtnh_ifindex, nla_via, nla_newdst);
		if (err)
			goto errout;

		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			rt->rt_nhn_alive--;

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(rt);

	return 0;

errout:
	return err;
}

static int mpls_route_add(struct mpls_route_config *cfg)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_route *rt, *old;
	int err = -EINVAL;
	u8 max_via_alen;
	unsigned index;
	int nhs;

	index = cfg->rc_label;

	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
	    (cfg->rc_nlflags & NLM_F_CREATE)) {
		index = find_free_label(net);
	}

	/* Reserved labels may not be set */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported. */
	if (index >= net->mpls.platform_labels)
		goto errout;

	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
	if (cfg->rc_nlflags & NLM_F_APPEND)
		goto errout;

	err = -EEXIST;
	platform_label = rtnl_dereference(net->mpls.platform_label);
	old = rtnl_dereference(platform_label[index]);
	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
		goto errout;

	err = -EEXIST;
	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
		goto errout;

	err = -ENOENT;
	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
		goto errout;

	err = -EINVAL;
	nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
				  cfg->rc_via_alen, &max_via_alen);
	if (nhs == 0)
		goto errout;

	err = -ENOMEM;
	rt = mpls_rt_alloc(nhs, max_via_alen);
	if (!rt)
		goto errout;

	rt->rt_protocol = cfg->rc_protocol;
	rt->rt_payload_type = cfg->rc_payload_type;

	if (cfg->rc_mp)
		err = mpls_nh_build_multi(cfg, rt);
	else
		err = mpls_nh_build_from_cfg(cfg, rt);
	if (err)
		goto freert;

	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);

	return 0;

freert:
	mpls_rt_free(rt);
errout:
	return err;
}

static int mpls_route_del(struct mpls_route_config *cfg)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	unsigned index;
	int err = -EINVAL;

	index = cfg->rc_label;

	/* Reserved labels may not be removed */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported */
	if (index >= net->mpls.platform_labels)
		goto errout;

	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);

	err = 0;
errout:
	return err;
}

static void mpls_get_stats(struct mpls_dev *mdev,
			   struct mpls_link_stats *stats)
{
	struct mpls_pcpu_stats *p;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		struct mpls_link_stats local;
		unsigned int start;

		p = per_cpu_ptr(mdev->stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			local = p->stats;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= local.rx_packets;
		stats->rx_bytes		+= local.rx_bytes;
		stats->tx_packets	+= local.tx_packets;
		stats->tx_bytes		+= local.tx_bytes;
		stats->rx_errors	+= local.rx_errors;
		stats->tx_errors	+= local.tx_errors;
		stats->rx_dropped	+= local.rx_dropped;
		stats->tx_dropped	+= local.tx_dropped;
		stats->rx_noroute	+= local.rx_noroute;
	}
}

static int mpls_fill_stats_af(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct mpls_link_stats *stats;
	struct mpls_dev *mdev;
	struct nlattr *nla;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return -ENODATA;

	nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
				sizeof(struct mpls_link_stats),
				MPLS_STATS_UNSPEC);
	if (!nla)
		return -EMSGSIZE;

	stats = nla_data(nla);
	mpls_get_stats(mdev, stats);

	return 0;
}

*dev
)
955 struct mpls_dev
*mdev
;
957 mdev
= mpls_dev_get(dev
);
961 return nla_total_size_64bit(sizeof(struct mpls_link_stats
));
static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
				     u32 portid, u32 seq, int event,
				     unsigned int flags, int type)
{
	struct nlmsghdr *nlh;
	struct netconfmsg *ncm;
	bool all = false;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	if (type == NETCONFA_ALL)
		all = true;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_MPLS;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
		goto nla_put_failure;

	if ((all || type == NETCONFA_INPUT) &&
	    nla_put_s32(skb, NETCONFA_INPUT,
			mdev->input_enabled) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mpls_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
			+ nla_total_size(4); /* NETCONFA_IFINDEX */
	bool all = false;

	if (type == NETCONFA_ALL)
		all = true;

	if (all || type == NETCONFA_INPUT)
		size += nla_total_size(4);

	return size;
}

static void mpls_netconf_notify_devconf(struct net *net, int type,
					struct mpls_dev *mdev)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF,
					0, type);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
}

static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
};

static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX + 1];
	struct netconfmsg *ncm;
	struct net_device *dev;
	struct mpls_dev *mdev;
	struct sk_buff *skb;
	int ifindex;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
			  devconf_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[NETCONFA_IFINDEX])
		goto errout;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		goto errout;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto errout;

	err = -ENOBUFS;
	skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					NETCONFA_ALL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

static int mpls_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	struct net_device *dev;
	struct mpls_dev *mdev;
	int idx, s_idx;
	int h, s_h;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		cb->seq = net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			mdev = mpls_dev_get(dev);
			if (!mdev)
				goto cont;
			if (mpls_netconf_fill_devconf(skb, mdev,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      RTM_NEWNETCONF,
						      NLM_F_MULTI,
						      NETCONFA_ALL) < 0) {
				rcu_read_unlock();
				goto done;
			}
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
		rcu_read_unlock();
	}
done:
	cb->args[0] = h;
	cb->args[1] = idx;

	return skb->len;
}

#define MPLS_PERDEV_SYSCTL_OFFSET(field)	\
	(&((struct mpls_dev *)0)->field)

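/* The initializer above records only the offset of @field within
 * struct mpls_dev, by "taking the address" of the field on a NULL base
 * pointer; mpls_dev_sysctl_register() later rebases these offsets onto
 * the real mdev.
 */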
static int mpls_conf_proc(struct ctl_table *ctl, int write,
			  void __user *buffer,
			  size_t *lenp, loff_t *ppos)
{
	int oval = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write) {
		struct mpls_dev *mdev = ctl->extra1;
		int i = (int *)ctl->data - (int *)mdev;
		struct net *net = ctl->extra2;
		int val = *(int *)ctl->data;

		if (i == offsetof(struct mpls_dev, input_enabled) &&
		    val != oval)
			mpls_netconf_notify_devconf(net,
						    NETCONFA_INPUT,
						    mdev);
	}

	return ret;
}

static const struct ctl_table mpls_dev_table[] = {
	{
		.procname	= "input",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= mpls_conf_proc,
		.data		= MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
	},
	{ }
};

static int mpls_dev_sysctl_register(struct net_device *dev,
				    struct mpls_dev *mdev)
{
	char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
	struct net *net = dev_net(dev);
	struct ctl_table *table;
	int i;

	table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
	if (!table)
		goto out;

	/* Table data contains only offsets relative to the base of
	 * the mdev at this point, so make them absolute.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
		table[i].data = (char *)mdev + (uintptr_t)table[i].data;
		table[i].extra1 = mdev;
		table[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);

	mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
	if (!mdev->sysctl)
		goto free;

	return 0;

free:
	kfree(table);
out:
	return -ENOBUFS;
}

static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
{
	struct ctl_table *table;

	table = mdev->sysctl->ctl_table_arg;
	unregister_net_sysctl_table(mdev->sysctl);
	kfree(table);
}

static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
	struct mpls_dev *mdev;
	int err = -ENOMEM;
	int i;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(err);

	mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
	if (!mdev->stats)
		goto free;

	for_each_possible_cpu(i) {
		struct mpls_pcpu_stats *mpls_stats;

		mpls_stats = per_cpu_ptr(mdev->stats, i);
		u64_stats_init(&mpls_stats->syncp);
	}

	err = mpls_dev_sysctl_register(dev, mdev);
	if (err)
		goto free;

	mdev->dev = dev;
	rcu_assign_pointer(dev->mpls_ptr, mdev);

	return mdev;

free:
	free_percpu(mdev->stats);
	kfree(mdev);
	return ERR_PTR(err);
}

static void mpls_dev_destroy_rcu(struct rcu_head *head)
{
	struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);

	free_percpu(mdev->stats);
	kfree(mdev);
}

static void mpls_ifdown(struct net_device *dev, int event)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
	unsigned int alive;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		change_nexthops(rt) {
			if (rtnl_dereference(nh->nh_dev) != dev)
				goto next;
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				nh->nh_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				nh->nh_flags |= RTNH_F_LINKDOWN;
				break;
			}
			if (event == NETDEV_UNREGISTER)
				RCU_INIT_POINTER(nh->nh_dev, NULL);
next:
			if (!(nh->nh_flags & nh_flags))
				alive++;
		} endfor_nexthops(rt);

		WRITE_ONCE(rt->rt_nhn_alive, alive);
	}
}

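/* Inverse of mpls_ifdown(): clear the given nh_flags on every nexthop
 * bound to this device and recompute rt_nhn_alive.
 */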
static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;
	int alive;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		change_nexthops(rt) {
			struct net_device *nh_dev =
				rtnl_dereference(nh->nh_dev);

			if (!(nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (nh_dev != dev)
				continue;
			alive++;
			nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(rt);

		WRITE_ONCE(rt->rt_nhn_alive, alive);
	}
}

static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mpls_dev *mdev;
	unsigned int flags;

	if (event == NETDEV_REGISTER) {
		/* For now just support Ethernet, IPGRE, SIT and IPIP devices */
		if (dev->type == ARPHRD_ETHER ||
		    dev->type == ARPHRD_LOOPBACK ||
		    dev->type == ARPHRD_IPGRE ||
		    dev->type == ARPHRD_SIT ||
		    dev->type == ARPHRD_TUNNEL) {
			mdev = mpls_add_dev(dev);
			if (IS_ERR(mdev))
				return notifier_from_errno(PTR_ERR(mdev));
		}
		return NOTIFY_OK;
	}

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_DOWN:
		mpls_ifdown(dev, event);
		break;
	case NETDEV_UP:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifup(dev, RTNH_F_DEAD);
		break;
	case NETDEV_CHANGE:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifdown(dev, event);
		break;
	case NETDEV_UNREGISTER:
		mpls_ifdown(dev, event);
		mdev = mpls_dev_get(dev);
		if (mdev) {
			mpls_dev_sysctl_unregister(mdev);
			RCU_INIT_POINTER(dev->mpls_ptr, NULL);
			call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
		}
		break;
	case NETDEV_CHANGENAME:
		mdev = mpls_dev_get(dev);
		if (mdev) {
			int err;

			mpls_dev_sysctl_unregister(mdev);
			err = mpls_dev_sysctl_register(dev, mdev);
			if (err)
				return notifier_from_errno(err);
		}
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mpls_dev_notifier = {
	.notifier_call = mpls_dev_notify,
};

static int nla_put_via(struct sk_buff *skb,
		       u8 table, const void *addr, int alen)
{
	static const int table_to_family[NEIGH_NR_TABLES + 1] = {
		AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
	};
	struct nlattr *nla;
	struct rtvia *via;
	int family = AF_UNSPEC;

	nla = nla_reserve(skb, RTA_VIA, alen + 2);
	if (!nla)
		return -EMSGSIZE;

	if (table <= NEIGH_NR_TABLES)
		family = table_to_family[table];

	via = nla_data(nla);
	via->rtvia_family = family;
	memcpy(via->rtvia_addr, addr, alen);
	return 0;
}

*skb
, int attrtype
,
1436 u8 labels
, const u32 label
[])
1439 struct mpls_shim_hdr
*nla_label
;
1442 nla
= nla_reserve(skb
, attrtype
, labels
*4);
1446 nla_label
= nla_data(nla
);
1448 for (i
= labels
- 1; i
>= 0; i
--) {
1449 nla_label
[i
] = mpls_entry_encode(label
[i
], 0, 0, bos
);
1455 EXPORT_SYMBOL_GPL(nla_put_labels
);
1457 int nla_get_labels(const struct nlattr
*nla
,
1458 u32 max_labels
, u8
*labels
, u32 label
[])
1460 unsigned len
= nla_len(nla
);
1461 unsigned nla_labels
;
1462 struct mpls_shim_hdr
*nla_label
;
1466 /* len needs to be an even multiple of 4 (the label size) */
1470 /* Limit the number of new labels allowed */
1472 if (nla_labels
> max_labels
)
1475 nla_label
= nla_data(nla
);
1477 for (i
= nla_labels
- 1; i
>= 0; i
--, bos
= false) {
1478 struct mpls_entry_decoded dec
;
1479 dec
= mpls_entry_decode(nla_label
+ i
);
1481 /* Ensure the bottom of stack flag is properly set
1482 * and ttl and tc are both clear.
1484 if ((dec
.bos
!= bos
) || dec
.ttl
|| dec
.tc
)
1487 switch (dec
.label
) {
1488 case MPLS_LABEL_IMPLNULL
:
1489 /* RFC3032: This is a label that an LSR may
1490 * assign and distribute, but which never
1491 * actually appears in the encapsulation.
1496 label
[i
] = dec
.label
;
1498 *labels
= nla_labels
;
1501 EXPORT_SYMBOL_GPL(nla_get_labels
);
int nla_get_via(const struct nlattr *nla, u8 *via_alen,
		u8 *via_table, u8 via_addr[])
{
	struct rtvia *via = nla_data(nla);
	int err = -EINVAL;
	int alen;

	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
		goto errout;
	alen = nla_len(nla) -
			offsetof(struct rtvia, rtvia_addr);
	if (alen > MAX_VIA_ALEN)
		goto errout;

	/* Validate the address family */
	switch (via->rtvia_family) {
	case AF_PACKET:
		*via_table = NEIGH_LINK_TABLE;
		break;
	case AF_INET:
		*via_table = NEIGH_ARP_TABLE;
		if (alen != 4)
			goto errout;
		break;
	case AF_INET6:
		*via_table = NEIGH_ND_TABLE;
		if (alen != 16)
			goto errout;
		break;
	default:
		/* Unsupported address family */
		goto errout;
	}

	memcpy(via_addr, via->rtvia_addr, alen);
	*via_alen = alen;
	err = 0;

errout:
	return err;
}

static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct mpls_route_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	int index;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	if (rtm->rtm_family != AF_MPLS)
		goto errout;
	if (rtm->rtm_dst_len != 20)
		goto errout;
	if (rtm->rtm_src_len != 0)
		goto errout;
	if (rtm->rtm_tos != 0)
		goto errout;
	if (rtm->rtm_table != RT_TABLE_MAIN)
		goto errout;
	/* Any value is acceptable for rtm_protocol */

	/* As mpls uses destination specific addresses
	 * (or source specific address in the case of multicast)
	 * all addresses have universal scope.
	 */
	if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
		goto errout;
	if (rtm->rtm_type != RTN_UNICAST)
		goto errout;
	if (rtm->rtm_flags != 0)
		goto errout;

	cfg->rc_label		= LABEL_NOT_SPECIFIED;
	cfg->rc_protocol	= rtm->rtm_protocol;
	cfg->rc_via_table	= MPLS_NEIGH_TABLE_UNSPEC;
	cfg->rc_nlflags		= nlh->nlmsg_flags;
	cfg->rc_nlinfo.portid	= NETLINK_CB(skb).portid;
	cfg->rc_nlinfo.nlh	= nlh;
	cfg->rc_nlinfo.nl_net	= sock_net(skb->sk);

	for (index = 0; index <= RTA_MAX; index++) {
		struct nlattr *nla = tb[index];
		if (!nla)
			continue;

		switch (index) {
		case RTA_OIF:
			cfg->rc_ifindex = nla_get_u32(nla);
			break;
		case RTA_NEWDST:
			if (nla_get_labels(nla, MAX_NEW_LABELS,
					   &cfg->rc_output_labels,
					   cfg->rc_output_label))
				goto errout;
			break;
		case RTA_DST:
		{
			u8 label_count;
			if (nla_get_labels(nla, 1, &label_count,
					   &cfg->rc_label))
				goto errout;

			/* Reserved labels may not be set */
			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
				goto errout;

			break;
		}
		case RTA_VIA:
			if (nla_get_via(nla, &cfg->rc_via_alen,
					&cfg->rc_via_table, cfg->rc_via))
				goto errout;
			break;
		case RTA_MULTIPATH:
			cfg->rc_mp = nla_data(nla);
			cfg->rc_mp_len = nla_len(nla);
			break;
		default:
			/* Unsupported attribute */
			goto errout;
		}
	}

	err = 0;
errout:
	return err;
}

static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_del(&cfg);
}

static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_add(&cfg);
}

static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
			   u32 label, struct mpls_route *rt, int flags)
{
	struct net_device *dev;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_MPLS;
	rtm->rtm_dst_len = 20;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = RT_TABLE_MAIN;
	rtm->rtm_protocol = rt->rt_protocol;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;

	if (nla_put_labels(skb, RTA_DST, 1, &label))
		goto nla_put_failure;
	if (rt->rt_nhn == 1) {
		const struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_labels &&
		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
				   nh->nh_label))
			goto nla_put_failure;
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
				nh->nh_via_alen))
			goto nla_put_failure;
		dev = rtnl_dereference(nh->nh_dev);
		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
			goto nla_put_failure;
		if (nh->nh_flags & RTNH_F_LINKDOWN)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (nh->nh_flags & RTNH_F_DEAD)
			rtm->rtm_flags |= RTNH_F_DEAD;
	} else {
		struct rtnexthop *rtnh;
		struct nlattr *mp;
		int dead = 0;
		int linkdown = 0;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(rt) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			dev = rtnl_dereference(nh->nh_dev);
			if (dev)
				rtnh->rtnh_ifindex = dev->ifindex;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
				linkdown++;
			}
			if (nh->nh_flags & RTNH_F_DEAD) {
				rtnh->rtnh_flags |= RTNH_F_DEAD;
				dead++;
			}

			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
							    nh->nh_labels,
							    nh->nh_label))
				goto nla_put_failure;
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
			    nla_put_via(skb, nh->nh_via_table,
					mpls_nh_via(rt, nh),
					nh->nh_via_alen))
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
		} endfor_nexthops(rt);

		if (linkdown == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (dead == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_DEAD;

		nla_nest_end(skb, mp);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned int index;

	ASSERT_RTNL();

	index = cb->args[0];
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		index = MPLS_LABEL_FIRST_UNRESERVED;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (; index < platform_labels; index++) {
		struct mpls_route *rt;
		rt = rtnl_dereference(platform_label[index]);
		if (!rt)
			continue;

		if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
				    index, rt, NLM_F_MULTI) < 0)
			break;
	}
	cb->args[0] = index;

	return skb->len;
}

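/* lfib_nlmsg_size() must account for every attribute mpls_dump_route()
 * can emit; rtmsg_lfib() treats a resulting -EMSGSIZE as a bug.
 */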
static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
	size_t payload =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4);			/* RTA_DST */

	if (rt->rt_nhn == 1) {
		struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_dev)
			payload += nla_total_size(4); /* RTA_OIF */
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
			payload += nla_total_size(2 + nh->nh_via_alen);
		if (nh->nh_labels) /* RTA_NEWDST */
			payload += nla_total_size(nh->nh_labels * 4);
	} else {
		/* each nexthop is packed in an attribute */
		size_t nhsize = 0;

		for_nexthops(rt) {
			nhsize += nla_total_size(sizeof(struct rtnexthop));
			/* RTA_VIA */
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
				nhsize += nla_total_size(2 + nh->nh_via_alen);
			if (nh->nh_labels) /* RTA_NEWDST */
				nhsize += nla_total_size(nh->nh_labels * 4);
		} endfor_nexthops(rt);
		/* nested attribute */
		payload += nla_total_size(nhsize);
	}

	return payload;
}

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = nlh ? nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);

	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}

static int resize_platform_label_table(struct net *net, size_t limit)
{
	size_t size = sizeof(struct mpls_route *) * limit;
	size_t old_limit;
	size_t cp_size;
	struct mpls_route __rcu **labels = NULL, **old;
	struct mpls_route *rt0 = NULL, *rt2 = NULL;
	unsigned index;

	if (size) {
		labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (!labels)
			labels = vzalloc(size);

		if (!labels)
			goto nolabels;
	}

	/* In case the predefined labels need to be populated */
	if (limit > MPLS_LABEL_IPV4NULL) {
		struct net_device *lo = net->loopback_dev;
		rt0 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt0)
			goto nort0;
		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
		rt0->rt_protocol = RTPROT_KERNEL;
		rt0->rt_payload_type = MPT_IPV4;
		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt0->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}
	if (limit > MPLS_LABEL_IPV6NULL) {
		struct net_device *lo = net->loopback_dev;
		rt2 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt2)
			goto nort2;
		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
		rt2->rt_protocol = RTPROT_KERNEL;
		rt2->rt_payload_type = MPT_IPV6;
		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt2->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}

	rtnl_lock();
	/* Remember the original table */
	old = rtnl_dereference(net->mpls.platform_label);
	old_limit = net->mpls.platform_labels;

	/* Free any labels beyond the new table */
	for (index = limit; index < old_limit; index++)
		mpls_route_update(net, index, NULL, NULL);

	/* Copy over the old labels */
	cp_size = size;
	if (old_limit < limit)
		cp_size = old_limit * sizeof(struct mpls_route *);

	memcpy(labels, old, cp_size);

	/* If needed set the predefined labels */
	if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
	    (limit > MPLS_LABEL_IPV6NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
		rt2 = NULL;
	}

	if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
	    (limit > MPLS_LABEL_IPV4NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
		rt0 = NULL;
	}

	/* Update the global pointers */
	net->mpls.platform_labels = limit;
	rcu_assign_pointer(net->mpls.platform_label, labels);

	rtnl_unlock();

	mpls_rt_free(rt2);
	mpls_rt_free(rt0);

	if (old) {
		synchronize_rcu();
		kvfree(old);
	}

	return 0;

nort2:
	mpls_rt_free(rt0);
nort0:
	kvfree(labels);
nolabels:
	return -ENOMEM;
}

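/* Readers dereference net->mpls.platform_label under rcu_read_lock()
 * (see mpls_route_input_rcu()), so the resized table is published with
 * rcu_assign_pointer() and the old one freed only after a grace period.
 */
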
static int mpls_platform_labels(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = table->data;
	int platform_labels = net->mpls.platform_labels;
	int ret;
	struct ctl_table tmp = {
		.procname	= table->procname,
		.data		= &platform_labels,
		.maxlen		= sizeof(int),
		.mode		= table->mode,
		.extra1		= &zero,
		.extra2		= &label_limit,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = resize_platform_label_table(net, platform_labels);

	return ret;
}

static const struct ctl_table mpls_table[] = {
	{
		.procname	= "platform_labels",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= mpls_platform_labels,
	},
	{ }
};

static int mpls_net_init(struct net *net)
{
	struct ctl_table *table;

	net->mpls.platform_labels = 0;
	net->mpls.platform_label = NULL;

	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	table[0].data = net;
	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
	if (net->mpls.ctl == NULL) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

static void mpls_net_exit(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	struct ctl_table *table;
	unsigned int index;

	table = net->mpls.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->mpls.ctl);
	kfree(table);

	/* An rcu grace period has passed since there was a device in
	 * the network namespace (and thus the last in flight packet)
	 * left this network namespace.  This is because
	 * unregister_netdevice_many and netdev_run_todo has completed
	 * for each network device that was in this network namespace.
	 *
	 * As such no additional rcu synchronization is necessary when
	 * freeing the platform_label table.
	 */
	rtnl_lock();
	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = 0; index < platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
		RCU_INIT_POINTER(platform_label[index], NULL);
		mpls_notify_route(net, index, rt, NULL, NULL);
		mpls_rt_free(rt);
	}
	rtnl_unlock();

	kvfree(platform_label);
}

static struct pernet_operations mpls_net_ops = {
	.init = mpls_net_init,
	.exit = mpls_net_exit,
};

static struct rtnl_af_ops mpls_af_ops __read_mostly = {
	.family		   = AF_MPLS,
	.fill_stats_af	   = mpls_fill_stats_af,
	.get_stats_af_size = mpls_get_stats_af_size,
};

static int __init mpls_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);

	err = register_pernet_subsys(&mpls_net_ops);
	if (err)
		goto out;

	err = register_netdevice_notifier(&mpls_dev_notifier);
	if (err)
		goto out_unregister_pernet;

	dev_add_pack(&mpls_packet_type);

	rtnl_af_register(&mpls_af_ops);

	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
	rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
		      mpls_netconf_dump_devconf, NULL);
	err = 0;
out:
	return err;

out_unregister_pernet:
	unregister_pernet_subsys(&mpls_net_ops);
	goto out;
}
module_init(mpls_init);

static void __exit mpls_exit(void)
{
	rtnl_unregister_all(PF_MPLS);
	rtnl_af_unregister(&mpls_af_ops);
	dev_remove_pack(&mpls_packet_type);
	unregister_netdevice_notifier(&mpls_dev_notifier);
	unregister_pernet_subsys(&mpls_net_ops);
}
module_exit(mpls_exit);

MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);