/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

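/* Update a slave's RX statistics. On success the per-CPU counters are
 * bumped under the u64_stats syncp; on failure only rx_errs is
 * incremented, which needs no seqcount protection.
 */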
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

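/* Address hashing: both helpers key a jhash of the IP address with
 * ipvlan_jhash_secret and fold it into the port hash-table index space
 * with IPVLAN_HASH_MASK.
 */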
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

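/* Look up an address in the port hash table. Called from the packet
 * path under RCU, hence hlist_for_each_entry_rcu().
 */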
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
		if (is_v6 && addr->atype == IPVL_IPV6 &&
		    ipv6_addr_equal(&addr->ip6addr, iaddr))
			return addr;
		else if (!is_v6 && addr->atype == IPVL_IPV4 &&
			 addr->ip4addr.s_addr ==
				((struct in_addr *)iaddr)->s_addr)
			return addr;
	}
	return NULL;
}

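/* Hash-table insertion and removal. Removal uses hlist_del_init_rcu()
 * so that a later ipvlan_ht_addr_add() sees the node as unhashed.
 */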
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if ((is_v6 && addr->atype == IPVL_IPV6 &&
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
			return addr;
	}
	return NULL;
}

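/* Returns true if any slave on this port already owns the given address. */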
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;

	ASSERT_RTNL();

	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
			return true;
	}
	return false;
}

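/* Extract and sanity-check the L3 header (ARP, IPv4 or IPv6) and report
 * its type through *type. Returns NULL if the header cannot be pulled or
 * fails validation.
 */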
static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			*type = IPVL_ICMPV6;
			lyr3h = ip6h + 1;
		}
		break;
	}
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

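/* Work-queue handler: drain the port backlog and replicate each
 * multicast/broadcast frame to every interested slave (as selected by
 * mac_filters), then either transmit the original frame (TX path) or
 * free it (RX path).
 */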
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

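/* Deliver a frame to the slave owning @addr. The final argument
 * (restored here as "local", matching the callers below) flags frames
 * exchanged between two local slaves, which need the device-up and
 * skb-share checks.
 */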
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}
	ipvlan_skb_crossing_ns(skb, dev);

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

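/* Map an L3 header to the ipvl_addr it is addressed to (or sourced from,
 * when use_dest is false). ICMPv6 neighbour solicitations are matched on
 * their target address so DAD keeps working; ARP is matched on the
 * address embedded in its payload.
 */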
static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
					    void *lyr3h, int addr_type,
					    bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	if (addr_type == IPVL_IPV6) {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
	} else if (addr_type == IPVL_ICMPV6) {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that NeighborSolicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
	} else if (addr_type == IPVL_IPV4) {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
	} else if (addr_type == IPVL_ARP) {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
	}

	return addr;
}

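/* L3-mode TX: route the packet in the namespace of the master device and
 * send it via ip_local_out()/ip6_local_out(), bypassing the slave's L2.
 * Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */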
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_iif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

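/* Dispatch an outbound packet to the v4 or v6 path above after stripping
 * the pseudo-L2 header; non-IP and multicast traffic is dropped here.
 */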
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_warn_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* Ipvlan is a pseudo-L2 device, so the packets that we receive
	 * still carry an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

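/* Defer a multicast frame to the port backlog for the work queue to
 * process, dropping it (and accounting rx_dropped) once the queue has
 * reached IPVLAN_QBACKLOG_LIMIT.
 */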
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path. Deciding by looking at the mac addresses on the packet
	 * would lead to erroneous decisions (e.g. with loopback mode on
	 * the master device, or hair-pin mode on the switch).
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}

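/* Per-mode TX entry points: L3 (and L3S) mode only considers unicast IP,
 * while L2 mode additionally short-circuits slave-to-slave traffic and
 * defers multicast to the work queue.
 */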
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
	if (addr)
		return ipvlan_rcv_frame(addr, &skb, true);

out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
						  addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, &skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb to the main device. On the RX side we just
		 * return RX_HANDLER_PASS so it is processed further
		 * up the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return dev_queue_xmit(skb);
}

int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
	case IPVLAN_MODE_L3S:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
			port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

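/* RX path. A frame is "external" unless its source MAC is the master's
 * and its source IP belongs to a local slave; locally sourced multicast
 * was already queued on TX and must not be queued a second time.
 */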
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;
	void *lyr3h;
	int addr_type;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when the work queue processes this frame. This
			 * is achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		struct ipvl_addr *addr;

		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return ret;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
		if (addr)
			ret = ipvlan_rcv_frame(addr, pskb, false);
	}

	return ret;
}

rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
			port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

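/* L3S mode: the rx-handler passes packets up unmodified and they are
 * re-targeted here instead, from the l3mdev receive path and the
 * netfilter input hook, onto the slave owning the destination address.
 */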
static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ipvl_addr *addr = NULL;
	struct ipvl_port *port;
	void *lyr3h;
	int addr_type;

	if (!dev || !netif_is_ipvlan_port(dev))
		goto out;

	port = ipvlan_port_get_rcu(dev);
	if (!port || port->mode != IPVLAN_MODE_L3S)
		goto out;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
out:
	return addr;
}

struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
			      u16 proto)
{
	struct ipvl_addr *addr;
	struct net_device *sdev;

	addr = ipvlan_skb_to_addr(skb, dev);
	if (!addr)
		goto out;

	sdev = addr->master->dev;
	switch (proto) {
	case AF_INET:
	{
		int err;
		struct iphdr *ip4h = ip_hdr(skb);

		err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
					   ip4h->tos, sdev);
		if (unlikely(err))
			goto out;
		break;
	}
	case AF_INET6:
	{
		struct dst_entry *dst;
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int flags = RT6_LOOKUP_F_HAS_SADDR;
		struct flowi6 fl6 = {
			.flowi6_iif = sdev->ifindex,
			.daddr = ip6h->daddr,
			.saddr = ip6h->saddr,
			.flowlabel = ip6_flowinfo(ip6h),
			.flowi6_mark = skb->mark,
			.flowi6_proto = ip6h->nexthdr,
		};

		skb_dst_drop(skb);
		dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
		skb_dst_set(skb, dst);
		break;
	}
	default:
		break;
	}

out:
	return skb;
}

unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	struct ipvl_addr *addr;
	unsigned int len;

	addr = ipvlan_skb_to_addr(skb, skb->dev);
	if (!addr)
		goto out;

	skb->dev = addr->master->dev;
	len = skb->len + ETH_HLEN;
	ipvlan_count_rx(addr->master, len, true, false);
out:
	return NF_ACCEPT;
}