// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"
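/* Switch a port between L2/L3/L3S modes. L3 and L3S slaves run with
 * IFF_NOARP set; if changing the flags fails for any slave, the updates
 * already applied are rolled back.
 */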
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
                                struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan;
        unsigned int flags;
        int err;

        ASSERT_RTNL();
        if (port->mode != nval) {
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        flags = ipvlan->dev->flags;
                        if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
                                err = dev_change_flags(ipvlan->dev,
                                                       flags | IFF_NOARP,
                                                       extack);
                        } else {
                                err = dev_change_flags(ipvlan->dev,
                                                       flags & ~IFF_NOARP,
                                                       extack);
                        }
                        if (unlikely(err))
                                goto fail;
                }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_l3s_register(port);
                        if (err)
                                goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        ipvlan_l3s_unregister(port);
                }
                port->mode = nval;
        }
        return 0;

fail:
        /* Undo the flags changes that have been done so far. */
        list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
                flags = ipvlan->dev->flags;
                if (port->mode == IPVLAN_MODE_L3 ||
                    port->mode == IPVLAN_MODE_L3S)
                        dev_change_flags(ipvlan->dev, flags | IFF_NOARP,
                                         NULL);
                else
                        dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP,
                                         NULL);
        }

        return err;
}
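/* The port is the per-master context shared by all slaves: it owns the
 * address hash table, the multicast backlog queue and the rx_handler
 * hook on the master device.
 */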
static int ipvlan_port_create(struct net_device *dev)
{
        struct ipvl_port *port;
        int err, idx;

        port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        write_pnet(&port->pnet, dev_net(dev));
        port->dev = dev;
        port->mode = IPVLAN_MODE_L3;
        INIT_LIST_HEAD(&port->ipvlans);
        for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
                INIT_HLIST_HEAD(&port->hlhead[idx]);

        skb_queue_head_init(&port->backlog);
        INIT_WORK(&port->wq, ipvlan_process_multicast);
        ida_init(&port->ida);
        port->dev_id_start = 1;

        err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
        if (err)
                goto err;

        return 0;

err:
        kfree(port);
        return err;
}
static void ipvlan_port_destroy(struct net_device *dev)
{
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
        struct sk_buff *skb;

        if (port->mode == IPVLAN_MODE_L3S)
                ipvlan_l3s_unregister(port);
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
        while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
                if (skb->dev)
                        dev_put(skb->dev);
                kfree_skb(skb);
        }
        ida_destroy(&port->ida);
        kfree(port);
}
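/* Feature bits a slave device may inherit from its master. */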
#define IPVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
         NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

#define IPVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
static int ipvlan_init(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_port *port;
        int err;

        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
        dev->features = phy_dev->features & IPVLAN_FEATURES;
        dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
        dev->gso_max_size = phy_dev->gso_max_size;
        dev->gso_max_segs = phy_dev->gso_max_segs;
        dev->hard_header_len = phy_dev->hard_header_len;

        netdev_lockdep_set_classes(dev);

        ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;

        if (!netif_is_ipvlan_port(phy_dev)) {
                err = ipvlan_port_create(phy_dev);
                if (err < 0) {
                        free_percpu(ipvlan->pcpu_stats);
                        return err;
                }
        }
        port = ipvlan_port_get_rtnl(phy_dev);
        port->count += 1;
        return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_port *port;

        free_percpu(ipvlan->pcpu_stats);

        port = ipvlan_port_get_rtnl(phy_dev);
        port->count -= 1;
        if (!port->count)
                ipvlan_port_destroy(port->dev);
}
static int ipvlan_open(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;

        if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
            ipvlan->port->mode == IPVLAN_MODE_L3S)
                dev->flags |= IFF_NOARP;
        else
                dev->flags &= ~IFF_NOARP;

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_add(ipvlan, addr);
        rcu_read_unlock();

        return dev_uc_add(phy_dev, phy_dev->dev_addr);
}
static int ipvlan_stop(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;

        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);

        dev_uc_del(phy_dev, phy_dev->dev_addr);

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);
        rcu_read_unlock();

        return 0;
}
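/* Tx fast path: the skb length is sampled before ipvlan_queue_xmit()
 * may consume the skb, then the outcome is accounted in the per-cpu
 * stats under the u64_stats syncp.
 */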
static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        int skblen = skb->len;
        int ret;

        ret = ipvlan_queue_xmit(skb, dev);
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);

                u64_stats_update_begin(&pcptr->syncp);
                pcptr->tx_pkts++;
                pcptr->tx_bytes += skblen;
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
        }
        return ret;
}
static netdev_features_t ipvlan_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
}
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
        } else {
                struct netdev_hw_addr *ha;
                DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);

                bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
                netdev_for_each_mc_addr(ha, dev)
                        __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);

                /* Turn-on broadcast bit irrespective of address family,
                 * since broadcast is deferred to a work-queue, hence no
                 * impact on fast-path processing.
                 */
                __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);

                bitmap_copy(ipvlan->mac_filters, mc_filters,
                            IPVLAN_MAC_FILTER_SIZE);
        }
        dev_uc_sync(ipvlan->phy_dev, dev);
        dev_mc_sync(ipvlan->phy_dev, dev);
}
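/* Fold the per-cpu counters into one rtnl_link_stats64. The 64-bit
 * counters are read under the u64_stats syncp; the u32 error counters
 * are updated and read without it.
 */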
static void ipvlan_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *s)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (ipvlan->pcpu_stats) {
                struct ipvl_pcpu_stats *pcptr;
                u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
                u32 rx_errs = 0, tx_drps = 0;
                u32 strt;
                int idx;

                for_each_possible_cpu(idx) {
                        pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
                        do {
                                strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
                                rx_pkts = pcptr->rx_pkts;
                                rx_bytes = pcptr->rx_bytes;
                                rx_mcast = pcptr->rx_mcast;
                                tx_pkts = pcptr->tx_pkts;
                                tx_bytes = pcptr->tx_bytes;
                        } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
                                                           strt));

                        s->rx_packets += rx_pkts;
                        s->rx_bytes += rx_bytes;
                        s->multicast += rx_mcast;
                        s->tx_packets += tx_pkts;
                        s->tx_bytes += tx_bytes;

                        /* u32 values are updated without syncp protection. */
                        rx_errs += pcptr->rx_errs;
                        tx_drps += pcptr->tx_drps;
                }
                s->rx_errors = rx_errs;
                s->rx_dropped = rx_errs;
                s->tx_dropped = tx_drps;
        }
}
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        return vlan_vid_add(phy_dev, proto, vid);
}
static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
                                   u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        vlan_vid_del(phy_dev, proto, vid);
        return 0;
}
static int ipvlan_get_iflink(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->phy_dev->ifindex;
}
static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_init               = ipvlan_init,
        .ndo_uninit             = ipvlan_uninit,
        .ndo_open               = ipvlan_open,
        .ndo_stop               = ipvlan_stop,
        .ndo_start_xmit         = ipvlan_start_xmit,
        .ndo_fix_features       = ipvlan_fix_features,
        .ndo_change_rx_flags    = ipvlan_change_rx_flags,
        .ndo_set_rx_mode        = ipvlan_set_multicast_mac_filter,
        .ndo_get_stats64        = ipvlan_get_stats64,
        .ndo_vlan_rx_add_vid    = ipvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ipvlan_vlan_rx_kill_vid,
        .ndo_get_iflink         = ipvlan_get_iflink,
};
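/* Build the L2 header using the master's MAC as the source address
 * when the caller does not supply one, since all slaves share the
 * master's address on the wire.
 */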
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
                              unsigned short type, const void *daddr,
                              const void *saddr, unsigned len)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        /* TODO Probably use a different field than dev_addr so that the
         * mac-address on the virtual device is portable and can be carried
         * while the packets use the mac-addr on the physical device.
         */
        return dev_hard_header(skb, phy_dev, type, daddr,
                               saddr ? : phy_dev->dev_addr, len);
}
static const struct header_ops ipvlan_header_ops = {
        .create         = ipvlan_hard_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
};
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
        ipvlan->dev->mtu = dev->mtu;
}
static bool netif_is_ipvlan(const struct net_device *dev)
{
        /* both ipvlan and ipvtap devices use the same netdev_ops */
        return dev->netdev_ops == &ipvlan_netdev_ops;
}
static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
                                             struct ethtool_link_ksettings *cmd)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd);
}
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->msg_enable;
}
static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        ipvlan->msg_enable = value;
}
static const struct ethtool_ops ipvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = ipvlan_ethtool_get_link_ksettings,
        .get_drvinfo            = ipvlan_ethtool_get_drvinfo,
        .get_msglevel           = ipvlan_ethtool_get_msglevel,
        .set_msglevel           = ipvlan_ethtool_set_msglevel,
};
static int ipvlan_nl_changelink(struct net_device *dev,
                                struct nlattr *tb[], struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
        int err = 0;

        if (!data)
                return 0;
        if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if (data[IFLA_IPVLAN_MODE]) {
                u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                err = ipvlan_set_port_mode(port, nmode, extack);
        }

        if (!err && data[IFLA_IPVLAN_FLAGS]) {
                u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

                if (flags & IPVLAN_F_PRIVATE)
                        ipvlan_mark_private(port);
                else
                        ipvlan_clear_private(port);

                if (flags & IPVLAN_F_VEPA)
                        ipvlan_mark_vepa(port);
                else
                        ipvlan_clear_vepa(port);
        }

        return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
        return (0
                + nla_total_size(2) /* IFLA_IPVLAN_MODE */
                + nla_total_size(2) /* IFLA_IPVLAN_FLAGS */
                );
}
static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
                              struct netlink_ext_ack *extack)
{
        if (!data)
                return 0;

        if (data[IFLA_IPVLAN_MODE]) {
                u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                if (mode >= IPVLAN_MODE_MAX)
                        return -EINVAL;
        }
        if (data[IFLA_IPVLAN_FLAGS]) {
                u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

                /* Only two bits are used at this moment. */
                if (flags & ~(IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
                        return -EINVAL;
                /* Also both flags can't be active at the same time. */
                if ((flags & (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA)) ==
                    (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
                        return -EINVAL;
        }
        return 0;
}
static int ipvlan_nl_fillinfo(struct sk_buff *skb,
                              const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
        int ret = -EINVAL;

        if (!port)
                goto err;

        ret = -EMSGSIZE;
        if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
                goto err;
        if (nla_put_u16(skb, IFLA_IPVLAN_FLAGS, port->flags))
                goto err;

        return 0;

err:
        return ret;
}
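/* Create a new slave: resolve the master (unwrapping a nested ipvlan),
 * register the netdev, reserve a unique 16-bit dev_id, link the slave
 * to the master and apply the requested mode and flags.
 */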
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
                    struct nlattr *tb[], struct nlattr *data[],
                    struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port;
        struct net_device *phy_dev;
        int err;
        u16 mode = IPVLAN_MODE_L3;

        if (!tb[IFLA_LINK])
                return -EINVAL;

        phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!phy_dev)
                return -ENODEV;

        if (netif_is_ipvlan(phy_dev)) {
                struct ipvl_dev *tmp = netdev_priv(phy_dev);

                phy_dev = tmp->phy_dev;
                if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
        } else if (!netif_is_ipvlan_port(phy_dev)) {
                /* Exit early if the underlying link is invalid or busy */
                if (phy_dev->type != ARPHRD_ETHER ||
                    phy_dev->flags & IFF_LOOPBACK) {
                        netdev_err(phy_dev,
                                   "Master is either lo or non-ether device\n");
                        return -EINVAL;
                }

                if (netdev_is_rx_handler_busy(phy_dev)) {
                        netdev_err(phy_dev, "Device is already in use.\n");
                        return -EBUSY;
                }
        }

        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        if (!tb[IFLA_MTU])
                ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);

        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
         * packets.
         */
        memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);

        dev->priv_flags |= IFF_NO_RX_HANDLER;

        err = register_netdevice(dev);
        if (err < 0)
                return err;

        /* ipvlan_init() would have created the port, if required */
        port = ipvlan_port_get_rtnl(phy_dev);
        ipvlan->port = port;

        /* If the port-id base is at the MAX value, then wrap it around and
         * begin from 0x1 again. This may be due to a busy system where lots
         * of slaves are getting created and deleted.
         */
        if (port->dev_id_start == 0xFFFE)
                port->dev_id_start = 0x1;

        /* Since L2 address is shared among all IPvlan slaves including
         * master, use unique 16 bit dev-ids to differentiate among them.
         * Assign IDs between 0x1 and 0xFFFE (used by the master) to each
         * slave link [see addrconf_ifid_eui48()].
         */
        err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
                             GFP_KERNEL);
        if (err < 0)
                err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
                                     GFP_KERNEL);
        if (err < 0)
                goto unregister_netdev;
        dev->dev_id = err;

        /* Increment id-base to the next slot for the future assignment */
        port->dev_id_start = err + 1;

        err = netdev_upper_dev_link(phy_dev, dev, extack);
        if (err)
                goto remove_ida;

        /* Flags are per port and latest update overrides. User has
         * to be consistent in setting it just like the mode attribute.
         */
        if (data && data[IFLA_IPVLAN_FLAGS])
                port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

        if (data && data[IFLA_IPVLAN_MODE])
                mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

        err = ipvlan_set_port_mode(port, mode, extack);
        if (err)
                goto unlink_netdev;

        list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
        netif_stacked_transfer_operstate(phy_dev, dev);
        return 0;

unlink_netdev:
        netdev_upper_dev_unlink(phy_dev, dev);
remove_ida:
        ida_simple_remove(&port->ida, dev->dev_id);
unregister_netdev:
        unregister_netdevice(dev);
        return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;

        spin_lock_bh(&ipvlan->addrs_lock);
        list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
                ipvlan_ht_addr_del(addr);
                list_del_rcu(&addr->anode);
                kfree_rcu(addr, rcu);
        }
        spin_unlock_bh(&ipvlan->addrs_lock);

        ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}
EXPORT_SYMBOL_GPL(ipvlan_link_delete);
void ipvlan_link_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(ipvlan_link_setup);
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
        [IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
        [IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipvlan_link_ops = {
        .kind           = "ipvlan",
        .priv_size      = sizeof(struct ipvl_dev),

        .setup          = ipvlan_link_setup,
        .newlink        = ipvlan_link_new,
        .dellink        = ipvlan_link_delete,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
{
        ops->get_size   = ipvlan_nl_getsize;
        ops->policy     = ipvlan_nl_policy;
        ops->validate   = ipvlan_nl_validate;
        ops->fill_info  = ipvlan_nl_fillinfo;
        ops->changelink = ipvlan_nl_changelink;
        ops->maxtype    = IFLA_IPVLAN_MAX;
        return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(ipvlan_link_register);
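/* Propagate master-device events (operstate, features, MTU, address
 * changes, unregister) to every slave hanging off the port.
 */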
static int ipvlan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
        struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ipvl_dev *ipvlan, *next;
        struct ipvl_port *port;
        LIST_HEAD(lst_kill);
        int err;

        if (!netif_is_ipvlan_port(dev))
                return NOTIFY_DONE;

        port = ipvlan_port_get_rtnl(dev);

        switch (event) {
        case NETDEV_UP:
        case NETDEV_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        netif_stacked_transfer_operstate(ipvlan->phy_dev,
                                                         ipvlan->dev);
                break;

        case NETDEV_REGISTER: {
                struct net *oldnet, *newnet = dev_net(dev);

                oldnet = read_pnet(&port->pnet);
                if (net_eq(newnet, oldnet))
                        break;

                write_pnet(&port->pnet, newnet);

                ipvlan_migrate_l3s_hook(oldnet, newnet);
                break;
        }
        case NETDEV_UNREGISTER:
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode)
                        ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
                                                            &lst_kill);
                unregister_netdevice_many(&lst_kill);
                break;

        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
                        ipvlan->dev->gso_max_size = dev->gso_max_size;
                        ipvlan->dev->gso_max_segs = dev->gso_max_segs;
                        netdev_features_change(ipvlan->dev);
                }
                break;

        case NETDEV_CHANGEMTU:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        ipvlan_adjust_mtu(ipvlan, dev);
                break;

        case NETDEV_PRE_CHANGEADDR:
                prechaddr_info = ptr;
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        err = dev_pre_changeaddr_notify(ipvlan->dev,
                                                        prechaddr_info->dev_addr,
                                                        extack);
                        if (err)
                                return notifier_from_errno(err);
                }
                break;

        case NETDEV_CHANGEADDR:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
                        call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
                }
                break;

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlying device to change its type. */
                return NOTIFY_BAD;
        }
        return NOTIFY_DONE;
}
/* the caller must hold the addrs lock */
static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;

        addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;

        addr->master = ipvlan;
        if (!is_v6) {
                memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
                addr->atype = IPVL_IPV4;
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
                addr->atype = IPVL_IPV6;
#endif
        }

        list_add_tail_rcu(&addr->anode, &ipvlan->addrs);

        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
        if (netif_running(ipvlan->dev))
                ipvlan_ht_addr_add(ipvlan, addr);

        return 0;
}
static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;

        spin_lock_bh(&ipvlan->addrs_lock);
        addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
        if (!addr) {
                spin_unlock_bh(&ipvlan->addrs_lock);
                return;
        }

        ipvlan_ht_addr_del(addr);
        list_del_rcu(&addr->anode);
        spin_unlock_bh(&ipvlan->addrs_lock);
        kfree_rcu(addr, rcu);
}
static bool ipvlan_is_valid_dev(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!netif_is_ipvlan(dev))
                return false;

        if (!ipvlan || !ipvlan->port)
                return false;

        return true;
}
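/* The inetaddr/inet6addr notifiers below keep the port's address hash
 * table in sync with the IP addresses configured on slave devices.
 */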
#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        int ret = -EINVAL;

        spin_lock_bh(&ipvlan->addrs_lock);
        if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv6=%pI6c addr for %s intf\n",
                          ip6_addr, ipvlan->dev->name);
        else
                ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
        spin_unlock_bh(&ipvlan->addrs_lock);
        return ret;
}
static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
static int ipvlan_addr6_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if6->idev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_add_addr6(ipvlan, &if6->addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ipvlan_del_addr6(ipvlan, &if6->addr);
                break;
        }

        return NOTIFY_OK;
}
static int ipvlan_addr6_validator_event(struct notifier_block *unused,
                                        unsigned long event, void *ptr)
{
        struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
        struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
                        NL_SET_ERR_MSG(i6vi->extack,
                                       "Address already assigned to an ipvlan device");
                        return notifier_from_errno(-EADDRINUSE);
                }
                break;
        }

        return NOTIFY_OK;
}
#endif
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        int ret = -EINVAL;

        spin_lock_bh(&ipvlan->addrs_lock);
        if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
        else
                ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
        spin_unlock_bh(&ipvlan->addrs_lock);
        return ret;
}
static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        return ipvlan_del_addr(ipvlan, ip4_addr, false);
}
static int ipvlan_addr4_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct in_addr ip4_addr;

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                ip4_addr.s_addr = if4->ifa_address;
                if (ipvlan_add_addr4(ipvlan, &ip4_addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ip4_addr.s_addr = if4->ifa_address;
                ipvlan_del_addr4(ipvlan, &ip4_addr);
                break;
        }

        return NOTIFY_OK;
}
static int ipvlan_addr4_validator_event(struct notifier_block *unused,
                                        unsigned long event, void *ptr)
{
        struct in_validator_info *ivi = (struct in_validator_info *)ptr;
        struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
                        NL_SET_ERR_MSG(ivi->extack,
                                       "Address already assigned to an ipvlan device");
                        return notifier_from_errno(-EADDRINUSE);
                }
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_event,
};

static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_validator_event,
};

static struct notifier_block ipvlan_notifier_block __read_mostly = {
        .notifier_call = ipvlan_device_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_event,
};

static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_validator_event,
};
#endif
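/* Module init registers the notifiers first and the rtnl link ops last;
 * on failure the registrations are unwound in reverse order.
 */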
static int __init ipvlan_init_module(void)
{
        int err;

        ipvlan_init_secret();
        register_netdevice_notifier(&ipvlan_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        register_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
        register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);

        err = ipvlan_l3s_init();
        if (err < 0)
                goto error;

        err = ipvlan_link_register(&ipvlan_link_ops);
        if (err < 0) {
                ipvlan_l3s_cleanup();
                goto error;
        }

        return 0;
error:
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inetaddr_validator_notifier(
            &ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        unregister_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        return err;
}
static void __exit ipvlan_cleanup_module(void)
{
        rtnl_link_unregister(&ipvlan_link_ops);
        ipvlan_l3s_cleanup();
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inetaddr_validator_notifier(
            &ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        unregister_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
}

module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");