// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include <linux/ethtool.h>

#include "ipvlan.h"
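/* Change the port's mode (L2/L3/L3S).  The L3 modes do not do their own
 * neighbour resolution, so IFF_NOARP is toggled on every slave accordingly,
 * and the L3S input hook is registered or unregistered as needed.  On
 * failure the flag changes already applied are rolled back.
 */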
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
                                struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan;
        unsigned int flags;
        int err;

        ASSERT_RTNL();
        if (port->mode != nval) {
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        flags = ipvlan->dev->flags;
                        if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
                                err = dev_change_flags(ipvlan->dev,
                                                       flags | IFF_NOARP,
                                                       extack);
                        } else {
                                err = dev_change_flags(ipvlan->dev,
                                                       flags & ~IFF_NOARP,
                                                       extack);
                        }
                        if (unlikely(err))
                                goto fail;
                }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_l3s_register(port);
                        if (err)
                                goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        ipvlan_l3s_unregister(port);
                }
                port->mode = nval;
        }
        return 0;

fail:
        /* Undo the flags changes that have been done so far. */
        list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
                flags = ipvlan->dev->flags;
                if (port->mode == IPVLAN_MODE_L3 ||
                    port->mode == IPVLAN_MODE_L3S)
                        dev_change_flags(ipvlan->dev, flags | IFF_NOARP,
                                         NULL);
                else
                        dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP,
                                         NULL);
        }

        return err;
}
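/* Allocate the per-lower-device port structure, initialise its slave list,
 * address hash table and multicast backlog, and attach to the receive path
 * by registering ipvlan_handle_frame as the lower device's rx_handler.
 */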
static int ipvlan_port_create(struct net_device *dev)
{
        struct ipvl_port *port;
        int err, idx;

        port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
        if (!port)
                return -ENOMEM;

        write_pnet(&port->pnet, dev_net(dev));
        port->dev = dev;
        port->mode = IPVLAN_MODE_L3;
        INIT_LIST_HEAD(&port->ipvlans);
        for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
                INIT_HLIST_HEAD(&port->hlhead[idx]);

        skb_queue_head_init(&port->backlog);
        INIT_WORK(&port->wq, ipvlan_process_multicast);
        ida_init(&port->ida);
        port->dev_id_start = 1;

        err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
        if (err)
                goto err;

        return 0;

err:
        kfree(port);
        return err;
}
static void ipvlan_port_destroy(struct net_device *dev)
{
        struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
        struct sk_buff *skb;

        if (port->mode == IPVLAN_MODE_L3S)
                ipvlan_l3s_unregister(port);
        netdev_rx_handler_unregister(dev);
        cancel_work_sync(&port->wq);
        while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
                if (skb->dev)
                        dev_put(skb->dev);
                kfree_skb(skb);
        }
        ida_destroy(&port->ida);
        kfree(port);
}
#define IPVLAN_ALWAYS_ON_OFLOADS \
        (NETIF_F_SG | NETIF_F_HW_CSUM | \
         NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)

#define IPVLAN_ALWAYS_ON \
        (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)

#define IPVLAN_FEATURES \
        (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
         NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
         NETIF_F_GRO | NETIF_F_RXCSUM | \
         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

        /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */

#define IPVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
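/* ndo_init: inherit link state and the offloads the lower device supports,
 * allocate per-cpu stats, and create the shared ipvl_port the first time a
 * slave is attached to this lower device.
 */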
static int ipvlan_init(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_port *port;
        int err;

        dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
                     (phy_dev->state & IPVLAN_STATE_MASK);
        dev->features = phy_dev->features & IPVLAN_FEATURES;
        dev->features |= IPVLAN_ALWAYS_ON;
        dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
        dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
        dev->hw_enc_features |= dev->features;
        dev->gso_max_size = phy_dev->gso_max_size;
        dev->gso_max_segs = phy_dev->gso_max_segs;
        dev->hard_header_len = phy_dev->hard_header_len;

        netdev_lockdep_set_classes(dev);

        ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;

        if (!netif_is_ipvlan_port(phy_dev)) {
                err = ipvlan_port_create(phy_dev);
                if (err < 0) {
                        free_percpu(ipvlan->pcpu_stats);
                        return err;
                }
        }
        port = ipvlan_port_get_rtnl(phy_dev);
        port->count += 1;
        return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_port *port;

        free_percpu(ipvlan->pcpu_stats);

        port = ipvlan_port_get_rtnl(phy_dev);
        port->count -= 1;
        if (!port->count)
                ipvlan_port_destroy(port->dev);
}
static int ipvlan_open(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr;

        if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
            ipvlan->port->mode == IPVLAN_MODE_L3S)
                dev->flags |= IFF_NOARP;
        else
                dev->flags &= ~IFF_NOARP;

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_add(ipvlan, addr);
        rcu_read_unlock();

        return 0;
}
static int ipvlan_stop(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;

        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);

        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);
        rcu_read_unlock();

        return 0;
}
static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        int skblen = skb->len;
        int ret;

        ret = ipvlan_queue_xmit(skb, dev);
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
                struct ipvl_pcpu_stats *pcptr;

                pcptr = this_cpu_ptr(ipvlan->pcpu_stats);

                u64_stats_update_begin(&pcptr->syncp);
                pcptr->tx_pkts++;
                pcptr->tx_bytes += skblen;
                u64_stats_update_end(&pcptr->syncp);
        } else {
                this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
        }
        return ret;
}
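/* ndo_fix_features: keep the slave's features consistent with what the lower
 * device currently offers, while always retaining the software-only
 * IPVLAN_ALWAYS_ON bits.
 */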
static netdev_features_t ipvlan_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        features |= NETIF_F_ALL_FOR_ALL;
        features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
        features = netdev_increment_features(ipvlan->phy_dev->features,
                                             features, features);
        features |= IPVLAN_ALWAYS_ON;
        features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);

        return features;
}
static void ipvlan_change_rx_flags(struct net_device *dev, int change)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
}
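/* ndo_set_rx_mode: rebuild the slave's imperfect multicast filter (a bitmap
 * indexed by ipvlan_mac_hash()) and sync the unicast/multicast address lists
 * down to the lower device.
 */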
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
        } else {
                struct netdev_hw_addr *ha;
                DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);

                bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
                netdev_for_each_mc_addr(ha, dev)
                        __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);

                /* Turn-on broadcast bit irrespective of address family,
                 * since broadcast is deferred to a work-queue, hence no
                 * impact on fast-path processing.
                 */
                __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);

                bitmap_copy(ipvlan->mac_filters, mc_filters,
                            IPVLAN_MAC_FILTER_SIZE);
        }
        dev_uc_sync(ipvlan->phy_dev, dev);
        dev_mc_sync(ipvlan->phy_dev, dev);
}
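/* ndo_get_stats64: fold the per-cpu counters into the rtnl_link_stats64
 * snapshot.  The 64-bit counters are read under the u64_stats seqcount; the
 * u32 error/drop counters are read without it.
 */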
static void ipvlan_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *s)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (ipvlan->pcpu_stats) {
                struct ipvl_pcpu_stats *pcptr;
                u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
                u32 rx_errs = 0, tx_drps = 0;
                u32 strt;
                int idx;

                for_each_possible_cpu(idx) {
                        pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
                        do {
                                strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
                                rx_pkts = pcptr->rx_pkts;
                                rx_bytes = pcptr->rx_bytes;
                                rx_mcast = pcptr->rx_mcast;
                                tx_pkts = pcptr->tx_pkts;
                                tx_bytes = pcptr->tx_bytes;
                        } while (u64_stats_fetch_retry_irq(&pcptr->syncp,
                                                           strt));

                        s->rx_packets += rx_pkts;
                        s->rx_bytes += rx_bytes;
                        s->multicast += rx_mcast;
                        s->tx_packets += tx_pkts;
                        s->tx_bytes += tx_bytes;

                        /* u32 values are updated without syncp protection. */
                        rx_errs += pcptr->rx_errs;
                        tx_drps += pcptr->tx_drps;
                }
                s->rx_errors = rx_errs;
                s->rx_dropped = rx_errs;
                s->tx_dropped = tx_drps;
        }
}
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        return vlan_vid_add(phy_dev, proto, vid);
}

static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
                                   u16 vid)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        vlan_vid_del(phy_dev, proto, vid);
        return 0;
}

static int ipvlan_get_iflink(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->phy_dev->ifindex;
}
static const struct net_device_ops ipvlan_netdev_ops = {
        .ndo_init               = ipvlan_init,
        .ndo_uninit             = ipvlan_uninit,
        .ndo_open               = ipvlan_open,
        .ndo_stop               = ipvlan_stop,
        .ndo_start_xmit         = ipvlan_start_xmit,
        .ndo_fix_features       = ipvlan_fix_features,
        .ndo_change_rx_flags    = ipvlan_change_rx_flags,
        .ndo_set_rx_mode        = ipvlan_set_multicast_mac_filter,
        .ndo_get_stats64        = ipvlan_get_stats64,
        .ndo_vlan_rx_add_vid    = ipvlan_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ipvlan_vlan_rx_kill_vid,
        .ndo_get_iflink         = ipvlan_get_iflink,
};
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
                              unsigned short type, const void *daddr,
                              const void *saddr, unsigned len)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct net_device *phy_dev = ipvlan->phy_dev;

        /* TODO Probably use a different field than dev_addr so that the
         * mac-address on the virtual device is portable and can be carried
         * while the packets use the mac-addr on the physical device.
         */
        return dev_hard_header(skb, phy_dev, type, daddr,
                               saddr ? : phy_dev->dev_addr, len);
}
static const struct header_ops ipvlan_header_ops = {
        .create         = ipvlan_hard_header,
        .parse          = eth_header_parse,
        .cache          = eth_header_cache,
        .cache_update   = eth_header_cache_update,
};
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
        ipvlan->dev->mtu = dev->mtu;
}
static bool netif_is_ipvlan(const struct net_device *dev)
{
        /* both ipvlan and ipvtap devices use the same netdev_ops */
        return dev->netdev_ops == &ipvlan_netdev_ops;
}
static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
                                             struct ethtool_link_ksettings *cmd)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd);
}
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
        const struct ipvl_dev *ipvlan = netdev_priv(dev);

        return ipvlan->msg_enable;
}

static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        ipvlan->msg_enable = value;
}
static const struct ethtool_ops ipvlan_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_link_ksettings     = ipvlan_ethtool_get_link_ksettings,
        .get_drvinfo            = ipvlan_ethtool_get_drvinfo,
        .get_msglevel           = ipvlan_ethtool_get_msglevel,
        .set_msglevel           = ipvlan_ethtool_set_msglevel,
};
static int ipvlan_nl_changelink(struct net_device *dev,
                                struct nlattr *tb[], struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
        int err = 0;

        if (!data)
                return 0;
        if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        if (data[IFLA_IPVLAN_MODE]) {
                u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                err = ipvlan_set_port_mode(port, nmode, extack);
        }

        if (!err && data[IFLA_IPVLAN_FLAGS]) {
                u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

                if (flags & IPVLAN_F_PRIVATE)
                        ipvlan_mark_private(port);
                else
                        ipvlan_clear_private(port);

                if (flags & IPVLAN_F_VEPA)
                        ipvlan_mark_vepa(port);
                else
                        ipvlan_clear_vepa(port);
        }

        return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
        return (0
                + nla_total_size(2) /* IFLA_IPVLAN_MODE */
                + nla_total_size(2) /* IFLA_IPVLAN_FLAGS */
                );
}
static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
                              struct netlink_ext_ack *extack)
{
        if (!data)
                return 0;

        if (data[IFLA_IPVLAN_MODE]) {
                u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

                if (mode >= IPVLAN_MODE_MAX)
                        return -EINVAL;
        }
        if (data[IFLA_IPVLAN_FLAGS]) {
                u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

                /* Only two bits are used at this moment. */
                if (flags & ~(IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
                        return -EINVAL;
                /* Also both flags can't be active at the same time. */
                if ((flags & (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA)) ==
                    (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
                        return -EINVAL;
        }
        return 0;
}
static int ipvlan_nl_fillinfo(struct sk_buff *skb,
                              const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
        int ret = -EINVAL;

        if (!port)
                goto err;

        ret = -EMSGSIZE;
        if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
                goto err;
        if (nla_put_u16(skb, IFLA_IPVLAN_FLAGS, port->flags))
                goto err;

        return 0;

err:
        return ret;
}
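/* rtnl newlink handler: resolve the lower device from IFLA_LINK, validate it,
 * register the new slave netdev, reserve a unique dev_id from the port's IDA
 * (used for the EUI-48 based IPv6 interface id), link it as an upper device
 * and finally apply the requested mode and flags.
 *
 * From userspace this corresponds to e.g. (eth0/ipvl0 are example names):
 *   ip link add link eth0 name ipvl0 type ipvlan mode l2
 */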
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
                    struct nlattr *tb[], struct nlattr *data[],
                    struct netlink_ext_ack *extack)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port;
        struct net_device *phy_dev;
        int err;
        u16 mode = IPVLAN_MODE_L3;

        if (!tb[IFLA_LINK])
                return -EINVAL;

        phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!phy_dev)
                return -ENODEV;

        if (netif_is_ipvlan(phy_dev)) {
                struct ipvl_dev *tmp = netdev_priv(phy_dev);

                phy_dev = tmp->phy_dev;
                if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
        } else if (!netif_is_ipvlan_port(phy_dev)) {
                /* Exit early if the underlying link is invalid or busy */
                if (phy_dev->type != ARPHRD_ETHER ||
                    phy_dev->flags & IFF_LOOPBACK) {
                        netdev_err(phy_dev,
                                   "Master is either lo or non-ether device\n");
                        return -EINVAL;
                }

                if (netdev_is_rx_handler_busy(phy_dev)) {
                        netdev_err(phy_dev, "Device is already in use.\n");
                        return -EBUSY;
                }
        }

        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        if (!tb[IFLA_MTU])
                ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);

        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
         * traffic.
         */
        memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);

        dev->priv_flags |= IFF_NO_RX_HANDLER;

        err = register_netdevice(dev);
        if (err < 0)
                return err;

        /* ipvlan_init() would have created the port, if required */
        port = ipvlan_port_get_rtnl(phy_dev);
        ipvlan->port = port;

        /* If the port-id base is at the MAX value, then wrap it around and
         * begin from 0x1 again. This may be due to a busy system where lots
         * of slaves are getting created and deleted.
         */
        if (port->dev_id_start == 0xFFFE)
                port->dev_id_start = 0x1;

        /* Since L2 address is shared among all IPvlan slaves including
         * master, use unique 16 bit dev-ids to differentiate among them.
         * Assign IDs between 0x1 and 0xFFFE (used by the master) to each
         * slave link [see addrconf_ifid_eui48()].
         */
        err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
                             GFP_KERNEL);
        if (err < 0)
                err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
                                     GFP_KERNEL);
        if (err < 0)
                goto unregister_netdev;
        dev->dev_id = err;

        /* Increment id-base to the next slot for the future assignment */
        port->dev_id_start = err + 1;

        err = netdev_upper_dev_link(phy_dev, dev, extack);
        if (err)
                goto remove_ida;

        /* Flags are per port and latest update overrides. User has
         * to be consistent in setting it just like the mode attribute.
         */
        if (data && data[IFLA_IPVLAN_FLAGS])
                port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);

        if (data && data[IFLA_IPVLAN_MODE])
                mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

        err = ipvlan_set_port_mode(port, mode, extack);
        if (err)
                goto unlink_netdev;

        list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
        netif_stacked_transfer_operstate(phy_dev, dev);
        return 0;

unlink_netdev:
        netdev_upper_dev_unlink(phy_dev, dev);
remove_ida:
        ida_simple_remove(&port->ida, dev->dev_id);
unregister_netdev:
        unregister_netdevice(dev);
        return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;

        spin_lock_bh(&ipvlan->addrs_lock);
        list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
                ipvlan_ht_addr_del(addr);
                list_del_rcu(&addr->anode);
                kfree_rcu(addr, rcu);
        }
        spin_unlock_bh(&ipvlan->addrs_lock);

        ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}
EXPORT_SYMBOL_GPL(ipvlan_link_delete);
void ipvlan_link_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(ipvlan_link_setup);
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
        [IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
        [IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
static struct net *ipvlan_get_link_net(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        return dev_net(ipvlan->phy_dev);
}
static struct rtnl_link_ops ipvlan_link_ops = {
        .kind           = "ipvlan",
        .priv_size      = sizeof(struct ipvl_dev),

        .setup          = ipvlan_link_setup,
        .newlink        = ipvlan_link_new,
        .dellink        = ipvlan_link_delete,
        .get_link_net   = ipvlan_get_link_net,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
{
        ops->get_size   = ipvlan_nl_getsize;
        ops->policy     = ipvlan_nl_policy;
        ops->validate   = ipvlan_nl_validate;
        ops->fill_info  = ipvlan_nl_fillinfo;
        ops->changelink = ipvlan_nl_changelink;
        ops->maxtype    = IFLA_IPVLAN_MAX;
        return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(ipvlan_link_register);
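/* netdevice notifier: mirror events on the lower device (operstate changes,
 * MTU, feature and MAC address changes, unregister, netns moves) onto every
 * ipvlan slave hanging off its port.
 */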
static int ipvlan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
{
        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
        struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct ipvl_dev *ipvlan, *next;
        struct ipvl_port *port;
        LIST_HEAD(lst_kill);
        int err;

        if (!netif_is_ipvlan_port(dev))
                return NOTIFY_DONE;

        port = ipvlan_port_get_rtnl(dev);

        switch (event) {
        case NETDEV_UP:
        case NETDEV_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        netif_stacked_transfer_operstate(ipvlan->phy_dev,
                                                         ipvlan->dev);
                break;

        case NETDEV_REGISTER: {
                struct net *oldnet, *newnet = dev_net(dev);

                oldnet = read_pnet(&port->pnet);
                if (net_eq(newnet, oldnet))
                        break;

                write_pnet(&port->pnet, newnet);

                ipvlan_migrate_l3s_hook(oldnet, newnet);
                break;
        }
        case NETDEV_UNREGISTER:
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode)
                        ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
                                                            &lst_kill);
                unregister_netdevice_many(&lst_kill);
                break;

        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ipvlan->dev->gso_max_size = dev->gso_max_size;
                        ipvlan->dev->gso_max_segs = dev->gso_max_segs;
                        netdev_update_features(ipvlan->dev);
                }
                break;

        case NETDEV_CHANGEMTU:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode)
                        ipvlan_adjust_mtu(ipvlan, dev);
                break;

        case NETDEV_PRE_CHANGEADDR:
                prechaddr_info = ptr;
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        err = dev_pre_changeaddr_notify(ipvlan->dev,
                                                        prechaddr_info->dev_addr,
                                                        extack);
                        if (err)
                                return notifier_from_errno(err);
                }
                break;

        case NETDEV_CHANGEADDR:
                list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
                        ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
                        call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
                }
                break;

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlying device to change its type. */
                return NOTIFY_BAD;
        }
        return NOTIFY_DONE;
}
/* the caller must hold the addrs lock */
static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;

        addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;

        addr->master = ipvlan;
        if (!is_v6) {
                memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
                addr->atype = IPVL_IPV4;
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
                addr->atype = IPVL_IPV6;
#endif
        }

        list_add_tail_rcu(&addr->anode, &ipvlan->addrs);

        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
        if (netif_running(ipvlan->dev))
                ipvlan_ht_addr_add(ipvlan, addr);

        return 0;
}
static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
        struct ipvl_addr *addr;

        spin_lock_bh(&ipvlan->addrs_lock);
        addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
        if (!addr) {
                spin_unlock_bh(&ipvlan->addrs_lock);
                return;
        }

        ipvlan_ht_addr_del(addr);
        list_del_rcu(&addr->anode);
        spin_unlock_bh(&ipvlan->addrs_lock);
        kfree_rcu(addr, rcu);
}
static bool ipvlan_is_valid_dev(const struct net_device *dev)
{
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!netif_is_ipvlan(dev))
                return false;

        if (!ipvlan || !ipvlan->port)
                return false;

        return true;
}
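/* The inet/inet6 address notifiers below keep the port's address hash table
 * in sync with the addresses configured on each slave; the validator
 * notifiers reject an address that is already claimed by another slave on
 * the same port.
 */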
#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        int ret = -EINVAL;

        spin_lock_bh(&ipvlan->addrs_lock);
        if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv6=%pI6c addr for %s intf\n",
                          ip6_addr, ipvlan->dev->name);
        else
                ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
        spin_unlock_bh(&ipvlan->addrs_lock);
        return ret;
}
static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
        return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
static int ipvlan_addr6_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if6->idev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_add_addr6(ipvlan, &if6->addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ipvlan_del_addr6(ipvlan, &if6->addr);
                break;
        }

        return NOTIFY_OK;
}
static int ipvlan_addr6_validator_event(struct notifier_block *unused,
                                        unsigned long event, void *ptr)
{
        struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
        struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
                        NL_SET_ERR_MSG(i6vi->extack,
                                       "Address already assigned to an ipvlan device");
                        return notifier_from_errno(-EADDRINUSE);
                }
                break;
        }

        return NOTIFY_OK;
}
#endif
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        int ret = -EINVAL;

        spin_lock_bh(&ipvlan->addrs_lock);
        if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
        else
                ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
        spin_unlock_bh(&ipvlan->addrs_lock);
        return ret;
}
static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
        return ipvlan_del_addr(ipvlan, ip4_addr, false);
}
static int ipvlan_addr4_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
{
        struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
        struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct in_addr ip4_addr;

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                ip4_addr.s_addr = if4->ifa_address;
                if (ipvlan_add_addr4(ipvlan, &ip4_addr))
                        return NOTIFY_BAD;
                break;

        case NETDEV_DOWN:
                ip4_addr.s_addr = if4->ifa_address;
                ipvlan_del_addr4(ipvlan, &ip4_addr);
                break;
        }

        return NOTIFY_OK;
}
static int ipvlan_addr4_validator_event(struct notifier_block *unused,
                                        unsigned long event, void *ptr)
{
        struct in_validator_info *ivi = (struct in_validator_info *)ptr;
        struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);

        if (!ipvlan_is_valid_dev(dev))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
                        NL_SET_ERR_MSG(ivi->extack,
                                       "Address already assigned to an ipvlan device");
                        return notifier_from_errno(-EADDRINUSE);
                }
                break;
        }

        return NOTIFY_OK;
}
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_event,
};

static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr4_validator_event,
};

static struct notifier_block ipvlan_notifier_block __read_mostly = {
        .notifier_call = ipvlan_device_event,
};

#if IS_ENABLED(CONFIG_IPV6)
static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_event,
};

static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
        .notifier_call = ipvlan_addr6_validator_event,
};
#endif
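/* Module init: register the netdevice and address notifiers first, then the
 * L3S hooks and the rtnl link ops; unwind the notifiers if either of the
 * later steps fails.
 */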
static int __init ipvlan_init_module(void)
{
        int err;

        ipvlan_init_secret();
        register_netdevice_notifier(&ipvlan_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        register_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
        register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);

        err = ipvlan_l3s_init();
        if (err < 0)
                goto error;

        err = ipvlan_link_register(&ipvlan_link_ops);
        if (err < 0) {
                ipvlan_l3s_cleanup();
                goto error;
        }

        return 0;
error:
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inetaddr_validator_notifier(
            &ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        unregister_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        return err;
}
static void __exit ipvlan_cleanup_module(void)
{
        rtnl_link_unregister(&ipvlan_link_ops);
        ipvlan_l3s_cleanup();
        unregister_netdevice_notifier(&ipvlan_notifier_block);
        unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
        unregister_inetaddr_validator_notifier(
            &ipvlan_addr4_vtor_notifier_block);
#if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
        unregister_inet6addr_validator_notifier(
            &ipvlan_addr6_vtor_notifier_block);
#endif
}
module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");