// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
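
/* The switch driver is expected to have set ds->phys_mii_mask before this
 * runs: only PHY addresses covered by that mask are routed to the
 * ops->phy_read/phy_write callbacks above, and phy_mask hides the remaining
 * addresses from the MDIO core.  An illustrative driver-side sketch (not
 * taken from this file), for a switch with five internal PHYs:
 *
 *	ds->phys_mii_mask = GENMASK(4, 0);
 *	dsa_slave_mii_bus_init(ds);
 *	mdiobus_register(ds->slave_mii_bus);
 */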
/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	phylink_start(dp->pl);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
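
/* dsa_slave_open() mirrors its configuration onto the master device: a
 * secondary unicast address is only needed when the slave MAC differs from
 * the master's, and allmulti/promiscuity are reference-counted on the
 * master.  The error path above therefore unwinds strictly in reverse
 * order of the setup steps.
 */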
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_stop(dp->pl);

	dsa_port_disable(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}
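
/* This is the .ndo_fdb_dump backend, so entries programmed into the switch
 * show up through the usual bridge tooling alongside software entries.
 * Example (illustrative port name):
 *
 *	$ bridge fdb show dev swp0
 */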
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
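
/* SIOCGHWTSTAMP/SIOCSHWTSTAMP are forwarded to the switch driver when it
 * implements the port_hwtstamp_* ops, so standard PTP tooling can configure
 * hardware timestamping directly on the slave interface; everything else
 * falls through to phylink, which handles the usual MII ioctls.
 */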
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
		return 0;

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	err = dsa_port_vlan_add(dp, &vlan, trans);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
	if (err)
		return err;

	return 0;
}
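
/* In other words, a VLAN added on a user port is mirrored onto the CPU port
 * minus the PVID flag, so tagged traffic for that VLAN can still reach the
 * host.  Illustrative bridge usage (hypothetical port name):
 *
 *	$ bridge vlan add dev swp0 vid 100
 */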
static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
		return 0;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	DSA_SKB_CB(skb)->clone = clone;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
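
/* Transmit path summary: software counters are updated, PTP packets are
 * cloned for the switch driver's TX timestamping, the tagging protocol's
 * xmit hook prepends the switch tag (possibly reallocating the SKB), and
 * dsa_enqueue_skb() finally queues the tagged frame on the master device.
 */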
/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}
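
/* The first four strings describe the software counters kept in p->stats64;
 * driver-specific strings follow at data + 4 * len, which is why
 * dsa_slave_get_ethtool_stats() below fills hardware counters starting at
 * data + 4.  All of them are visible through "ethtool -S" on the slave
 * interface.
 */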
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}
static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!flow_offload_has_one_action(&cls->rule->action))
		return err;

	act = &cls->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		if (!act->dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(act->dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(act->dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return err;
}
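
/* Only a single mirred action on an ETH_P_ALL matchall classifier is
 * offloaded here, and the mirror target must itself be a DSA slave
 * interface.  Illustrative tc usage (hypothetical port names):
 *
 *	$ tc qdisc add dev swp0 clsact
 *	$ tc filter add dev swp0 ingress matchall \
 *		action mirred egress mirror dev swp1
 */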
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
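
/* A separate classification callback is registered per binder type so that
 * the matchall handler above knows whether a mirror rule is an ingress or
 * an egress one; both directions share the single dsa_slave_block_cb_list
 * to detect double binds.
 */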
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (type == TC_SETUP_BLOCK)
		return dsa_slave_setup_tc_block(dev, type_data);

	if (!ds->ops->port_setup_tc)
		return -EOPNOTSUPP;

	return ds->ops->port_setup_tc(ds, dp->index, type, type_data);
}
static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		if (!br_vlan_enabled(dp->bridge_dev))
			return 0;

		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID, is not found. A return
		 * value of 0 means the VLAN is already managed by the
		 * bridge, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	ret = dsa_port_vid_add(dp, vid, 0);
	if (ret)
		return ret;

	/* And CPU port... */
	ret = dsa_port_vid_add(dp->cpu_dp, vid, 0);
	if (ret)
		return ret;

	return 0;
}
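
/* This is the 8021q (.ndo_vlan_rx_add_vid) path, used when a VLAN upper is
 * created directly on the slave, e.g. "ip link add link swp0 name swp0.100
 * type vlan id 100" (illustrative names).  VLANs managed by a VLAN-aware
 * bridge are rejected above with -EBUSY so the two configuration paths do
 * not fight over the same VID.
 */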
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		if (!br_vlan_enabled(dp->bridge_dev))
			return 0;

		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID, is not found. A return
		 * value of 0 means the VLAN is already managed by the
		 * bridge, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	return dsa_port_vid_del(dp, vid);
}
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};
/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags,
		       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}
static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dsa_slave_get_stats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
};
static struct device_type dsa_type = {
	.name	= "dsa",
};
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct net_device *dev,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	/* Register only if the switch provides such a callback, since this
	 * callback takes precedence over polling the link GPIO in PHYLINK
	 * (see phylink_get_fixed_state).
	 */
	if (ds->ops->phylink_fixed_state)
		phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}
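
/* PHY binding order: phylink first tries the firmware-described PHY, SFP
 * cage or fixed-link for this port; only if nothing is described (-ENODEV)
 * and the switch exposes an internal MDIO bus does it fall back to the PHY
 * at the port's own index on ds->slave_mii_bus.  An illustrative (not from
 * this file) device tree port node:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *		phy-handle = <&phy0>;
 *		phy-mode = "internal";
 *	};
 */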
int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}
static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	phylink_destroy(dp->pl);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_upper_vlan_check(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *
				      info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEUPPER) {
		if (!dsa_slave_dev_check(dev))
			return dsa_slave_upper_vlan_check(dev, ptr);

		return dsa_slave_changeupper(dev, ptr);
	}

	return NOTIFY_DONE;
}
struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
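
/* The FDB add/del requests are deferred to this work item because the
 * switchdev notifier that queues them runs in atomic context (note the
 * GFP_ATOMIC allocations below), while dsa_port_fdb_add/del may sleep on
 * the switch's management interface.
 */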
static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}
/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}