// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}
static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
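/*
 * Example (illustrative sketch, not part of this file): a switch driver opts
 * into this slave MII bus by implementing the phy_read/phy_write ops and
 * setting phys_mii_mask; the "example_*" names below are hypothetical.
 *
 *	static int example_phy_read(struct dsa_switch *ds, int port, int regnum)
 *	{
 *		struct example_priv *priv = ds->priv;
 *
 *		return example_mdio_read(priv, port, regnum);
 *	}
 *
 *	static const struct dsa_switch_ops example_ops = {
 *		.phy_read	= example_phy_read,
 *		.phy_write	= example_phy_write,
 *		...
 *	};
 */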
/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}
static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable_rt(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}
static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	dsa_port_disable_rt(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);

	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};
static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}
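/*
 * Usage note (illustrative): the .ndo_fdb_dump implementation above is what
 * backs "bridge fdb show" for addresses learned by the switch hardware
 * itself, e.g.:
 *
 *	$ bridge fdb show dev swp0
 */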
static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}
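/*
 * Example (illustrative sketch): the hwtstamp pass-through above is reached
 * from userspace via the standard SIOC[GS]HWTSTAMP ioctls. Assuming the
 * switch driver implements port_hwtstamp_set, something like this enables
 * PTP timestamping on a port (error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "swp0", sizeof(ifr.ifr_name));
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */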
static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
/* Must be called under rcu_read_lock() */
static int
dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
				      const struct switchdev_obj_port_vlan *vlan)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
		u16 vid;

		if (!is_vlan_dev(upper_dev))
			continue;

		vid = vlan_dev_vlan_id(upper_dev);
		if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
			return -EBUSY;
	}

	return 0;
}
static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct switchdev_trans *trans)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int vid, err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
	 * the same VID.
	 */
	if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
		rcu_read_lock();
		err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
		rcu_read_unlock();
		if (err)
			return err;
	}

	err = dsa_port_vlan_add(dp, &vlan, trans);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
	if (err)
		return err;

	for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
		err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
		if (err)
			return err;
	}

	return 0;
}
static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan *vlan;
	int vid, err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dsa_port_skip_vlan_configuration(dp))
		return 0;

	vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, vlan);
	if (err)
		return err;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		vlan_vid_del(master, htons(ETH_P_8021Q), vid);

	return 0;
}
static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB delete,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	return netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
	return NETDEV_TX_OK;
#endif
}
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type)) {
		DSA_SKB_CB(skb)->clone = clone;
		return;
	}

	kfree_skb(clone);
}
netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
{
	int needed_headroom = dev->needed_headroom;
	int needed_tailroom = dev->needed_tailroom;

	/* For tail taggers, we need to pad short frames ourselves, to ensure
	 * that the tail tag does not fail at its role of being at the end of
	 * the packet, once the master interface pads the frame. Account for
	 * that pad length here, and pad later.
	 */
	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
		needed_tailroom += ETH_ZLEN - skb->len;
	/* skb_headroom() returns unsigned int... */
	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);

	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
		/* No reallocation needed, yay! */
		return 0;

	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
				GFP_ATOMIC);
}
static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct sk_buff *nskb;

	dev_sw_netstats_tx_add(dev, 1, skb->len);

	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	if (dsa_realloc_skb(skb, dev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* needed_tailroom should still be 'warm' in the cache line from
	 * dsa_realloc_skb(), which has also ensured that padding is safe.
	 */
	if (dev->needed_tailroom)
		eth_skb_pad(skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}
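/*
 * Example (illustrative sketch): p->xmit points at the tagging protocol's
 * xmit op. A minimal tagger that prepends a header before the Ethernet
 * destination address (one of several possible tag placements) could look
 * like the hypothetical sketch below; the headroom it consumes is guaranteed
 * by dsa_realloc_skb() via needed_headroom.
 *
 *	static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
 *						struct net_device *dev)
 *	{
 *		struct dsa_port *dp = dsa_slave_to_port(dev);
 *		u8 *tag = skb_push(skb, 4);
 *
 *		tag[0] = dp->index;	// hypothetical port field
 *		tag[1] = tag[2] = tag[3] = 0;
 *
 *		return skb;
 *	}
 */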
/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}
static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}
static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}
static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}
static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(dev->tstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}
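/*
 * Usage note (illustrative): the four software counters above are always
 * reported first, followed by whatever hardware counters the switch driver
 * exposes, e.g.:
 *
 *	$ ethtool -S swp0
 */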
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count = 4;

		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}
static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

static void dsa_slave_get_pauseparam(struct net_device *dev,
				     struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	phylink_ethtool_get_pauseparam(dp->pl, pause);
}

static int dsa_slave_set_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *pause)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_set_pauseparam(dp->pl, pause);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif
static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}
static int
dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_mirror_tc_entry *mirror;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err;

	if (!ds->ops->port_mirror_add)
		return -EOPNOTSUPP;

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	act = &cls->rule->action.entries[0];

	if (!act->dev)
		return -EINVAL;

	if (!dsa_slave_dev_check(act->dev))
		return -EOPNOTSUPP;

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
	mirror = &mall_tc_entry->mirror;

	to_dp = dsa_slave_to_port(act->dev);

	mirror->to_local_port = to_dp->index;
	mirror->ingress = ingress;

	err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
static int
dsa_slave_add_cls_matchall_police(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls,
				  bool ingress)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_policer_tc_entry *policer;
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	int err;

	if (!ds->ops->port_policer_add) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Policing offload not implemented");
		return -EOPNOTSUPP;
	}

	if (!ingress) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only supported on ingress qdisc");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(&cls->rule->action,
					      cls->common.extack))
		return -EOPNOTSUPP;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) {
		if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one port policer allowed");
			return -EEXIST;
		}
	}

	act = &cls->rule->action.entries[0];

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;

	mall_tc_entry->cookie = cls->cookie;
	mall_tc_entry->type = DSA_PORT_MALL_POLICER;
	policer = &mall_tc_entry->policer;
	policer->rate_bytes_per_sec = act->police.rate_bytes_ps;
	policer->burst = act->police.burst;

	err = ds->ops->port_policer_add(ds, dp->index, policer);
	if (err) {
		kfree(mall_tc_entry);
		return err;
	}

	list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);

	return err;
}
static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	int err = -EOPNOTSUPP;

	if (cls->common.protocol == htons(ETH_P_ALL) &&
	    flow_offload_has_one_action(&cls->rule->action) &&
	    cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED)
		err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress);
	else if (flow_offload_has_one_action(&cls->rule->action) &&
		 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE)
		err = dsa_slave_add_cls_matchall_police(dev, cls, ingress);

	return err;
}
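/*
 * Usage note (illustrative): the two matchall offloads above correspond to
 * commands such as:
 *
 *	$ tc qdisc add dev swp0 clsact
 *	$ tc filter add dev swp0 ingress matchall skip_sw \
 *		action mirred egress mirror dev swp1
 *	$ tc filter add dev swp0 ingress matchall skip_sw \
 *		action police rate 100mbit burst 64k
 */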
static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		if (ds->ops->port_mirror_del)
			ds->ops->port_mirror_del(ds, dp->index,
						 &mall_tc_entry->mirror);
		break;
	case DSA_PORT_MALL_POLICER:
		if (ds->ops->port_policer_del)
			ds->ops->port_policer_del(ds, dp->index);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int dsa_slave_add_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_add)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_add(ds, port, cls, ingress);
}

static int dsa_slave_del_cls_flower(struct net_device *dev,
				    struct flow_cls_offload *cls,
				    bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_del)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_del(ds, port, cls, ingress);
}

static int dsa_slave_stats_cls_flower(struct net_device *dev,
				      struct flow_cls_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->cls_flower_stats)
		return -EOPNOTSUPP;

	return ds->ops->cls_flower_stats(ds, port, cls, ingress);
}

static int dsa_slave_setup_tc_cls_flower(struct net_device *dev,
					 struct flow_cls_offload *cls,
					 bool ingress)
{
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		return dsa_slave_add_cls_flower(dev, cls, ingress);
	case FLOW_CLS_DESTROY:
		return dsa_slave_del_cls_flower(dev, cls, ingress);
	case FLOW_CLS_STATS:
		return dsa_slave_stats_cls_flower(dev, cls, ingress);
	default:
		return -EOPNOTSUPP;
	}
}
static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	case TC_SETUP_CLSFLOWER:
		return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
*dev
, enum tc_setup_type type
,
1237 struct dsa_port
*dp
= dsa_slave_to_port(dev
);
1238 struct dsa_switch
*ds
= dp
->ds
;
1240 if (type
== TC_SETUP_BLOCK
)
1241 return dsa_slave_setup_tc_block(dev
, type_data
);
1243 if (!ds
->ops
->port_setup_tc
)
1246 return ds
->ops
->port_setup_tc(ds
, dp
->index
, type
, type_data
);
static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	struct switchdev_trans trans;
	int ret;

	/* User port... */
	trans.ph_prepare = true;
	ret = dsa_port_vlan_add(dp, &vlan, &trans);
	if (ret)
		return ret;

	trans.ph_prepare = false;
	ret = dsa_port_vlan_add(dp, &vlan, &trans);
	if (ret)
		return ret;

	/* And CPU port... */
	trans.ph_prepare = true;
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
	if (ret)
		return ret;

	trans.ph_prepare = false;
	ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
	if (ret)
		return ret;

	return vlan_vid_add(master, proto, vid);
}
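/*
 * Usage note (illustrative): .ndo_vlan_rx_add_vid runs when an 802.1Q upper
 * is created on a port, e.g.:
 *
 *	$ ip link add link swp0 name swp0.100 type vlan id 100
 *
 * The VID is installed on the user port, the CPU port and the master.
 */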
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan = {
		.vid_begin = vid,
		.vid_end = vid,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};
	int err;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	err = dsa_port_vlan_del(dp, &vlan);
	if (err)
		return err;

	vlan_vid_del(master, proto, vid);

	return 0;
}
struct dsa_hw_port {
	struct list_head list;
	struct net_device *dev;
	int old_mtu;
};

static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu)
{
	const struct dsa_hw_port *p;
	int err;

	list_for_each_entry(p, hw_port_list, list) {
		if (p->dev->mtu == mtu)
			continue;

		err = dev_set_mtu(p->dev, mtu);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(p, hw_port_list, list) {
		if (p->dev->mtu == p->old_mtu)
			continue;

		if (dev_set_mtu(p->dev, p->old_mtu))
			netdev_err(p->dev, "Failed to restore MTU\n");
	}

	return err;
}

static void dsa_hw_port_list_free(struct list_head *hw_port_list)
{
	struct dsa_hw_port *p, *n;

	list_for_each_entry_safe(p, n, hw_port_list, list)
		kfree(p);
}
/* Make the hardware datapath to/from @dev limited to a common MTU */
static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
{
	struct list_head hw_port_list;
	struct dsa_switch_tree *dst;
	int min_mtu = ETH_MAX_MTU;
	struct dsa_port *other_dp;
	int err;

	if (!dp->ds->mtu_enforcement_ingress)
		return;

	if (!dp->bridge_dev)
		return;

	INIT_LIST_HEAD(&hw_port_list);

	/* Populate the list of ports that are part of the same bridge
	 * as the newly added/modified port
	 */
	list_for_each_entry(dst, &dsa_tree_list, list) {
		list_for_each_entry(other_dp, &dst->ports, list) {
			struct dsa_hw_port *hw_port;
			struct net_device *slave;

			if (other_dp->type != DSA_PORT_TYPE_USER)
				continue;

			if (other_dp->bridge_dev != dp->bridge_dev)
				continue;

			if (!other_dp->ds->mtu_enforcement_ingress)
				continue;

			slave = other_dp->slave;

			if (min_mtu > slave->mtu)
				min_mtu = slave->mtu;

			hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL);
			if (!hw_port)
				goto out;

			hw_port->dev = slave;
			hw_port->old_mtu = slave->mtu;

			list_add(&hw_port->list, &hw_port_list);
		}
	}

	/* Attempt to configure the entire hardware bridge to the newly added
	 * interface's MTU first, regardless of whether the intention of the
	 * user was to raise or lower it.
	 */
	err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu);
	if (!err)
		goto out;

	/* Clearly that didn't work out so well, so just set the minimum MTU on
	 * all hardware bridge ports now. If this fails too, then all ports will
	 * still have their old MTU rolled back anyway.
	 */
	dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu);

out:
	dsa_hw_port_list_free(&hw_port_list);
}
static int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	struct dsa_port *cpu_dp;
	int port = p->dp->index;
	int largest_mtu = 0;
	int new_master_mtu;
	int old_master_mtu;
	int mtu_limit;
	int cpu_mtu;
	int err, i;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	for (i = 0; i < ds->num_ports; i++) {
		int slave_mtu;

		if (!dsa_is_user_port(ds, i))
			continue;

		/* During probe, this function will be called for each slave
		 * device, while not all of them have been allocated. That's
		 * ok, it doesn't change what the maximum is, so ignore it.
		 */
		if (!dsa_to_port(ds, i)->slave)
			continue;

		/* Pretend that we already applied the setting, which we
		 * actually haven't (still haven't done all integrity checks)
		 */
		if (i == port)
			slave_mtu = new_mtu;
		else
			slave_mtu = dsa_to_port(ds, i)->slave->mtu;

		if (largest_mtu < slave_mtu)
			largest_mtu = slave_mtu;
	}

	cpu_dp = dsa_to_port(ds, port)->cpu_dp;

	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
	old_master_mtu = master->mtu;
	new_master_mtu = largest_mtu + cpu_dp->tag_ops->overhead;
	if (new_master_mtu > mtu_limit)
		return -ERANGE;

	/* If the master MTU isn't over limit, there's no need to check the CPU
	 * MTU, since that surely isn't either.
	 */
	cpu_mtu = largest_mtu;

	/* Start applying stuff */
	if (new_master_mtu != old_master_mtu) {
		err = dev_set_mtu(master, new_master_mtu);
		if (err < 0)
			goto out_master_failed;

		/* We only need to propagate the MTU of the CPU port to
		 * upstream switches.
		 */
		err = dsa_port_mtu_change(cpu_dp, cpu_mtu, true);
		if (err)
			goto out_cpu_failed;
	}

	err = dsa_port_mtu_change(dp, new_mtu, false);
	if (err)
		goto out_port_failed;

	dev->mtu = new_mtu;

	dsa_bridge_mtu_normalization(dp);

	return 0;

out_port_failed:
	if (new_master_mtu != old_master_mtu)
		dsa_port_mtu_change(cpu_dp, old_master_mtu -
				    cpu_dp->tag_ops->overhead,
				    true);
out_cpu_failed:
	if (new_master_mtu != old_master_mtu)
		dev_set_mtu(master, old_master_mtu);
out_master_failed:
	return err;
}
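/*
 * Usage note (illustrative): "ip link set dev swp0 mtu 9000" lands here; the
 * master MTU is raised to the largest user port MTU plus the tagger overhead
 * before the port itself is reconfigured, and everything is rolled back on
 * failure.
 */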
static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo		= dsa_slave_get_drvinfo,
	.get_regs_len		= dsa_slave_get_regs_len,
	.get_regs		= dsa_slave_get_regs,
	.nway_reset		= dsa_slave_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= dsa_slave_get_eeprom_len,
	.get_eeprom		= dsa_slave_get_eeprom,
	.set_eeprom		= dsa_slave_set_eeprom,
	.get_strings		= dsa_slave_get_strings,
	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
	.get_sset_count		= dsa_slave_get_sset_count,
	.set_wol		= dsa_slave_set_wol,
	.get_wol		= dsa_slave_get_wol,
	.set_eee		= dsa_slave_set_eee,
	.get_eee		= dsa_slave_get_eee,
	.get_link_ksettings	= dsa_slave_get_link_ksettings,
	.set_link_ksettings	= dsa_slave_set_link_ksettings,
	.get_pauseparam		= dsa_slave_get_pauseparam,
	.set_pauseparam		= dsa_slave_set_pauseparam,
	.get_rxnfc		= dsa_slave_get_rxnfc,
	.set_rxnfc		= dsa_slave_set_rxnfc,
	.get_ts_info		= dsa_slave_get_ts_info,
};
/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags,
		       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}
static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open		= dsa_slave_open,
	.ndo_stop		= dsa_slave_close,
	.ndo_start_xmit		= dsa_slave_xmit,
	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
	.ndo_set_mac_address	= dsa_slave_set_mac_address,
	.ndo_fdb_add		= dsa_legacy_fdb_add,
	.ndo_fdb_del		= dsa_legacy_fdb_del,
	.ndo_fdb_dump		= dsa_slave_fdb_dump,
	.ndo_do_ioctl		= dsa_slave_ioctl,
	.ndo_get_iflink		= dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
	.ndo_poll_controller	= dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name	= dsa_slave_get_phys_port_name,
	.ndo_setup_tc		= dsa_slave_setup_tc,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_port_parent_id	= dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid	= dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port	= dsa_slave_get_devlink_port,
	.ndo_change_mtu		= dsa_slave_change_mtu,
};

static struct device_type dsa_type = {
	.name	= "dsa",
};
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	if (dp->pl)
		phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct phylink_config *config,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}
/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	phy_interface_t mode;
	u32 phy_flags = 0;
	int ret;

	ret = of_get_phy_mode(port_dn, &mode);
	if (ret)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	/* The get_fixed_state callback takes precedence over polling the
	 * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set
	 * this if the switch provides such a callback.
	 */
	if (ds->ops->phylink_fixed_state) {
		dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state;
		dp->pl_config.poll_fixed_state = true;
	}

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}
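/*
 * Example (illustrative sketch): dsa_slave_phy_setup() consumes a port's
 * device tree description. A typical (hypothetical) port node:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *		phy-handle = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * Absent a phy-handle or fixed-link, and if the switch registered a slave
 * MII bus, the fallback above connects to the internal PHY at the port's
 * index.
 */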
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}
int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}
static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->features |= NETIF_F_LLTX;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	if (ds->ops->port_max_mtu)
		slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
	if (cpu_dp->tag_ops->tail_tag)
		slave_dev->needed_tailroom = cpu_dp->tag_ops->overhead;
	else
		slave_dev->needed_headroom = cpu_dp->tag_ops->overhead;
	/* Try to save one extra realloc later in the TX path (in the master)
	 * by also inheriting the master's needed headroom and tailroom.
	 * The 8021q driver also does this.
	 */
	slave_dev->needed_headroom += master->needed_headroom;
	slave_dev->needed_tailroom += master->needed_tailroom;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!slave_dev->tstats) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}

	ret = gro_cells_init(&p->gcells, slave_dev);
	if (ret)
		goto out_free;

	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	rtnl_lock();
	ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
	rtnl_unlock();
	if (ret && ret != -EOPNOTSUPP)
		dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
			 ret, ETH_DATA_LEN, port->index);

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(slave_dev,
			   "error %d setting up PHY for tree %d, switch %d, port %d\n",
			   ret, ds->dst->index, ds->index, port->index);
		goto out_gcells;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	rtnl_lock();

	ret = register_netdevice(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		rtnl_unlock();
		goto out_phy;
	}

	ret = netdev_upper_dev_link(master, slave_dev, NULL);

	rtnl_unlock();

	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	unregister_netdev(slave_dev);
out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_gcells:
	gro_cells_destroy(&p->gcells);
out_free:
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct net_device *master = dsa_slave_to_master(slave_dev);
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	netdev_upper_dev_unlink(master, slave_dev);
	unregister_netdevice(slave_dev);
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	phylink_destroy(dp->pl);
	gro_cells_destroy(&p->gcells);
	free_percpu(slave_dev->tstats);
	free_netdev(slave_dev);
}
bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}
static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			if (!err)
				dsa_bridge_mtu_normalization(dp);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}
static int
dsa_prevent_bridging_8021q_upper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}
static int
dsa_slave_check_8021q_upper(struct net_device *dev,
			    struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct net_device *br = dp->bridge_dev;
	struct bridge_vlan_info br_info;
	struct netlink_ext_ack *extack;
	int err = NOTIFY_DONE;
	u16 vid;

	if (!br || !br_vlan_enabled(br))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);
	vid = vlan_dev_vlan_id(info->upper_dev);

	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
	 * device, respectively the VID is not found, returning
	 * 0 means success, which is a failure for us here.
	 */
	err = br_vlan_get_info(br, vid, &br_info);
	if (err == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This VLAN is already configured by the bridge");
		return notifier_from_errno(-EBUSY);
	}

	return NOTIFY_DONE;
}
static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEUPPER: {
		struct netdev_notifier_changeupper_info *info = ptr;
		struct dsa_switch *ds;
		struct dsa_port *dp;
		int err;

		if (!dsa_slave_dev_check(dev))
			return dsa_prevent_bridging_8021q_upper(dev, ptr);

		dp = dsa_slave_to_port(dev);
		ds = dp->ds;

		if (ds->ops->port_prechangeupper) {
			err = ds->ops->port_prechangeupper(ds, dp->index, info);
			if (err)
				return notifier_from_errno(err);
		}

		if (is_vlan_dev(info->upper_dev))
			return dsa_slave_check_8021q_upper(dev, ptr);
		break;
	}
	case NETDEV_CHANGEUPPER:
		if (!dsa_slave_dev_check(dev))
			return NOTIFY_DONE;

		return dsa_slave_changeupper(dev, ptr);
	}

	return NOTIFY_DONE;
}
{
2051 struct work_struct work
;
2052 struct switchdev_notifier_fdb_info fdb_info
;
2053 struct net_device
*dev
;
2054 unsigned long event
;
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}
/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
*unused
,
2166 unsigned long event
, void *ptr
)
2168 struct net_device
*dev
= switchdev_notifier_info_to_dev(ptr
);
2172 case SWITCHDEV_PORT_OBJ_ADD
:
2173 err
= switchdev_handle_port_obj_add(dev
, ptr
,
2174 dsa_slave_dev_check
,
2175 dsa_slave_port_obj_add
);
2176 return notifier_from_errno(err
);
2177 case SWITCHDEV_PORT_OBJ_DEL
:
2178 err
= switchdev_handle_port_obj_del(dev
, ptr
,
2179 dsa_slave_dev_check
,
2180 dsa_slave_port_obj_del
);
2181 return notifier_from_errno(err
);
2182 case SWITCHDEV_PORT_ATTR_SET
:
2183 err
= switchdev_handle_port_attr_set(dev
, ptr
,
2184 dsa_slave_dev_check
,
2185 dsa_slave_port_attr_set
);
2186 return notifier_from_errno(err
);
static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};
int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}
void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}