/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(struct net_device *dev);

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
        struct dsa_switch *ds = bus->priv;

        if (ds->phys_mii_mask & (1 << addr))
                return ds->ops->phy_read(ds, addr, reg);

        return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
        struct dsa_switch *ds = bus->priv;

        if (ds->phys_mii_mask & (1 << addr))
                return ds->ops->phy_write(ds, addr, reg, val);

        return 0;
}

void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
        ds->slave_mii_bus->priv = (void *)ds;
        ds->slave_mii_bus->name = "dsa slave smi";
        ds->slave_mii_bus->read = dsa_slave_phy_read;
        ds->slave_mii_bus->write = dsa_slave_phy_write;
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
                 ds->dst->index, ds->index);
        ds->slave_mii_bus->parent = ds->dev;
        ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}

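/* Note: this MII bus only exists so that phylib can reach the switch's
 * internal PHYs; reads and writes are proxied to the switch driver's
 * ->phy_read/->phy_write operations above, and phy_mask hides every
 * address outside phys_mii_mask from bus scans.
 */
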
/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
        return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        if (!(master->flags & IFF_UP))
                return -ENETDOWN;

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
                err = dev_uc_add(master, dev->dev_addr);
                if (err < 0)
                        goto out;
        }

        if (dev->flags & IFF_ALLMULTI) {
                err = dev_set_allmulti(master, 1);
                if (err < 0)
                        goto del_unicast;
        }
        if (dev->flags & IFF_PROMISC) {
                err = dev_set_promiscuity(master, 1);
                if (err < 0)
                        goto clear_allmulti;
        }

        err = dsa_port_enable(dp, dev->phydev);
        if (err)
                goto clear_promisc;

        if (dev->phydev)
                phy_start(dev->phydev);

        return 0;

clear_promisc:
        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(master, -1);
clear_allmulti:
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(master, -1);
del_unicast:
        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);
out:
        return err;
}

static int dsa_slave_close(struct net_device *dev)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);

        if (dev->phydev)
                phy_stop(dev->phydev);

        dsa_port_disable(dp, dev->phydev);

        dev_mc_unsync(master, dev);
        dev_uc_unsync(master, dev);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(master, -1);
        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(master, -1);

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);

        return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
        struct net_device *master = dsa_slave_to_master(dev);

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
        if (change & IFF_PROMISC)
                dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
        struct net_device *master = dsa_slave_to_master(dev);

        dev_mc_sync(master, dev);
        dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct sockaddr *addr = a;
        int err;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (!(dev->flags & IFF_UP))
                goto out;

        if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
                err = dev_uc_add(master, addr->sa_data);
                if (err < 0)
                        return err;
        }

        if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
                dev_uc_del(master, dev->dev_addr);

out:
        ether_addr_copy(dev->dev_addr, addr->sa_data);

        return 0;
}

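/* The CPU/master interface receives traffic on behalf of all slave ports,
 * so a slave MAC address that differs from the master's has to be added to
 * the master's unicast filter (dev_uc_add above) and removed again once it
 * no longer matches.
 */
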
struct dsa_slave_dump_ctx {
        struct net_device *dev;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
                           bool is_static, void *data)
{
        struct dsa_slave_dump_ctx *dump = data;
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
        u32 seq = dump->cb->nlh->nlmsg_seq;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        if (dump->idx < dump->cb->args[2])
                goto skip;

        nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
                        sizeof(*ndm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = AF_BRIDGE;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = NTF_SELF;
        ndm->ndm_type    = 0;
        ndm->ndm_ifindex = dump->dev->ifindex;
        ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;

        if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
                goto nla_put_failure;

        if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
                goto nla_put_failure;

        nlmsg_end(dump->skb, nlh);

skip:
        dump->idx++;
        return 0;

nla_put_failure:
        nlmsg_cancel(dump->skb, nlh);
        return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                   struct net_device *dev, struct net_device *filter_dev,
                   int *idx)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_dump_ctx dump = {
                .dev = dev,
                .skb = skb,
                .cb = cb,
                .idx = *idx,
        };
        int err;

        err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
        *idx = dump.idx;

        return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (!dev->phydev)
                return -ENODEV;

        return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
                                   const struct switchdev_attr *attr,
                                   struct switchdev_trans *trans)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int ret;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
                ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
                                              trans);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        return ret;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
                                  const struct switchdev_obj *obj,
                                  struct switchdev_trans *trans)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        /* For the prepare phase, ensure the full set of changes is feasible in
         * one go in order to signal a failure properly. If an operation is not
         * supported, return -EOPNOTSUPP.
         */

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
                break;
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                /* DSA can directly translate this to a normal MDB add,
                 * but on the CPU port.
                 */
                err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
                                       trans);
                break;
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = dsa_port_vlan_add(dp, SWITCHDEV_OBJ_PORT_VLAN(obj),
                                        trans);
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int dsa_slave_port_obj_del(struct net_device *dev,
                                  const struct switchdev_obj *obj)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_MDB:
                err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_HOST_MDB:
                /* DSA can directly translate this to a normal MDB deletion,
                 * but on the CPU port.
                 */
                err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
                break;
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                err = dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
                break;
        default:
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static int dsa_slave_port_attr_get(struct net_device *dev,
                                   struct switchdev_attr *attr)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(dst->index);
                memcpy(&attr->u.ppid.id, &dst->index, attr->u.ppid.id_len);
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
                attr->u.brport_flags_support = 0;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
                                                     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
        struct dsa_slave_priv *p = netdev_priv(dev);

        if (p->netpoll)
                netpoll_send_skb(p->netpoll, skb);
#else
        BUG();
#endif
        return NETDEV_TX_OK;
}

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct pcpu_sw_netstats *s;
        struct sk_buff *nskb;

        s = this_cpu_ptr(p->stats64);
        u64_stats_update_begin(&s->syncp);
        s->tx_packets++;
        s->tx_bytes += skb->len;
        u64_stats_update_end(&s->syncp);

        /* Transmit function may have to reallocate the original SKB,
         * in which case it must have freed it. Only free it here on error.
         */
        nskb = p->xmit(skb, dev);
        if (!nskb) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        /* SKB for netpoll still needs to be mangled with the protocol-specific
         * tag to be successfully transmitted
         */
        if (unlikely(netpoll_tx_running(dev)))
                return dsa_slave_netpoll_send_skb(dev, nskb);

        /* Queue the SKB for transmission on the parent interface, but
         * do not modify its EtherType
         */
        nskb->dev = dsa_slave_to_master(dev);
        dev_queue_xmit(nskb);

        return NETDEV_TX_OK;
}

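/* TX path summary: update the per-cpu software counters, let the tagging
 * protocol's xmit hook prepend the switch tag (it may reallocate the skb),
 * then queue the tagged skb directly on the master interface without
 * changing its EtherType.
 */
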
/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_regs_len)
                return ds->ops->get_regs_len(ds, dp->index);

        return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_regs)
                ds->ops->get_regs(ds, dp->index, regs, _p);
}

static u32 dsa_slave_get_link(struct net_device *dev)
{
        if (!dev->phydev)
                return -ENODEV;

        genphy_update_link(dev->phydev);

        return dev->phydev->link;
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->cd && ds->cd->eeprom_len)
                return ds->cd->eeprom_len;

        if (ds->ops->get_eeprom_len)
                return ds->ops->get_eeprom_len(ds);

        return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_eeprom)
                return ds->ops->get_eeprom(ds, eeprom, data);

        return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->set_eeprom)
                return ds->ops->set_eeprom(ds, eeprom, data);

        return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
                                  uint32_t stringset, uint8_t *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (stringset == ETH_SS_STATS) {
                int len = ETH_GSTRING_LEN;

                strncpy(data, "tx_packets", len);
                strncpy(data + len, "tx_bytes", len);
                strncpy(data + 2 * len, "rx_packets", len);
                strncpy(data + 3 * len, "rx_bytes", len);
                if (ds->ops->get_strings)
                        ds->ops->get_strings(ds, dp->index, data + 4 * len);
        }
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats,
                                        uint64_t *data)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = dp->ds;
        struct pcpu_sw_netstats *s;
        unsigned int start;
        int i;

        for_each_possible_cpu(i) {
                u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

                s = per_cpu_ptr(p->stats64, i);
                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        tx_packets = s->tx_packets;
                        tx_bytes = s->tx_bytes;
                        rx_packets = s->rx_packets;
                        rx_bytes = s->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));
                data[0] += tx_packets;
                data[1] += tx_bytes;
                data[2] += rx_packets;
                data[3] += rx_bytes;
        }
        if (ds->ops->get_ethtool_stats)
                ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

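/* Slots 0-3 of the ethtool data are the software tx/rx packet and byte
 * counters accumulated above; hardware counters from the switch driver
 * start at data + 4, matching dsa_slave_get_strings() and
 * dsa_slave_get_sset_count().
 */
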
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (sset == ETH_SS_STATS) {
                int count;

                count = 4;
                if (ds->ops->get_sset_count)
                        count += ds->ops->get_sset_count(ds);

                return count;
        }

        return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (ds->ops->get_wol)
                ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret = -EOPNOTSUPP;

        if (ds->ops->set_wol)
                ret = ds->ops->set_wol(ds, dp->index, w);

        return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret;

        /* Port's PHY and MAC both need to be EEE capable */
        if (!dev->phydev)
                return -ENODEV;

        if (!ds->ops->set_mac_eee)
                return -EOPNOTSUPP;

        ret = ds->ops->set_mac_eee(ds, dp->index, e);
        if (ret)
                return ret;

        if (e->eee_enabled) {
                ret = phy_init_eee(dev->phydev, 0);
                if (ret)
                        return ret;
        }

        return phy_ethtool_set_eee(dev->phydev, e);
}

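/* EEE is only enabled when both sides of the port agree: the switch MAC is
 * configured first via ->set_mac_eee, and only then is the PHY updated
 * through phy_init_eee() and phy_ethtool_set_eee().
 */
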
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;
        int ret;

        /* Port's PHY and MAC both need to be EEE capable */
        if (!dev->phydev)
                return -ENODEV;

        if (!ds->ops->get_mac_eee)
                return -EOPNOTSUPP;

        ret = ds->ops->get_mac_eee(ds, dp->index, e);
        if (ret)
                return ret;

        return phy_ethtool_get_eee(dev->phydev, e);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
                                   struct netpoll_info *ni)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct netpoll *netpoll;
        int err = 0;

        netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
        if (!netpoll)
                return -ENOMEM;

        err = __netpoll_setup(netpoll, master);
        if (err) {
                kfree(netpoll);
                goto out;
        }

        p->netpoll = netpoll;
out:
        return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct netpoll *netpoll = p->netpoll;

        if (!netpoll)
                return;

        p->netpoll = NULL;

        __netpoll_free_async(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
                                        char *name, size_t len)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        if (snprintf(name, len, "p%d", dp->index) >= len)
                return -EINVAL;

        return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_mall_tc_entry *mall_tc_entry;

        list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
                if (mall_tc_entry->cookie == cookie)
                        return mall_tc_entry;

        return NULL;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
                                      struct tc_cls_matchall_offload *cls,
                                      bool ingress)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_mall_tc_entry *mall_tc_entry;
        __be16 protocol = cls->common.protocol;
        struct dsa_switch *ds = dp->ds;
        struct net_device *to_dev;
        const struct tc_action *a;
        struct dsa_port *to_dp;
        int err = -EOPNOTSUPP;
        LIST_HEAD(actions);

        if (!ds->ops->port_mirror_add)
                return err;

        if (!tcf_exts_has_one_action(cls->exts))
                return err;

        tcf_exts_to_list(cls->exts, &actions);
        a = list_first_entry(&actions, struct tc_action, list);

        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct dsa_mall_mirror_tc_entry *mirror;

                to_dev = tcf_mirred_dev(a);
                if (!to_dev)
                        return -EINVAL;

                if (!dsa_slave_dev_check(to_dev))
                        return -EOPNOTSUPP;

                mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
                if (!mall_tc_entry)
                        return -ENOMEM;

                mall_tc_entry->cookie = cls->cookie;
                mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
                mirror = &mall_tc_entry->mirror;

                to_dp = dsa_slave_to_port(to_dev);

                mirror->to_local_port = to_dp->index;
                mirror->ingress = ingress;

                err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
                if (err) {
                        kfree(mall_tc_entry);
                        return err;
                }

                list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
        }

        return 0;
}

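/* Only a single mirred egress-mirror action whose target is itself a DSA
 * slave interface can be offloaded here; the entry is kept on mall_tc_list
 * so it can later be looked up and removed by its cookie.
 */
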
static void dsa_slave_del_cls_matchall(struct net_device *dev,
                                       struct tc_cls_matchall_offload *cls)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_mall_tc_entry *mall_tc_entry;
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_mirror_del)
                return;

        mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
        if (!mall_tc_entry)
                return;

        list_del(&mall_tc_entry->list);

        switch (mall_tc_entry->type) {
        case DSA_PORT_MALL_MIRROR:
                ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
                break;
        default:
                WARN_ON(1);
        }

        kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
                                           struct tc_cls_matchall_offload *cls,
                                           bool ingress)
{
        if (cls->common.chain_index)
                return -EOPNOTSUPP;

        switch (cls->command) {
        case TC_CLSMATCHALL_REPLACE:
                return dsa_slave_add_cls_matchall(dev, cls, ingress);
        case TC_CLSMATCHALL_DESTROY:
                dsa_slave_del_cls_matchall(dev, cls);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                       void *cb_priv, bool ingress)
{
        struct net_device *dev = cb_priv;

        if (!tc_can_offload(dev))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSMATCHALL:
                return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
        default:
                return -EOPNOTSUPP;
        }
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
                                          void *type_data, void *cb_priv)
{
        return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
                                          void *type_data, void *cb_priv)
{
        return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static int dsa_slave_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        tc_setup_cb_t *cb;

        if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                cb = dsa_slave_setup_tc_block_cb_ig;
        else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                cb = dsa_slave_setup_tc_block_cb_eg;
        else
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, cb, dev, dev);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, cb, dev);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

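/* Ingress and egress clsact blocks are bound to the same callback; the two
 * wrappers above only differ in the ingress flag they pass down to the
 * matchall handlers.
 */
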
static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return dsa_slave_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static void dsa_slave_get_stats64(struct net_device *dev,
                                  struct rtnl_link_stats64 *stats)
{
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct pcpu_sw_netstats *s;
        unsigned int start;
        int i;

        netdev_stats_to_stats64(stats, &dev->stats);
        for_each_possible_cpu(i) {
                u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

                s = per_cpu_ptr(p->stats64, i);
                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        tx_packets = s->tx_packets;
                        tx_bytes = s->tx_bytes;
                        rx_packets = s->rx_packets;
                        rx_bytes = s->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
        }
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
                               struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->get_rxnfc)
                return -EOPNOTSUPP;

        return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
                               struct ethtool_rxnfc *nfc)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->set_rxnfc)
                return -EOPNOTSUPP;

        return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_drvinfo            = dsa_slave_get_drvinfo,
        .get_regs_len           = dsa_slave_get_regs_len,
        .get_regs               = dsa_slave_get_regs,
        .nway_reset             = phy_ethtool_nway_reset,
        .get_link               = dsa_slave_get_link,
        .get_eeprom_len         = dsa_slave_get_eeprom_len,
        .get_eeprom             = dsa_slave_get_eeprom,
        .set_eeprom             = dsa_slave_set_eeprom,
        .get_strings            = dsa_slave_get_strings,
        .get_ethtool_stats      = dsa_slave_get_ethtool_stats,
        .get_sset_count         = dsa_slave_get_sset_count,
        .set_wol                = dsa_slave_set_wol,
        .get_wol                = dsa_slave_get_wol,
        .set_eee                = dsa_slave_set_eee,
        .get_eee                = dsa_slave_get_eee,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_rxnfc              = dsa_slave_get_rxnfc,
        .set_rxnfc              = dsa_slave_set_rxnfc,
};

/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                       struct net_device *dev,
                       const unsigned char *addr, u16 vid,
                       u16 flags)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
                       struct net_device *dev,
                       const unsigned char *addr, u16 vid)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dsa_port_fdb_del(dp, addr, vid);
}

static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_open               = dsa_slave_open,
        .ndo_stop               = dsa_slave_close,
        .ndo_start_xmit         = dsa_slave_xmit,
        .ndo_change_rx_flags    = dsa_slave_change_rx_flags,
        .ndo_set_rx_mode        = dsa_slave_set_rx_mode,
        .ndo_set_mac_address    = dsa_slave_set_mac_address,
        .ndo_fdb_add            = dsa_legacy_fdb_add,
        .ndo_fdb_del            = dsa_legacy_fdb_del,
        .ndo_fdb_dump           = dsa_slave_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
        .ndo_get_iflink         = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
        .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
        .ndo_poll_controller    = dsa_slave_poll_controller,
#endif
        .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
        .ndo_setup_tc           = dsa_slave_setup_tc,
        .ndo_get_stats64        = dsa_slave_get_stats64,
};

static const struct switchdev_ops dsa_slave_switchdev_ops = {
        .switchdev_port_attr_get        = dsa_slave_port_attr_get,
        .switchdev_port_attr_set        = dsa_slave_port_attr_set,
        .switchdev_port_obj_add         = dsa_slave_port_obj_add,
        .switchdev_port_obj_del         = dsa_slave_port_obj_del,
};

static struct device_type dsa_type = {
        .name   = "dsa",
};

static void dsa_slave_adjust_link(struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = dp->ds;
        unsigned int status_changed = 0;

        if (p->old_link != dev->phydev->link) {
                status_changed = 1;
                p->old_link = dev->phydev->link;
        }

        if (p->old_duplex != dev->phydev->duplex) {
                status_changed = 1;
                p->old_duplex = dev->phydev->duplex;
        }

        if (p->old_pause != dev->phydev->pause) {
                status_changed = 1;
                p->old_pause = dev->phydev->pause;
        }

        if (ds->ops->adjust_link && status_changed)
                ds->ops->adjust_link(ds, dp->index, dev->phydev);

        if (status_changed)
                phy_print_status(dev->phydev);
}

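/* adjust_link is called by phylib whenever the PHY's link, duplex or pause
 * state changes; the cached old_* values in dsa_slave_priv avoid redundant
 * calls into the switch driver's ->adjust_link operation.
 */
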
static int dsa_slave_fixed_link_update(struct net_device *dev,
                                       struct fixed_phy_status *status)
{
        struct dsa_switch *ds;
        struct dsa_port *dp;

        if (dev) {
                dp = dsa_slave_to_port(dev);
                ds = dp->ds;
                if (ds->ops->fixed_link_update)
                        ds->ops->fixed_link_update(ds, dp->index, status);
        }

        return 0;
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
        struct dsa_switch *ds = dp->ds;

        slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
        if (!slave_dev->phydev) {
                netdev_err(slave_dev, "no phy at %d\n", addr);
                return -ENODEV;
        }

        /* Use already configured phy mode */
        if (p->phy_interface == PHY_INTERFACE_MODE_NA)
                p->phy_interface = slave_dev->phydev->interface;

        return phy_connect_direct(slave_dev, slave_dev->phydev,
                                  dsa_slave_adjust_link, p->phy_interface);
}

static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
        struct device_node *port_dn = dp->dn;
        struct dsa_switch *ds = dp->ds;
        struct device_node *phy_dn;
        bool phy_is_fixed = false;
        u32 phy_flags = 0;
        int mode, ret;

        mode = of_get_phy_mode(port_dn);
        if (mode < 0)
                mode = PHY_INTERFACE_MODE_NA;
        p->phy_interface = mode;

        phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
        if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
                /* In the case of a fixed PHY, the DT node associated
                 * to the fixed PHY is the Port DT node
                 */
                ret = of_phy_register_fixed_link(port_dn);
                if (ret) {
                        netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
                        return ret;
                }
                phy_is_fixed = true;
                phy_dn = of_node_get(port_dn);
        }

        if (ds->ops->get_phy_flags)
                phy_flags = ds->ops->get_phy_flags(ds, dp->index);

        if (phy_dn) {
                slave_dev->phydev = of_phy_connect(slave_dev, phy_dn,
                                                   dsa_slave_adjust_link,
                                                   phy_flags,
                                                   p->phy_interface);
                of_node_put(phy_dn);
        }

        if (slave_dev->phydev && phy_is_fixed)
                fixed_phy_set_link_update(slave_dev->phydev,
                                          dsa_slave_fixed_link_update);

        /* We could not connect to a designated PHY, so use the switch internal
         * MDIO bus instead
         */
        if (!slave_dev->phydev) {
                ret = dsa_slave_phy_connect(slave_dev, dp->index);
                if (ret) {
                        netdev_err(slave_dev, "failed to connect to port %d: %d\n",
                                   dp->index, ret);
                        if (phy_is_fixed)
                                of_phy_deregister_fixed_link(port_dn);
                        return ret;
                }
        }

        phy_attached_info(slave_dev->phydev);

        return 0;
}

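/* PHY resolution order: a "phy-handle" property in the port's DT node is
 * preferred, then a fixed-link description, and finally the PHY at the
 * port's own address on the switch-internal MDIO bus.
 */
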
static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
                                            struct netdev_queue *txq,
                                            void *_unused)
{
        lockdep_set_class(&txq->_xmit_lock,
                          &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
        struct dsa_slave_priv *p = netdev_priv(slave_dev);

        netif_device_detach(slave_dev);

        if (slave_dev->phydev) {
                phy_stop(slave_dev->phydev);
                p->old_pause = -1;
                p->old_link = -1;
                p->old_duplex = -1;
                phy_suspend(slave_dev->phydev);
        }

        return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
        netif_device_attach(slave_dev);

        if (slave_dev->phydev) {
                phy_resume(slave_dev->phydev);
                phy_start(slave_dev->phydev);
        }

        return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct dsa_notifier_register_info rinfo = {
                .switch_number = dp->ds->index,
                .port_number = dp->index,
                .master = master,
                .info.dev = dev,
        };

        call_dsa_notifiers(val, dev, &rinfo.info);
}

int dsa_slave_create(struct dsa_port *port)
{
        const struct dsa_port *cpu_dp = port->cpu_dp;
        struct net_device *master = cpu_dp->master;
        struct dsa_switch *ds = port->ds;
        const char *name = port->name;
        struct net_device *slave_dev;
        struct dsa_slave_priv *p;
        int ret;

        if (!ds->num_tx_queues)
                ds->num_tx_queues = 1;

        slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
                                     NET_NAME_UNKNOWN, ether_setup,
                                     ds->num_tx_queues, 1);
        if (slave_dev == NULL)
                return -ENOMEM;

        slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
        slave_dev->hw_features |= NETIF_F_HW_TC;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
        slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
        slave_dev->min_mtu = 0;
        slave_dev->max_mtu = ETH_MAX_MTU;
        SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

        netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
                                 NULL);

        SET_NETDEV_DEV(slave_dev, port->ds->dev);
        slave_dev->dev.of_node = port->dn;
        slave_dev->vlan_features = master->vlan_features;

        p = netdev_priv(slave_dev);
        p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!p->stats64) {
                free_netdev(slave_dev);
                return -ENOMEM;
        }
        p->dp = port;
        INIT_LIST_HEAD(&p->mall_tc_list);
        p->xmit = cpu_dp->tag_ops->xmit;

        p->old_pause = -1;
        p->old_link = -1;
        p->old_duplex = -1;

        port->slave = slave_dev;

        netif_carrier_off(slave_dev);

        ret = dsa_slave_phy_setup(slave_dev);
        if (ret) {
                netdev_err(master, "error %d setting up slave phy\n", ret);
                goto out_free;
        }

        dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

        ret = register_netdev(slave_dev);
        if (ret) {
                netdev_err(master, "error %d registering interface %s\n",
                           ret, slave_dev->name);
                goto out_phy;
        }

        return 0;

out_phy:
        phy_disconnect(slave_dev->phydev);
        if (of_phy_is_fixed_link(port->dn))
                of_phy_deregister_fixed_link(port->dn);
out_free:
        free_percpu(p->stats64);
        free_netdev(slave_dev);
        port->slave = NULL;
        return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
        struct device_node *port_dn = dp->dn;

        netif_carrier_off(slave_dev);
        if (slave_dev->phydev) {
                phy_disconnect(slave_dev->phydev);

                if (of_phy_is_fixed_link(port_dn))
                        of_phy_deregister_fixed_link(port_dn);
        }
        dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
        unregister_netdev(slave_dev);
        free_percpu(p->stats64);
        free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(struct net_device *dev)
{
        return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
                                 struct netdev_notifier_changeupper_info *info)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err = NOTIFY_DONE;

        if (netif_is_bridge_master(info->upper_dev)) {
                if (info->linking) {
                        err = dsa_port_bridge_join(dp, info->upper_dev);
                        err = notifier_from_errno(err);
                } else {
                        dsa_port_bridge_leave(dp, info->upper_dev);
                        err = NOTIFY_OK;
                }
        }

        return err;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
                                     unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (!dsa_slave_dev_check(dev))
                return NOTIFY_DONE;

        if (event == NETDEV_CHANGEUPPER)
                return dsa_slave_changeupper(dev, ptr);

        return NOTIFY_DONE;
}

struct dsa_switchdev_event_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
        unsigned long event;
};

static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
        struct dsa_switchdev_event_work *switchdev_work =
                container_of(work, struct dsa_switchdev_event_work, work);
        struct net_device *dev = switchdev_work->dev;
        struct switchdev_notifier_fdb_info *fdb_info;
        struct dsa_port *dp = dsa_slave_to_port(dev);
        int err;

        rtnl_lock();
        switch (switchdev_work->event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
                fdb_info = &switchdev_work->fdb_info;
                err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
                if (err) {
                        netdev_dbg(dev, "fdb add failed err=%d\n", err);
                        break;
                }
                call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
                                         &fdb_info->info);
                break;

        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = &switchdev_work->fdb_info;
                err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
                if (err) {
                        netdev_dbg(dev, "fdb del failed err=%d\n", err);
                        dev_close(dev);
                }
                break;
        }
        rtnl_unlock();

        kfree(switchdev_work->fdb_info.addr);
        kfree(switchdev_work);
        dev_put(dev);
}

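/* FDB notifications are delivered in atomic context, so the actual switch
 * programming is deferred to this work item under the RTNL lock; a
 * successful add is acknowledged back with SWITCHDEV_FDB_OFFLOADED.
 */
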
static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
                                  switchdev_work,
                                  const struct switchdev_notifier_fdb_info *
                                  fdb_info)
{
        memcpy(&switchdev_work->fdb_info, fdb_info,
               sizeof(switchdev_work->fdb_info));
        switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (!switchdev_work->fdb_info.addr)
                return -ENOMEM;
        ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
                        fdb_info->addr);
        return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
                                     unsigned long event, void *ptr)
{
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct dsa_switchdev_event_work *switchdev_work;

        if (!dsa_slave_dev_check(dev))
                return NOTIFY_DONE;

        switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
        if (!switchdev_work)
                return NOTIFY_BAD;

        INIT_WORK(&switchdev_work->work,
                  dsa_slave_switchdev_event_work);
        switchdev_work->dev = dev;
        switchdev_work->event = event;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                if (dsa_slave_switchdev_fdb_work_init(switchdev_work,
                                                      ptr))
                        goto err_fdb_work_init;
                dev_hold(dev);
                break;
        default:
                kfree(switchdev_work);
                return NOTIFY_DONE;
        }

        dsa_schedule_work(&switchdev_work->work);
        return NOTIFY_OK;

err_fdb_work_init:
        kfree(switchdev_work);
        return NOTIFY_BAD;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
        .notifier_call  = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
        .notifier_call = dsa_slave_switchdev_event,
};

int dsa_slave_register_notifier(void)
{
        int err;

        err = register_netdevice_notifier(&dsa_slave_nb);
        if (err)
                return err;

        err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
        if (err)
                goto err_switchdev_nb;

        return 0;

err_switchdev_nb:
        unregister_netdevice_notifier(&dsa_slave_nb);
        return err;
}

void dsa_slave_unregister_notifier(void)
{
        int err;

        err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
        if (err)
                pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

        err = unregister_netdevice_notifier(&dsa_slave_nb);
        if (err)
                pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}