// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR or PRP devices.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"
static bool is_admin_up(struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP);
}
static bool is_slave_up(struct net_device *dev)
{
	return dev && is_admin_up(dev) && netif_oper_up(dev);
}
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
	struct net_device *dev = master->dev;

	if (!is_admin_up(dev)) {
		netdev_set_operstate(dev, IF_OPER_DOWN);
		return;
	}

	if (has_carrier)
		netdev_set_operstate(dev, IF_OPER_UP);
	else
		netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}
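/* The master has carrier as long as at least one slave port is both
 * administratively and operationally up.
 */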
static bool hsr_check_carrier(struct hsr_port *master)
{
	struct hsr_port *port;

	ASSERT_RTNL();

	hsr_for_each_port(master->hsr, port) {
		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
			netif_carrier_on(master->dev);
			return true;
		}
	}

	netif_carrier_off(master->dev);

	return false;
}
static void hsr_check_announce(struct net_device *hsr_dev)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(hsr_dev);
	if (netif_running(hsr_dev) && netif_oper_up(hsr_dev)) {
		/* Enable announce timer and start sending supervisory frames */
		if (!timer_pending(&hsr->announce_timer)) {
			hsr->announce_count = 0;
			mod_timer(&hsr->announce_timer, jiffies +
				  msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
		}

		if (hsr->redbox && !timer_pending(&hsr->announce_proxy_timer))
			mod_timer(&hsr->announce_proxy_timer, jiffies +
				  msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL) / 2);
	} else {
		/* Deactivate the announce timer */
		timer_delete(&hsr->announce_timer);
		if (hsr->redbox)
			timer_delete(&hsr->announce_proxy_timer);
	}
}
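/* Recompute the master's carrier and operstate from its slave ports, then
 * start or stop the supervision announce timers accordingly.
 */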
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* netif_stacked_transfer_operstate() cannot be used here since
	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
	 */
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	hsr_check_announce(master->dev);
}
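/* The MTU usable on the master is limited by the smallest slave MTU, minus
 * the space taken by the HSR/PRP tag (HSR_HLEN).
 */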
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
	unsigned int mtu_max;
	struct hsr_port *port;

	mtu_max = ETH_DATA_LEN;
	hsr_for_each_port(hsr, port)
		if (port->type != HSR_PT_MASTER)
			mtu_max = min(port->dev->mtu, mtu_max);

	if (mtu_max < HSR_HLEN)
		return 0;
	return mtu_max - HSR_HLEN;
}
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	if (new_mtu > hsr_get_max_mtu(hsr)) {
		netdev_info(dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
			    HSR_HLEN);
		return -EINVAL;
	}

	WRITE_ONCE(dev->mtu, new_mtu);

	return 0;
}
static int hsr_dev_open(struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	const char *designation = NULL;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			designation = "Slave A";
			break;
		case HSR_PT_SLAVE_B:
			designation = "Slave B";
			break;
		case HSR_PT_INTERLINK:
			designation = "Interlink";
			break;
		default:
			designation = "Unknown";
		}

		if (!is_slave_up(port->dev))
			netdev_warn(dev, "%s (%s) is not up; please bring it up to get a fully working HSR network\n",
				    designation, port->dev->name);
	}

	if (!designation)
		netdev_warn(dev, "No slave devices configured\n");

	return 0;
}
static int hsr_dev_close(struct net_device *dev)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);
	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			dev_uc_unsync(port->dev, dev);
			dev_mc_unsync(port->dev, dev);
			break;
		default:
			break;
		}
	}

	return 0;
}
static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
						netdev_features_t features)
{
	netdev_features_t mask;
	struct hsr_port *port;

	mask = features;

	/* Mask out all features that, if supported by one device, should be
	 * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
	 *
	 * Anything that's off in mask will not be enabled - so only things
	 * that were in features originally, and also are in NETIF_F_ONE_FOR_ALL,
	 * may become enabled.
	 */
	features &= ~NETIF_F_ONE_FOR_ALL;
	hsr_for_each_port(hsr, port)
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);

	return features;
}
static netdev_features_t hsr_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	return hsr_features_recompute(hsr, features);
}
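/* Transmit path of the master device: the frame is handed to
 * hsr_forward_skb() under seqnr_lock, which protects the per-device
 * sequence number used when tagging outgoing frames.
 */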
static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *master;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (master) {
		skb->dev = master->dev;
		skb_reset_mac_header(skb);
		skb_reset_mac_len(skb);
		spin_lock_bh(&hsr->seqnr_lock);
		hsr_forward_skb(skb, master);
		spin_unlock_bh(&hsr->seqnr_lock);
	} else {
		dev_core_stats_tx_dropped_inc(dev);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static const struct header_ops hsr_header_ops = {
	.create = eth_header,
	.parse = eth_header_parse,
};
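/* Allocate an skb for a supervision frame: reserve the slave device's
 * headroom and tailroom and fill in an Ethernet header addressed to the
 * supervision multicast address.
 */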
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
	struct hsr_priv *hsr = master->hsr;
	struct sk_buff *skb;
	int hlen, tlen;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	/* skb size is same for PRP/HSR frames, only difference
	 * being, for PRP it is a trailer and for HSR it is a
	 * header
	 */
	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);
	if (!skb)
		return skb;

	skb_reserve(skb, hlen);
	skb->dev = master->dev;
	skb->priority = TC_PRIO_CONTROL;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
			    hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb;
out:
	kfree_skb(skb);

	return NULL;
}
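/* Build and send an HSR supervision frame: ANNOUNCE for the first frames of
 * an HSRv0 node, LIFE_CHECK otherwise. When operating as a RedBox, a second
 * TLV carrying the RedBox MAC address is appended when announcing on behalf
 * of a node in the proxy node table.
 */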
static void send_hsr_supervision_frame(struct hsr_port *port,
				       unsigned long *interval,
				       const unsigned char *addr)
{
	struct hsr_priv *hsr = port->hsr;
	__u8 type = HSR_TLV_LIFE_CHECK;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_stlv;
	struct hsr_sup_tag *hsr_stag;
	struct sk_buff *skb;

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
		type = HSR_TLV_ANNOUNCE;
		*interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
		hsr->announce_count++;
	}

	skb = hsr_init_skb(port);
	if (!skb) {
		netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n");
		return;
	}

	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_bh(&hsr->seqnr_lock);
	if (hsr->prot_version > 0) {
		hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
		hsr->sup_sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(hsr->sequence_nr);
		hsr->sequence_nr++;
	}

	hsr_stag->tlv.HSR_TLV_type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
				       sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA / SAN MAC from ProxyNodeTable */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, addr);

	if (hsr->redbox &&
	    hsr_is_node_in_db(&hsr->proxy_node_db, addr)) {
		hsr_stlv = skb_put(skb, sizeof(struct hsr_sup_tlv));
		hsr_stlv->HSR_TLV_type = PRP_TLV_REDBOX_MAC;
		hsr_stlv->HSR_TLV_length = sizeof(struct hsr_sup_payload);

		/* Payload: MacAddressRedBox */
		hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
		ether_addr_copy(hsr_sp->macaddress_A, hsr->macaddress_redbox);
	}

	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_bh(&hsr->seqnr_lock);
		return;
	}

	hsr_forward_skb(skb, port);
	spin_unlock_bh(&hsr->seqnr_lock);
}
static void send_prp_supervision_frame(struct hsr_port *master,
				       unsigned long *interval,
				       const unsigned char *addr)
{
	struct hsr_priv *hsr = master->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	struct sk_buff *skb;

	skb = hsr_init_skb(master);
	if (!skb) {
		netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
		return;
	}

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_bh(&hsr->seqnr_lock);
	hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
	hsr->sup_sequence_nr++;
	hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
	hsr_stag->tlv.HSR_TLV_length = sizeof(struct hsr_sup_payload);

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_bh(&hsr->seqnr_lock);
		return;
	}

	hsr_forward_skb(skb, master);
	spin_unlock_bh(&hsr->seqnr_lock);
}
/* Announce (supervision frame) timer function
 */
static void hsr_announce(struct timer_list *t)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;
	unsigned long interval;

	hsr = from_timer(hsr, t, announce_timer);

	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr->proto_ops->send_sv_frame(master, &interval, master->dev->dev_addr);

	if (is_admin_up(master->dev))
		mod_timer(&hsr->announce_timer, jiffies + interval);

	rcu_read_unlock();
}
/* Announce (supervision frame) timer function for RedBox
 */
static void hsr_proxy_announce(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, announce_proxy_timer);
	struct hsr_port *interlink;
	unsigned long interval = 0;
	struct hsr_node *node;

	rcu_read_lock();
	/* RedBOX sends supervisory frames to HSR network with MAC addresses
	 * of SAN nodes stored in ProxyNodeTable.
	 */
	interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
	if (!interlink)
		goto done;

	list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
		if (hsr_addr_is_redbox(hsr, node->macaddress_A))
			continue;
		hsr->proto_ops->send_sv_frame(interlink, &interval,
					      node->macaddress_A);
	}

	if (is_admin_up(interlink->dev)) {
		if (!interval)
			interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);

		mod_timer(&hsr->announce_proxy_timer, jiffies + interval);
	}

done:
	rcu_read_unlock();
}
void hsr_del_ports(struct hsr_priv *hsr)
{
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (port)
		hsr_del_port(port);
}
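/* Keep the slave ports' unicast and multicast address lists in sync with
 * the master device.
 */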
static void hsr_set_rx_mode(struct net_device *dev)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			dev_mc_sync_multiple(port->dev, dev);
			dev_uc_sync_multiple(port->dev, dev);
			break;
		default:
			break;
		}
	}
}
static void hsr_change_rx_flags(struct net_device *dev, int change)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			if (change & IFF_ALLMULTI)
				dev_set_allmulti(port->dev,
						 dev->flags &
						 IFF_ALLMULTI ? 1 : -1);
			break;
		default:
			break;
		}
	}
}
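/* VLAN filter entries added on the master are mirrored onto both slave
 * ports; if adding the vid to one slave fails, the entry that was already
 * added is removed again before returning the error.
 */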
static int hsr_ndo_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	bool is_slave_a_added = false;
	bool is_slave_b_added = false;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	int ret;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER ||
		    port->type == HSR_PT_INTERLINK)
			continue;

		ret = vlan_vid_add(port->dev, proto, vid);
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			if (ret) {
				/* clean up Slave-B */
				netdev_err(dev, "add vid failed for Slave-A\n");
				if (is_slave_b_added)
					vlan_vid_del(port->dev, proto, vid);
				return ret;
			}

			is_slave_a_added = true;
			break;

		case HSR_PT_SLAVE_B:
			if (ret) {
				/* clean up Slave-A */
				netdev_err(dev, "add vid failed for Slave-B\n");
				if (is_slave_a_added)
					vlan_vid_del(port->dev, proto, vid);
				return ret;
			}

			is_slave_b_added = true;
			break;
		default:
			break;
		}
	}

	return 0;
}
static int hsr_ndo_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			vlan_vid_del(port->dev, proto, vid);
			break;
		default:
			break;
		}
	}

	return 0;
}
static const struct net_device_ops hsr_device_ops = {
	.ndo_change_mtu = hsr_dev_change_mtu,
	.ndo_open = hsr_dev_open,
	.ndo_stop = hsr_dev_close,
	.ndo_start_xmit = hsr_dev_xmit,
	.ndo_change_rx_flags = hsr_change_rx_flags,
	.ndo_fix_features = hsr_fix_features,
	.ndo_set_rx_mode = hsr_set_rx_mode,
	.ndo_vlan_rx_add_vid = hsr_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = hsr_ndo_vlan_rx_kill_vid,
};
static const struct device_type hsr_type = {
	.name = "hsr",
};
static struct hsr_proto_ops hsr_ops = {
	.send_sv_frame = send_hsr_supervision_frame,
	.create_tagged_frame = hsr_create_tagged_frame,
	.get_untagged_frame = hsr_get_untagged_frame,
	.drop_frame = hsr_drop_frame,
	.fill_frame_info = hsr_fill_frame_info,
	.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};
static struct hsr_proto_ops prp_ops = {
	.send_sv_frame = send_prp_supervision_frame,
	.create_tagged_frame = prp_create_tagged_frame,
	.get_untagged_frame = prp_get_untagged_frame,
	.drop_frame = prp_drop_frame,
	.fill_frame_info = prp_fill_frame_info,
	.handle_san_frame = prp_handle_san_frame,
	.update_san_info = prp_update_san_info,
};
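/* Set up the master net_device: Ethernet defaults, HSR netdev/header ops
 * and the master's offloadable feature set.
 */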
void hsr_dev_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);

	ether_setup(dev);
	dev->min_mtu = 0;
	dev->header_ops = &hsr_header_ops;
	dev->netdev_ops = &hsr_device_ops;
	SET_NETDEV_DEVTYPE(dev, &hsr_type);
	dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
	/* Prevent recursive tx locking */
	dev->lltx = true;
	/* Not sure about this. Taken from bridge code. netdevice.h says
	 * it means "Does not change network namespaces".
	 */
	dev->netns_local = true;

	dev->needs_free_netdev = true;

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->features = dev->hw_features;
}
/* Return true if dev is a HSR master; return false otherwise.
 */
bool is_hsr_master(struct net_device *dev)
{
	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);
/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
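/* Second stage of device creation, after hsr_dev_setup(): initialize lists,
 * locks and timers, attach the slave (and optional interlink) ports and
 * register the master device. Called under rtnl_lock.
 */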
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
		     struct net_device *interlink, unsigned char multicast_spec,
		     u8 protocol_version, struct netlink_ext_ack *extack)
{
	bool unregister = false;
	struct hsr_priv *hsr;
	int res;

	hsr = netdev_priv(hsr_dev);
	INIT_LIST_HEAD(&hsr->ports);
	INIT_LIST_HEAD(&hsr->node_db);
	INIT_LIST_HEAD(&hsr->proxy_node_db);
	spin_lock_init(&hsr->list_lock);

	eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);

	/* initialize protocol specific functions */
	if (protocol_version == PRP_V1) {
		/* For PRP, lan_id has most significant 3 bits holding
		 * the net_id of PRP_LAN_ID
		 */
		hsr->net_id = PRP_LAN_ID << 1;
		hsr->proto_ops = &prp_ops;
	} else {
		hsr->proto_ops = &hsr_ops;
	}

	/* Make sure we recognize frames from ourselves in hsr_rcv() */
	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
				   slave[1]->dev_addr);
	if (res < 0)
		return res;

	spin_lock_init(&hsr->seqnr_lock);
	/* Overflow soon to find bugs easier: */
	hsr->sequence_nr = HSR_SEQNR_START;
	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

	timer_setup(&hsr->announce_timer, hsr_announce, 0);
	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
	timer_setup(&hsr->prune_proxy_timer, hsr_prune_proxy_nodes, 0);
	timer_setup(&hsr->announce_proxy_timer, hsr_proxy_announce, 0);

	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

	hsr->prot_version = protocol_version;

	/* Make sure the 1st call to netif_carrier_on() gets through */
	netif_carrier_off(hsr_dev);

	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
	if (res)
		goto err_add_master;

	/* HSR forwarding offload supported in lower device? */
	if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
	    (slave[1]->features & NETIF_F_HW_HSR_FWD))
		hsr->fwd_offloaded = true;

	if ((slave[0]->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (slave[1]->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		hsr_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	res = register_netdevice(hsr_dev);
	if (res)
		goto err_unregister;

	unregister = true;

	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
	if (res)
		goto err_unregister;

	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
	if (res)
		goto err_unregister;

	if (interlink) {
		res = hsr_add_port(hsr, interlink, HSR_PT_INTERLINK, extack);
		if (res)
			goto err_unregister;

		hsr->redbox = true;
		ether_addr_copy(hsr->macaddress_redbox, interlink->dev_addr);
		mod_timer(&hsr->prune_proxy_timer,
			  jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
	}

	hsr_debugfs_init(hsr, hsr_dev);
	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

	return 0;

err_unregister:
	hsr_del_ports(hsr);
err_add_master:
	hsr_del_self_node(hsr);

	if (unregister)
		unregister_netdevice(hsr_dev);
	return res;
}