// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"
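
/* MRP multicast destination addresses: ring MRP_Test frames are sent to
 * 01:15:4e:00:00:01 and interconnect MRP_InTest frames to 01:15:4e:00:00:03,
 * from the multicast range reserved for MRP (IEC 62439-2).
 */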
static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);

static struct br_frame_type mrp_frame_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MRP),
	.frame_handler = br_mrp_process,
};
static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	return port == p_port || port == s_port;
}

static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	return port == i_port;
}
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}
static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}
/* Return false if @ifindex is already used as a port by any MRP instance. */
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}
/* Return the next sequence id placed in the MRP_Common TLV of each frame. */
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}
/* Allocate an skb for an MRP frame: Ethernet header with the MRP EtherType
 * followed by the 16-bit MRP version, ready for the TLVs to be appended.
 */
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}
static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}
static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}
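
/* Build an MRP_Test PDU: Ethernet header and version (from br_mrp_skb_alloc),
 * then a RING_TEST TLV, the MRP_Common TLV and, when the node is an MRA, an
 * Option TLV carrying the TEST_AUTO_MGR sub-TLV, terminated by an END TLV.
 */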
static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);

	/* In case the node behaves as MRA then the Test frame needs to have
	 * an Option TLV which eventually includes a sub-option TLV that has
	 * the type AUTO_MGR.
	 */
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
		struct br_mrp_sub_option1_hdr *sub_opt = NULL;
		struct br_mrp_tlv_hdr *sub_tlv = NULL;
		struct br_mrp_oui_hdr *oui = NULL;
		u8 length;

		length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(oui) +
			 MRP_OPT_PADDING;
		br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);

		oui = skb_put(skb, sizeof(*oui));
		memset(oui, 0x0, sizeof(*oui));
		sub_opt = skb_put(skb, sizeof(*sub_opt));
		memset(sub_opt, 0x0, sizeof(*sub_opt));

		sub_tlv = skb_put(skb, sizeof(*sub_tlv));
		sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;

		/* 32 bit alignment shall be ensured therefore add 2 bytes */
		skb_put(skb, MRP_OPT_PADDING);
	}

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}
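
/* The MRP_InTest PDU below mirrors the ring MRP_Test PDU, but carries the
 * interconnect id, state and transition counter instead of the ring ones.
 */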
static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}
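
/* Software test-frame handling is driven by delayed work: each run below
 * sends and/or monitors test frames and re-queues itself every test_interval
 * microseconds until test_end is reached.
 */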
/* This function is continuously called in the following cases:
 * - when the node role is MRM, in which case test_monitor is always set to
 *   false because it needs to notify the userspace that the ring is open and
 *   needs to send MRP_Test frames
 * - when the node role is MRA, there are 2 subcases:
 *     - when MRA behaves as MRM, which is similar to the MRM case
 *     - when MRA behaves as MRC, in which case test_monitor is set to true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames itself.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * role MRA and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * the MRM role.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}
/* This function is continuously called when the node has the interconnect role
 * MIM. It generates interconnect test frames, sends them on all 3 ports, and
 * also checks whether the node stops receiving interconnect test frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}
/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	/* Disable the roles */
	br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED);
	p = rtnl_dereference(mrp->i_port);
	if (p)
		br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id,
					     BR_MRP_IN_ROLE_DISABLED);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	hlist_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);

	if (hlist_empty(&br->mrp_list))
		br_del_frame(br, &mrp_frame_type);
}
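
/* The functions below implement the configuration interface. They are called
 * from the bridge's MRP netlink handlers with rtnl held; a userspace MRP
 * agent drives the protocol state machine through them.
 */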
/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with the
	 * same ring_id.
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	if (hlist_empty(&br->mrp_list))
		br_add_frame(br, &mrp_frame_type);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	hlist_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}
/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}
/* Deletes an existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}
/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}
/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}
/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}
/* Set ring role, ring role can be only MRM(Media Redundancy Manager) or
 * MRC(Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW will
	 * notify the SW when the ring is open, but if the role is not pushed
	 * to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}
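
/* The br_mrp_switchdev_* helpers report how much of an operation the
 * underlying hardware took over: BR_MRP_NONE means it failed, BR_MRP_SW means
 * the software implementation in this file must do the work, and BR_MRP_HW
 * means it is fully offloaded.
 */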
/* Start generating or monitoring MRP_Test frames. The frames are generated by
 * the HW and, if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
						  test->max_miss, test->period,
						  test->monitor);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}
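
/* The interconnect (MIM/MIC) configuration below mirrors the ring
 * configuration above.
 */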
/* Set in state, the interconnect state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}
/* Set in role, in role can be only MIM(Media Interconnection Manager) or
 * MIC(Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;
	struct net_bridge_port *p;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance has already one. First it needs to be disabled and after
	 * that set the new port.
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					       role->ring_id, role->in_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MIM then the HW will
	 * notify the SW when the interconnect ring is open, but if the role is
	 * not pushed to the HW the SW will need to detect when the
	 * interconnect ring is open.
	 */
	mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}
/* Start generating MRP_InTest frames. The frames are generated by the HW and,
 * if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
						in_test->max_miss,
						in_test->period);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}
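
/* Everything below runs on the receive path under rcu_read_lock: received MRP
 * PDUs are classified by their first TLV and then processed and/or forwarded
 * according to the ring and interconnect roles.
 */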
/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}
/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
		return true;

	return false;
}
/* Process only the MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring is
	 * not already closed.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}
/* Determine if the test hdr has a better priority than the node */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	     ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}
/* Process only the MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than the node will
	 * clear the miss counter because otherwise the node will need to
	 * behave as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}
/* Process only the MRP InTest frames. All the other MRP frames are processed
 * by the userspace application.
 * note: already called with rcu_read_lock
 */
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames. */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify the userspace that the interconnect ring is closed only when
	 * it is not already closed.
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}
/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}
static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}
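
/* An MRA node acts as an MRM (generates MRP_Test frames) until userspace asks
 * it to only monitor the ring via test_monitor, in which case it behaves as an
 * MRC and merely watches for MRP_Test frames from the acting MRM.
 */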
/* This will just forward the frame to the other MRP ring ports, depending on
 * the frame type, ring role and interconnect role
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame.
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as an MRM node.
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in blocking state it should not forward
		 * In_Test frames.
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* Nodes that behave as MRM need to stop forwarding the frames
		 * in case the ring is closed, otherwise there would be a loop.
		 * In this case the frame is not forwarded between the ring
		 * ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an interconnect
		 * role should forward all frames between the ring ports
		 * because it doesn't have an interconnect port.
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames.
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange/Status and
				 * IntTopoChange between ring ports but MIM
				 * should not forward IntLinkChange/Status and
				 * IntTopoChange if the frame was received at
				 * the interconnect port.
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the received port.
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames to all the
			 * ports only if they are received on ring ports.
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* MIC should forward IntLinkStatus frames only to the
			 * interconnect port if they were received on a ring
			 * port. If they are received on the interconnect port
			 * then they should be forwarded on both ring ports.
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
				p_dst = NULL;
				s_dst = NULL;
			}

			/* Should forward the InTopo frames only between the
			 * ring ports.
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}
/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has the MRP EtherType. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	return br_mrp_rcv(p, skb, p->dev);
out:
	return 0;
}
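
/* Lets the rest of the bridge check cheaply whether any MRP instance is
 * configured on this bridge.
 */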
bool br_mrp_enabled(struct net_bridge *br)
{
	return !hlist_empty(&br->mrp_list);
}