#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# A test for strict prioritization of traffic in the switch. Run two streams of
# traffic, each through a different ingress port, one tagged with PCP of 1, the
# other with PCP of 2. Both streams converge at one egress port, where they are
# assigned TC of, respectively, 1 and 2, with strict priority configured
# between them. At H3, we expect to see (almost) exclusively the high-priority
# traffic.
#
# Please see qos_mc_aware.sh for an explanation of why we use mausezahn and
# counters instead of just running iperf3.
#
# +---------------------------+                 +-----------------------------+
# | H1                        |                 |                          H2 |
# |         $h1.111 +         |                 |         + $h2.222           |
# |   192.0.2.33/28 |         |                 |         | 192.0.2.65/28     |
# |   e-qos-map 0:1 |         |                 |         | e-qos-map 0:2     |
# |                 |         |                 |         |                   |
# |             $h1 +         |                 |         + $h2               |
# +-----------------|---------+                 +---------|-------------------+
#                   |                                     |
# +-----------------|-------------------------------------|-------------------+
# |           $swp1 +                                     + $swp2             |
# |          >1Gbps |                                     | >1Gbps            |
# | +---------------|-----------+              +----------|----------------+  |
# | |     $swp1.111 +           |              |          + $swp2.222      |  |
# | | BR111                     |      SW      | BR222                     |  |
# | |     $swp3.111 +           |              |          + $swp3.222      |  |
# | +---------------|-----------+              +----------|----------------+  |
# |                 \_____________________________________/                   |
# |                                    |                                      |
# |                                    + $swp3                                |
# |                                    | 1Gbps bottleneck                     |
# |                                    | ETS: (up n->tc n for n in 0..7)      |
# |                                    |      strict priority                 |
# +------------------------------------|--------------------------------------+
#                                      |
#                 +--------------------|--------------------+
#                 |                    + $h3             H3 |
#                 |                   / \                   |
#                 |                  /   \                  |
#                 |        $h3.111 +       + $h3.222        |
#                 |  192.0.2.34/28           192.0.2.66/28  |
#                 +-----------------------------------------+
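#
# Note that both ingress links are faster than the 1-Gbps bottleneck at $swp3,
# so when the two streams run concurrently, $swp3 stays congested and its
# scheduler must continually choose between TC 1 and TC 2; under strict
# priority, TC 2 should win almost every time.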

ALL_TESTS="
	ping_ipv4
	test_ets_strict
"

lib_dir=$(dirname $0)/../../../net/forwarding

NUM_NETIFS=6
source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
source qos_lib.sh

h1_create()
{
	simple_if_init $h1
	mtu_set $h1 10000

	vlan_create $h1 111 v$h1 192.0.2.33/28
	ip link set dev $h1.111 type vlan egress-qos-map 0:1
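	# The egress-qos-map rewrites the default skb priority of 0, which the
	# generated traffic carries, to PCP 1 in the VLAN tag, so everything
	# H1 sends over $h1.111 reaches the switch tagged with PCP 1
	# (h2_create below does the same with PCP 2).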
}

h1_destroy()
{
	vlan_destroy $h1 111

	mtu_restore $h1
	simple_if_fini $h1
}

h2_create()
{
	simple_if_init $h2
	mtu_set $h2 10000

	vlan_create $h2 222 v$h2 192.0.2.65/28
	ip link set dev $h2.222 type vlan egress-qos-map 0:2
}

h2_destroy()
{
	vlan_destroy $h2 222

	mtu_restore $h2
	simple_if_fini $h2
}

h3_create()
{
	simple_if_init $h3
	mtu_set $h3 10000

	vlan_create $h3 111 v$h3 192.0.2.34/28
	vlan_create $h3 222 v$h3 192.0.2.66/28
}

h3_destroy()
{
	vlan_destroy $h3 222
	vlan_destroy $h3 111

	mtu_restore $h3
	simple_if_fini $h3
}

switch_create()
{
	ip link set dev $swp1 up
	mtu_set $swp1 10000

	ip link set dev $swp2 up
	mtu_set $swp2 10000

	# prio n -> TC n, strict scheduling
	lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7
	lldptool -T -i $swp3 -V ETS-CFG tsa=$(
			)"0:strict,"$(
			)"1:strict,"$(
			)"2:strict,"$(
			)"3:strict,"$(
			)"4:strict,"$(
			)"5:strict,"$(
			)"6:strict,"$(
			)"7:strict"
	sleep 1

	ip link set dev $swp3 up
	mtu_set $swp3 10000
	ethtool -s $swp3 speed 1000 autoneg off

	vlan_create $swp1 111
	vlan_create $swp2 222
	vlan_create $swp3 111
	vlan_create $swp3 222

	ip link add name br111 up type bridge vlan_filtering 0
	ip link set dev $swp1.111 master br111
	ip link set dev $swp3.111 master br111

	ip link add name br222 up type bridge vlan_filtering 0
	ip link set dev $swp2.222 master br222
	ip link set dev $swp3.222 master br222
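
	# br111 and br222 are separate VLAN-unaware bridges, so the PCP-1 and
	# PCP-2 streams stay isolated from each other and only converge on the
	# physical port $swp3, where the ETS scheduler arbitrates between
	# their traffic classes.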

	# Make sure that ingress quotas are smaller than egress so that there
	# is room for both streams of traffic to be admitted to the shared
	# buffer.
	devlink_pool_size_thtype_save 0
	devlink_pool_size_thtype_set 0 dynamic 10000000
	devlink_pool_size_thtype_save 4
	devlink_pool_size_thtype_set 4 dynamic 10000000

	devlink_port_pool_th_save $swp1 0
	devlink_port_pool_th_set $swp1 0 6
	devlink_tc_bind_pool_th_save $swp1 1 ingress
	devlink_tc_bind_pool_th_set $swp1 1 ingress 0 6

	devlink_port_pool_th_save $swp2 0
	devlink_port_pool_th_set $swp2 0 6
	devlink_tc_bind_pool_th_save $swp2 2 ingress
	devlink_tc_bind_pool_th_set $swp2 2 ingress 0 6

	devlink_tc_bind_pool_th_save $swp3 1 egress
	devlink_tc_bind_pool_th_set $swp3 1 egress 4 7
	devlink_tc_bind_pool_th_save $swp3 2 egress
	devlink_tc_bind_pool_th_set $swp3 2 egress 4 7
	devlink_port_pool_th_save $swp3 4
	devlink_port_pool_th_set $swp3 4 7
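
	# For reference: the devlink_* helpers sourced from devlink_lib.sh are
	# thin wrappers around the devlink shared-buffer CLI. The $swp1
	# settings above correspond roughly to the following, assuming a
	# hypothetical device pci/0000:03:00.0 whose port 1 is $swp1:
	#
	#   devlink sb pool set pci/0000:03:00.0 pool 0 size 10000000 thtype dynamic
	#   devlink sb port pool set pci/0000:03:00.0/1 pool 0 th 6
	#   devlink sb tc bind set pci/0000:03:00.0/1 tc 1 type ingress pool 0 th 6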
}

switch_destroy()
{
	devlink_port_pool_th_restore $swp3 4
	devlink_tc_bind_pool_th_restore $swp3 2 egress
	devlink_tc_bind_pool_th_restore $swp3 1 egress

	devlink_tc_bind_pool_th_restore $swp2 2 ingress
	devlink_port_pool_th_restore $swp2 0

	devlink_tc_bind_pool_th_restore $swp1 1 ingress
	devlink_port_pool_th_restore $swp1 0

	devlink_pool_size_thtype_restore 4
	devlink_pool_size_thtype_restore 0

	ip link del dev br222
	ip link del dev br111

	vlan_destroy $swp3 222
	vlan_destroy $swp3 111
	vlan_destroy $swp2 222
	vlan_destroy $swp1 111

	ethtool -s $swp3 autoneg on
	mtu_restore $swp3
	ip link set dev $swp3 down
	lldptool -T -i $swp3 -V ETS-CFG up2tc=0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0

	mtu_restore $swp2
	ip link set dev $swp2 down

	mtu_restore $swp1
	ip link set dev $swp1 down
}

setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	h3=${NETIFS[p6]}

	h3mac=$(mac_get $h3)

	vrf_prepare

	h1_create
	h2_create
	h3_create
	switch_create
}

cleanup()
{
	pre_cleanup

	switch_destroy
	h3_destroy
	h2_destroy
	h1_destroy

	vrf_cleanup
}

ping_ipv4()
{
	ping_test $h1 192.0.2.34 " from H1"
	ping_test $h2 192.0.2.66 " from H2"
}
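
# rel OLD NEW prints NEW as a percentage of OLD, to two decimal places and
# clamped below at 0; e.g. "rel 1000 950" prints 95.00.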
rel()
{
	local old=$1; shift
	local new=$1; shift

	bc <<< "
	    scale=2
	    ret = 100 * $new / $old
	    if (ret > 0) { ret } else { 0 }
	"
}

test_ets_strict()
{
	RET=0

	# Run high-prio traffic on its own.
	start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
	local -a rate_2
	rate_2=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2"))
	check_err $? "Could not get high enough prio-2 ingress rate"
	local rate_2_in=${rate_2[0]}
	local rate_2_eg=${rate_2[1]}
	stop_traffic # $h2.222

	# Start the low-prio stream and leave it running.
	start_traffic $h1.111 192.0.2.33 192.0.2.34 $h3mac

	local -a rate_1
	rate_1=($(measure_rate $swp1 $h3 rx_octets_prio_1 "prio 1"))
	check_err $? "Could not get high enough prio-1 ingress rate"
	local rate_1_in=${rate_1[0]}
	local rate_1_eg=${rate_1[1]}

	# High-prio and low-prio on their own should have about the same
	# throughput.
	local rel21=$(rel $rate_1_eg $rate_2_eg)
	check_err $(bc <<< "$rel21 < 95")
	check_err $(bc <<< "$rel21 > 105")
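	# bc prints 1 when its comparison holds and 0 otherwise, and check_err
	# treats any non-zero argument as a failure, so the two checks above
	# fail the test if the standalone rates differ by more than 5% in
	# either direction.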

	# Restart the high-prio stream; now both streams run concurrently.
	start_traffic $h2.222 192.0.2.65 192.0.2.66 $h3mac
	local -a rate_3
	rate_3=($(measure_rate $swp2 $h3 rx_octets_prio_2 "prio 2 w/ 1"))
	check_err $? "Could not get high enough prio-2 ingress rate with prio-1"
	local rate_3_in=${rate_3[0]}
	local rate_3_eg=${rate_3[1]}
	stop_traffic # $h2.222

	stop_traffic # $h1.111

	# High-prio should have about the same throughput whether or not
	# low-prio is in the system.
	local rel32=$(rel $rate_2_eg $rate_3_eg)
	check_err $(bc <<< "$rel32 < 95")

	log_test "strict priority"
	echo "Ingress to switch:"
	echo "  p1 in rate            $(humanize $rate_1_in)"
	echo "  p2 in rate            $(humanize $rate_2_in)"
	echo "  p2 in rate w/ p1      $(humanize $rate_3_in)"
	echo "Egress from switch:"
	echo "  p1 eg rate            $(humanize $rate_1_eg)"
	echo "  p2 eg rate            $(humanize $rate_2_eg) ($rel21% of p1)"
	echo "  p2 eg rate w/ p1      $(humanize $rate_3_eg) ($rel32% of p2)"
}

trap cleanup EXIT

setup_prepare
setup_wait

tests_run

exit $EXIT_STATUS