# SPDX-License-Identifier: GPL-2.0

# Double quotes to prevent globbing and word splitting are recommended in new
# code, but we accept unquoted variables here, especially because there were
# too many before having addressed all other issues detected by shellcheck.
#shellcheck disable=SC2086

. "$(dirname "${0}")/mptcp_lib.sh"
timeout_test=$((timeout_poll * 2 + 1))
# a bit more space: because we have more to display
MPTCP_LIB_TEST_FORMAT="%02u %-60s"
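# Illustration (assuming mptcp_lib.sh feeds this format to printf together
# with the test counter and title): printf '%02u %-60s' 3 'balanced bwidth'
# prints "03 balanced bwidth" with the title left-justified in a 60-column
# field.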
echo "Usage: $0 [ -b ] [ -c ] [ -d ] [ -i ]"
echo -e "\t-b: bail out after first error, otherwise runs all testcases"
echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
echo -e "\t-d: debug this script"
echo -e "\t-i: use 'ip mptcp' instead of 'pm_nl_ctl'"
# This function is used in the cleanup trap
#shellcheck disable=SC2317
rm -f "$large" "$small"

mptcp_lib_ns_exit "${ns1}" "${ns2}" "${ns3}"
mptcp_lib_check_tools ip tc

# ns1eth1    ns2eth1   ns2eth3    ns3eth1
size=$((2 * 2048 * 4096))

dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1
dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1
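# Resulting file sizes, derived from the commands above: $small is
# 20 * 4096 = 80 KiB, $large is size = 2 * 2048 * 4096 bytes = 16 MiB
# (4096 blocks of 4 KiB each).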
mptcp_lib_ns_init ns1 ns2 ns3

ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
ip link add ns2eth3 netns "$ns2" type veth peer name ns3eth1 netns "$ns3"
ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth1
ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth1 nodad
ip -net "$ns1" link set ns1eth1 up mtu 1500
ip -net "$ns1" route add default via 10.0.1.2
ip -net "$ns1" route add default via dead:beef:1::2

ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
ip -net "$ns1" link set ns1eth2 up mtu 1500
ip -net "$ns1" route add default via 10.0.2.2 metric 101
ip -net "$ns1" route add default via dead:beef:2::2 metric 101
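# With the limits below and 10.0.2.1 registered as a 'subflow' endpoint,
# the in-kernel path manager should open one additional subflow over
# ns1eth2 next to the initial subflow on ns1eth1, so both links carry
# data at the same time.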
mptcp_lib_pm_nl_set_limits "${ns1}" 1 1
mptcp_lib_pm_nl_add_endpoint "${ns1}" 10.0.2.1 dev ns1eth2 flags subflow
ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
ip -net "$ns2" link set ns2eth1 up mtu 1500

ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth2
ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth2 nodad
ip -net "$ns2" link set ns2eth2 up mtu 1500

ip -net "$ns2" addr add 10.0.3.2/24 dev ns2eth3
ip -net "$ns2" addr add dead:beef:3::2/64 dev ns2eth3 nodad
ip -net "$ns2" link set ns2eth3 up mtu 1500
ip netns exec "$ns2" sysctl -q net.ipv4.ip_forward=1
ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.forwarding=1
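# ns2 acts as the router between the client and server namespaces: with
# forwarding enabled it relays traffic from the 10.0.1.0/24 and 10.0.2.0/24
# segments towards 10.0.3.0/24, where ns3 lives.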
ip -net "$ns3" addr add 10.0.3.3/24 dev ns3eth1
ip -net "$ns3" addr add dead:beef:3::3/64 dev ns3eth1 nodad
ip -net "$ns3" link set ns3eth1 up mtu 1500
ip -net "$ns3" route add default via 10.0.3.2
ip -net "$ns3" route add default via dead:beef:3::2

mptcp_lib_pm_nl_set_limits "${ns3}" 1 1
# a debug build can measurably slow down the test program;
# we use quite tight time limits on the run-time, to ensure
# maximum coverage.
# Use kmemleak/lockdep/kasan/prove_locking presence as a rough
# estimate for this being a debug kernel and increase the
# maximum run-time accordingly. Observed run times for CI builds
# running selftests, including kbuild, were used to determine the
# amount of time to add.
grep -q ' kmemleak_init$\| lockdep_init$\| kasan_init$\| prove_locking$' /proc/kallsyms && slack=$((slack+550))
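# On a kernel that exposes any of these debug init symbols, the per-test
# time budget below grows by an extra 550ms; production builds keep the
# base slack value.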
port=$((10000+MPTCP_LIB_TEST_COUNTER))
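# Each subtest listens on its own port above 10000, derived from the
# library's test counter, so a stale listener from a previous subtest
# cannot interfere with the next one.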
local rndh="${ns1:4}"
if [ -z $SUDO_USER ] ; then
	capuser=""
else
	capuser="-Z $SUDO_USER"
fi

local capfile="${rndh}-${port}"
local capopt="-i any -s 65535 -B 32768 ${capuser}"
ip netns exec ${ns3} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
local cappid_listener=$!

ip netns exec ${ns1} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
local cappid_connector=$!
NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
	nstat -n
NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
	nstat -n
timeout ${timeout_test} \
	ip netns exec ${ns3} \
		./mptcp_connect -jt ${timeout_poll} -l -p $port -T $max_time \
			0.0.0.0 < "$sin" > "$sout" &
mptcp_lib_wait_local_port_listen "${ns3}" "${port}"
timeout ${timeout_test} \
	ip netns exec ${ns1} \
		./mptcp_connect -jt ${timeout_poll} -p $port -T $max_time \
			10.0.3.3 < "$cin" > "$cout" &
kill ${cappid_listener}
kill ${cappid_connector}
NSTAT_HISTORY=/tmp/${ns3}.nstat ip netns exec ${ns3} \
	nstat | grep Tcp > /tmp/${ns3}.out
NSTAT_HISTORY=/tmp/${ns1}.nstat ip netns exec ${ns1} \
	nstat | grep Tcp > /tmp/${ns1}.out
cmp $sin $cout > /dev/null 2>&1
local cmpc=$?
cmp $cin $sout > /dev/null 2>&1
local cmps=$?
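# cmp exits 0 only when the two files are byte-identical, so cmpc and cmps
# record whether each direction delivered the transferred data intact.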
if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
   [ $cmpc -eq 0 ] && [ $cmps -eq 0 ]; then
	printf "%-16s" " max $max_time "
mptcp_lib_pr_fail "client exit code $retc, server $rets"
mptcp_lib_pr_err_stats "${ns3}" "${ns1}" "${port}" \
	"/tmp/${ns3}.out" "/tmp/${ns1}.out"
[ $delay1 -gt 0 ] && delay1="delay ${delay1}ms" || delay1=""
[ $delay2 -gt 0 ] && delay2="delay ${delay2}ms" || delay2=""
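# e.g. delay1=25 expands to "delay 25ms" for the netem commands below,
# while a value of 0 leaves the variable empty so no artificial delay is
# added on that link.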
for dev in ns1eth1 ns1eth2; do
	tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
done
for dev in ns2eth1 ns2eth2; do
	tc -n $ns2 qdisc del dev $dev root >/dev/null 2>&1
done
tc -n $ns1 qdisc add dev ns1eth1 root netem rate ${rate1}mbit $delay1
tc -n $ns1 qdisc add dev ns1eth2 root netem rate ${rate2}mbit $delay2
tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
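# Example expansion (assuming rate1=10 and delay1="delay 25ms"): the first
# command above becomes:
#   tc -n $ns1 qdisc add dev ns1eth1 root netem rate 10mbit delay 25ms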
# time is measured in ms, account for transfer size, aggregated link speed
# and header overhead (10%)
#              ms    byte -> bit   10%        mbit      -> kbit -> bit  10%
local time=$((1000 * size  *  8  *  10 / ((rate1 + rate2) * 1000 * 1000 * 9)))
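# Worked example (illustrative, using the defaults above): with
# size = 2 * 2048 * 4096 = 16777216 bytes and rate1 = rate2 = 10 mbit,
# time = 1000 * 16777216 * 8 * 10 / (20 * 1000 * 1000 * 9) = 7456 ms
# (integer division), before the handshake allowance and slack are added.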
# mptcp_connect will do some sleeps to allow the mp_join handshake to
# complete (see mptcp_connect): 200ms on each side; add some slack
time=$((time + 400 + slack))
mptcp_lib_print_title "$msg"
do_transfer $small $large $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
	[ $bail -eq 0 ] || exit $ret
fi

msg+=" - reverse direction"
mptcp_lib_print_title "${msg}"
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
	[ $bail -eq 0 ] || exit $ret
fi
while getopts "bcdhi" option; do

mptcp_lib_set_ip_mptcp

mptcp_lib_subtests_last_ts_reset
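# run_test arguments, as inferred from the call sites and test names below:
# rate1 rate2 delay1 delay2 message (rates in mbit, delays in ms).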
run_test 10 10 0 0 "balanced bwidth"
run_test 10 10 1 25 "balanced bwidth with unbalanced delay"

# we still need some additional infrastructure to pass the following test-cases
MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"

mptcp_lib_result_print_all_tap