// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit tests
 *
 * Copyright (C) 2020, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <kunit/test.h>
#include <linux/idr.h>

#include "tb.h"
#include "tunnel.h"

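/*
 * The helpers below build an in-memory mock of a Thunderbolt/USB4 domain
 * (switches, ports, lane bonding) so that the path walking, tunnel and
 * credit allocation logic can be exercised without real hardware.
 */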
static int __ida_init(struct kunit_resource *res, void *context)
{
	struct ida *ida = context;

	ida_init(ida);
	res->data = ida;
	return 0;
}

static void __ida_destroy(struct kunit_resource *res)
{
	struct ida *ida = res->data;

	ida_destroy(ida);
}

static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}

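/*
 * Allocate a bare mock switch with @max_port_number + 1 ports. Port 0 is
 * the control port; every other port gets test-managed HopID IDAs.
 */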
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
				      u8 upstream_port, u8 max_port_number)
{
	struct tb_switch *sw;
	size_t size;
	int i;

	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
	if (!sw)
		return NULL;

	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	sw->config.max_port_number = max_port_number;

	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
	if (!sw->ports)
		return NULL;

	for (i = 0; i <= sw->config.max_port_number; i++) {
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		sw->ports[i].config.port_number = i;
		if (i) {
			kunit_ida_init(test, &sw->ports[i].in_hopids);
			kunit_ida_init(test, &sw->ports[i].out_hopids);
		}
	}

	return sw;
}

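/*
 * Host router layout: ports 1-4 are two dual-link lane pairs, 5-6 are DP IN
 * adapters, 7 is the NHI, 8-9 PCIe downstream, 10-11 unused, 12-13 USB3
 * downstream.
 */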
static struct tb_switch *alloc_host(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_switch(test, 0, 7, 13);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x9a1b;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 7;
	sw->ports[0].config.max_out_hop_id = 7;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[5].config.max_in_hop_id = 9;
	sw->ports[5].config.max_out_hop_id = 9;
	sw->ports[5].cap_adap = -1;

	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[6].config.max_in_hop_id = 9;
	sw->ports[6].config.max_out_hop_id = 9;
	sw->ports[6].cap_adap = -1;

	sw->ports[7].config.type = TB_TYPE_NHI;
	sw->ports[7].config.max_in_hop_id = 11;
	sw->ports[7].config.max_out_hop_id = 11;
	sw->ports[7].config.nfc_credits = 0x41800000;

	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[8].config.max_in_hop_id = 8;
	sw->ports[8].config.max_out_hop_id = 8;

	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].disabled = true;
	sw->ports[11].disabled = true;

	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[13].config.max_in_hop_id = 8;
	sw->ports[13].config.max_out_hop_id = 8;

	return sw;
}

static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host(test);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 32;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 64;
	sw->max_dma_credits = 14;

	return sw;
}

static struct tb_switch *alloc_host_br(struct kunit *test)
{
	struct tb_switch *sw;

	sw = alloc_host_usb4(test);
	if (!sw)
		return NULL;

	sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[10].config.max_in_hop_id = 9;
	sw->ports[10].config.max_out_hop_id = 9;
	sw->ports[10].cap_adap = -1;
	sw->ports[10].disabled = false;

	return sw;
}

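/*
 * Device router defaults: port 0 control, 1-8 four dual-link lane pairs,
 * 9 PCIe upstream, 10-12 PCIe downstream, 13-14 DP OUT, 15 unused,
 * 16 USB3 upstream, 17-19 USB3 downstream. If @parent is given, the new
 * device is wired to it at @route, optionally with the lanes bonded.
 */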
static struct tb_switch *alloc_dev_default(struct kunit *test,
					   struct tb_switch *parent,
					   u64 route, bool bonded)
{
	struct tb_port *port, *upstream_port;
	struct tb_switch *sw;

	sw = alloc_switch(test, route, 1, 19);
	if (!sw)
		return NULL;

	sw->config.vendor_id = 0x8086;
	sw->config.device_id = 0x15ef;

	sw->ports[0].config.type = TB_TYPE_PORT;
	sw->ports[0].config.max_in_hop_id = 8;
	sw->ports[0].config.max_out_hop_id = 8;

	sw->ports[1].config.type = TB_TYPE_PORT;
	sw->ports[1].config.max_in_hop_id = 19;
	sw->ports[1].config.max_out_hop_id = 19;
	sw->ports[1].total_credits = 60;
	sw->ports[1].ctl_credits = 2;
	sw->ports[1].dual_link_port = &sw->ports[2];

	sw->ports[2].config.type = TB_TYPE_PORT;
	sw->ports[2].config.max_in_hop_id = 19;
	sw->ports[2].config.max_out_hop_id = 19;
	sw->ports[2].total_credits = 60;
	sw->ports[2].ctl_credits = 2;
	sw->ports[2].dual_link_port = &sw->ports[1];
	sw->ports[2].link_nr = 1;

	sw->ports[3].config.type = TB_TYPE_PORT;
	sw->ports[3].config.max_in_hop_id = 19;
	sw->ports[3].config.max_out_hop_id = 19;
	sw->ports[3].total_credits = 60;
	sw->ports[3].ctl_credits = 2;
	sw->ports[3].dual_link_port = &sw->ports[4];

	sw->ports[4].config.type = TB_TYPE_PORT;
	sw->ports[4].config.max_in_hop_id = 19;
	sw->ports[4].config.max_out_hop_id = 19;
	sw->ports[4].total_credits = 60;
	sw->ports[4].ctl_credits = 2;
	sw->ports[4].dual_link_port = &sw->ports[3];
	sw->ports[4].link_nr = 1;

	sw->ports[5].config.type = TB_TYPE_PORT;
	sw->ports[5].config.max_in_hop_id = 19;
	sw->ports[5].config.max_out_hop_id = 19;
	sw->ports[5].total_credits = 60;
	sw->ports[5].ctl_credits = 2;
	sw->ports[5].dual_link_port = &sw->ports[6];

	sw->ports[6].config.type = TB_TYPE_PORT;
	sw->ports[6].config.max_in_hop_id = 19;
	sw->ports[6].config.max_out_hop_id = 19;
	sw->ports[6].total_credits = 60;
	sw->ports[6].ctl_credits = 2;
	sw->ports[6].dual_link_port = &sw->ports[5];
	sw->ports[6].link_nr = 1;

	sw->ports[7].config.type = TB_TYPE_PORT;
	sw->ports[7].config.max_in_hop_id = 19;
	sw->ports[7].config.max_out_hop_id = 19;
	sw->ports[7].total_credits = 60;
	sw->ports[7].ctl_credits = 2;
	sw->ports[7].dual_link_port = &sw->ports[8];

	sw->ports[8].config.type = TB_TYPE_PORT;
	sw->ports[8].config.max_in_hop_id = 19;
	sw->ports[8].config.max_out_hop_id = 19;
	sw->ports[8].total_credits = 60;
	sw->ports[8].ctl_credits = 2;
	sw->ports[8].dual_link_port = &sw->ports[7];
	sw->ports[8].link_nr = 1;

	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
	sw->ports[9].config.max_in_hop_id = 8;
	sw->ports[9].config.max_out_hop_id = 8;

	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[10].config.max_in_hop_id = 8;
	sw->ports[10].config.max_out_hop_id = 8;

	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[11].config.max_in_hop_id = 8;
	sw->ports[11].config.max_out_hop_id = 8;

	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
	sw->ports[12].config.max_in_hop_id = 8;
	sw->ports[12].config.max_out_hop_id = 8;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;
	sw->ports[13].cap_adap = -1;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;
	sw->ports[14].cap_adap = -1;

	sw->ports[15].disabled = true;

	sw->ports[16].config.type = TB_TYPE_USB3_UP;
	sw->ports[16].config.max_in_hop_id = 8;
	sw->ports[16].config.max_out_hop_id = 8;

	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[17].config.max_in_hop_id = 8;
	sw->ports[17].config.max_out_hop_id = 8;

	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[18].config.max_in_hop_id = 8;
	sw->ports[18].config.max_out_hop_id = 8;

	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
	sw->ports[19].config.max_in_hop_id = 8;
	sw->ports[19].config.max_out_hop_id = 8;

	if (!parent)
		return sw;

	/* Link them */
	upstream_port = tb_upstream_port(sw);
	port = tb_port_at(route, parent);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;

		if (bonded) {
			/* Bonding is used */
			port->bonded = true;
			port->total_credits *= 2;
			port->dual_link_port->bonded = true;
			port->dual_link_port->total_credits = 0;
			upstream_port->bonded = true;
			upstream_port->total_credits *= 2;
			upstream_port->dual_link_port->bonded = true;
			upstream_port->dual_link_port->total_credits = 0;
		}
	}

	return sw;
}

static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
					     struct tb_switch *parent,
					     u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[13].config.max_in_hop_id = 9;
	sw->ports[13].config.max_out_hop_id = 9;

	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
	sw->ports[14].config.max_in_hop_id = 9;
	sw->ports[14].config.max_out_hop_id = 9;

	return sw;
}

static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
					      struct tb_switch *parent,
					      u64 route, bool bonded)
{
	struct tb_switch *sw;
	int i;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;
	/*
	 * Device with:
	 * 2x USB4 Adapters (adapters 1,2 and 3,4),
	 * 1x PCIe Upstream (adapter 9),
	 * 1x PCIe Downstream (adapter 10),
	 * 1x USB3 Upstream (adapter 16),
	 * 1x USB3 Downstream (adapter 17)
	 */
	for (i = 5; i <= 8; i++)
		sw->ports[i].disabled = true;

	for (i = 11; i <= 14; i++)
		sw->ports[i].disabled = true;

	sw->ports[13].cap_adap = 0;
	sw->ports[14].cap_adap = 0;

	for (i = 18; i <= 19; i++)
		sw->ports[i].disabled = true;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 109;
	sw->min_dp_aux_credits = 0;
	sw->min_dp_main_credits = 0;
	sw->max_pcie_credits = 30;
	sw->max_dma_credits = 1;

	return sw;
}

static struct tb_switch *alloc_dev_usb4(struct kunit *test,
					struct tb_switch *parent,
					u64 route, bool bonded)
{
	struct tb_switch *sw;

	sw = alloc_dev_default(test, parent, route, bonded);
	if (!sw)
		return NULL;

	sw->generation = 4;
	sw->credit_allocation = true;
	sw->max_usb3_credits = 14;
	sw->min_dp_aux_credits = 1;
	sw->min_dp_main_credits = 18;
	sw->max_pcie_credits = 32;
	sw->max_dma_credits = 14;

	return sw;
}

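/*
 * The tests below walk ports with tb_next_port_on_path() and
 * tb_for_each_port_on_path() over topologies built from the helpers above,
 * checking every visited port against a table of expectations.
 */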
static void tb_test_path_basic(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host;

	host = alloc_host(test);

	src_port = &host->ports[5];
	dst_port = src_port;

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

static void tb_test_path_not_connected_walk(struct kunit *test)
{
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;

	host = alloc_host(test);
	/* No connection between host and dev */
	dev = alloc_dev_default(test, NULL, 3, true);

	src_port = &host->ports[12];
	dst_port = &dev->ports[16];

	p = tb_next_port_on_path(src_port, dst_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, src_port);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);

	p = tb_next_port_on_path(src_port, dst_port, p);
	KUNIT_EXPECT_TRUE(test, !p);

	/* Other direction */

	p = tb_next_port_on_path(dst_port, src_port, NULL);
	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);

	p = tb_next_port_on_path(dst_port, src_port, p);
	KUNIT_EXPECT_TRUE(test, !p);
}

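/*
 * One expected entry per port visited on a walk: the route of the switch
 * owning the port, the port number and the adapter type.
 */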
struct port_expectation {
	u64 route;
	u8 port;
	enum tb_port_type type;
};

static void tb_test_path_single_hop_walk(struct kunit *test)
{
	/*
	 * Walks from Host PCIe downstream port to Device #1 PCIe
	 * upstream port.
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 1, true);

	src_port = &host->ports[8];
	dst_port = &dev->ports[9];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #2 DP OUT.
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev2;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);

	src_port = &host->ports[5];
	dst_port = &dev2->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_simple_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Host DP IN to Device #3 DP OUT.
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_port *src_port, *dst_port, *p;
	struct tb_switch *host, *dev1, *dev3;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev1, 0x501, true);
	alloc_dev_default(test, dev1, 0x701, true);

	src_port = &host->ports[5];
	dst_port = &dev3->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_complex_tree_walk(struct kunit *test)
{
	/*
	 * Walks from Device #3 DP IN to Device #9 DP OUT.
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev5 = alloc_dev_default(test, dev1, 0x701, true);
	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
	alloc_dev_default(test, dev7, 0x303070701, true);
	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);

	src_port = &dev3->ports[13];
	dst_port = &dev9->ports[14];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_max_length_walk(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *src_port, *dst_port, *p;
	int i;

	/*
	 * Walks from Device #6 DP IN to Device #12 DP OUT through two
	 * six-device chains hanging off the host.
	 */
	static const struct port_expectation test_data[] = {
		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
	};

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	src_port = &dev6->ports[13];
	dst_port = &dev12->ports[13];

	/* Walk both directions */

	i = 0;
	tb_for_each_port_on_path(src_port, dst_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i++;
	}

	KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));

	i = ARRAY_SIZE(test_data) - 1;
	tb_for_each_port_on_path(dst_port, src_port, p) {
		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
				test_data[i].type);
		i--;
	}

	KUNIT_EXPECT_EQ(test, i, -1);
}

static void tb_test_path_not_connected(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *down, *up;
	struct tb_path *path;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, false);
	/* Not connected to anything */
	dev2 = alloc_dev_default(test, NULL, 0x303, false);

	down = &dev1->ports[10];
	up = &dev2->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
	KUNIT_ASSERT_NULL(test, path);
}

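/*
 * Expected hop for a path built with tb_path_alloc(): the route of the
 * switch plus the ingress and egress adapters of that hop.
 */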
struct hop_expectation {
	u64 route;
	u8 in_port;
	enum tb_port_type in_type;
	u8 out_port;
	enum tb_port_type out_type;
};

static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
	/*
	 * PCIe path from host to device using lane 0.
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x0, .in_port = 9, .in_type = TB_TYPE_PCIE_DOWN,
		  .out_port = 3, .out_type = TB_TYPE_PORT },
		{ .route = 0x3, .in_port = 1, .in_type = TB_TYPE_PORT,
		  .out_port = 9, .out_type = TB_TYPE_PCIE_UP },
	};
	struct tb_switch *host, *dev;
	struct tb_port *down, *up;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, false);

	down = &host->ports[9];
	up = &dev->ports[9];

	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
	/*
	 * DP Video path from host to device using lane 1. Paths like
	 * these are only used with Thunderbolt 1 devices where lane
	 * bonding is not possible. USB4 specifically does not allow
	 * paths like this (you either use lane 0 where lane 1 is
	 * disabled or both lanes are bonded).
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x0, .in_port = 5, .in_type = TB_TYPE_DP_HDMI_IN,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x1, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 13, .out_type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	in = &host->ports[5];
	out = &dev->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 3 using lane 1.
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x0, .in_port = 5, .in_type = TB_TYPE_DP_HDMI_IN,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x1, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 8, .out_type = TB_TYPE_PORT },
		{ .route = 0x701, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 6, .out_type = TB_TYPE_PORT },
		{ .route = 0x50701, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 13, .out_type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);

	in = &host->ports[5];
	out = &dev3->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 3 to host using lane 1.
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x50701, .in_port = 13, .in_type = TB_TYPE_DP_HDMI_IN,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x701, .in_port = 6, .in_type = TB_TYPE_PORT,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x1, .in_port = 8, .in_type = TB_TYPE_PORT,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x0, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 5, .out_type = TB_TYPE_DP_HDMI_IN },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, false);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);

	in = &dev3->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

static void tb_test_path_mixed_chain(struct kunit *test)
{
	/*
	 * DP Video path from host to device 4 where first and last link
	 * are bonded.
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x0, .in_port = 5, .in_type = TB_TYPE_DP_HDMI_IN,
		  .out_port = 1, .out_type = TB_TYPE_PORT },
		{ .route = 0x1, .in_port = 1, .in_type = TB_TYPE_PORT,
		  .out_port = 8, .out_type = TB_TYPE_PORT },
		{ .route = 0x701, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 6, .out_type = TB_TYPE_PORT },
		{ .route = 0x50701, .in_port = 2, .in_type = TB_TYPE_PORT,
		  .out_port = 3, .out_type = TB_TYPE_PORT },
		{ .route = 0x3050701, .in_port = 1, .in_type = TB_TYPE_PORT,
		  .out_port = 13, .out_type = TB_TYPE_DP_HDMI_OUT },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &host->ports[5];
	out = &dev4->ports[13];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
	/*
	 * DP Video path from device 4 to host where first and last link
	 * are bonded.
	 */
	static const struct hop_expectation test_data[] = {
		{ .route = 0x3050701, .in_port = 13, .in_type = TB_TYPE_DP_HDMI_OUT,
		  .out_port = 1, .out_type = TB_TYPE_PORT },
		{ .route = 0x50701, .in_port = 3, .in_type = TB_TYPE_PORT,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x701, .in_port = 6, .in_type = TB_TYPE_PORT,
		  .out_port = 2, .out_type = TB_TYPE_PORT },
		{ .route = 0x1, .in_port = 8, .in_type = TB_TYPE_PORT,
		  .out_port = 1, .out_type = TB_TYPE_PORT },
		{ .route = 0x0, .in_port = 1, .in_type = TB_TYPE_PORT,
		  .out_port = 5, .out_type = TB_TYPE_DP_HDMI_IN },
	};
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
	struct tb_port *in, *out;
	struct tb_path *path;
	int i;

	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, false);
	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);

	in = &dev4->ports[13];
	out = &host->ports[5];

	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
	KUNIT_ASSERT_NOT_NULL(test, path);
	KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
		const struct tb_port *in_port, *out_port;

		in_port = path->hops[i].in_port;
		out_port = path->hops[i].out_port;

		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
				test_data[i].in_type);
		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
				test_data[i].out_type);
	}

	tb_path_free(path);
}

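/*
 * The tunnel tests below allocate PCIe, DP, USB3 and DMA tunnels with
 * tb_tunnel_alloc_*() and verify the resulting paths hop by hop.
 */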
static void tb_test_tunnel_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create PCIe tunnel between host and two devices.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x501, true);

	down = &host->ports[8];
	up = &dev1->ports[9];
	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[10];
	up = &dev2->ports[9];
	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel between Host and Device
	 */
	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x3, true);

	in = &host->ports[5];
	out = &dev->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev4;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	alloc_dev_default(test, dev1, 0x301, true);
	alloc_dev_default(test, dev1, 0x501, true);
	dev4 = alloc_dev_default(test, dev1, 0x701, true);

	in = &host->ports[5];
	out = &dev4->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_tree(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Creates DP tunnel from Device #6 to Device #12.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x301, true);
	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
	dev7 = alloc_dev_default(test, host, 0x3, true);
	dev8 = alloc_dev_default(test, dev7, 0x303, true);
	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);

	in = &dev6->ports[13];
	out = &dev12->ports[13];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
	/* First hop */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
	/* In the middle */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
			    &host->ports[3]);
	/* Last */
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
	KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
			    &host->ports[3]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
	tb_tunnel_free(tunnel);
}

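/*
 * Three simultaneous DP tunnels need three DP IN adapters on the host,
 * which is why the test below uses alloc_host_br() (extra DP IN on
 * port 10).
 */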
static void tb_test_tunnel_3dp(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;

	/*
	 * Create 3 DP tunnels from Host to Devices #2, #5 and #4.
	 */
	host = alloc_host_br(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_default(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in1 = &host->ports[5];
	in2 = &host->ports[6];
	in3 = &host->ports[10];

	out1 = &dev2->ports[13];
	out2 = &dev5->ports[13];
	out3 = &dev4->ports[14];

	tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);

	tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);

	tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
	KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
	KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
	KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
	KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_tunnel *tunnel1, *tunnel2;
	struct tb_port *down, *up;

	/*
	 * Create USB3 tunnel between host and two devices.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	down = &host->ports[12];
	up = &dev1->ports[16];
	tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);

	down = &dev1->ports[17];
	up = &dev2->ports[16];
	tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);

	tb_tunnel_free(tunnel2);
	tb_tunnel_free(tunnel1);
}

static void tb_test_tunnel_port_on_path(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
	struct tb_port *in, *out, *port;
	struct tb_tunnel *dp_tunnel;

	/*
	 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT and
	 * check which ports are on the resulting path.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x3, true);
	dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
	dev3 = alloc_dev_default(test, dev1, 0x503, true);
	dev4 = alloc_dev_default(test, dev1, 0x703, true);
	dev5 = alloc_dev_default(test, dev3, 0x50503, true);

	in = &dev2->ports[13];
	out = &dev5->ports[13];

	dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);

	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));

	port = &host->ports[8];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &host->ports[3];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[3];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[5];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev1->ports[7];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev3->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev5->ports[1];
	KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	port = &dev4->ports[1];
	KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));

	tb_tunnel_free(dp_tunnel);
}

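/*
 * DMA tunnels run between the NHI and a lane adapter (or a remote lane
 * adapter across the domain border); RX-only and TX-only variants pass -1
 * for the unused HopIDs.
 */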
static void tb_test_tunnel_dma(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA tunnel from NHI to port 1 and back:
	 * In HopID 1 -> Out HopID 8 and In HopID 8 -> Out HopID 1
	 * across the domain border.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);

	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);

	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_rx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA RX tunnel from port 1 to NHI:
	 * In HopID 15 -> Out HopID 2 across the domain border.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);

	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_tx(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	/*
	 * Create DMA TX tunnel from NHI to port 1:
	 * In HopID 2 -> Out HopID 15 across the domain border.
	 */
	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);

	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_chain(struct kunit *test)
{
	struct tb_switch *host, *dev1, *dev2;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;

	/*
	 * Create DMA tunnel from NHI to Device #2 port 3 and back:
	 * NHI uses In HopID 1 / Out HopID 1, Device #2 port 3 uses
	 * In HopID 8 / Out HopID 8 across the domain border.
	 */
	host = alloc_host(test);
	dev1 = alloc_dev_default(test, host, 0x1, true);
	dev2 = alloc_dev_default(test, dev1, 0x701, true);

	nhi = &host->ports[7];
	port = &dev2->ports[3];
	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);

	KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
			    &host->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);

	KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
			    &dev1->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
			    &dev1->ports[7]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
			    &dev2->ports[1]);
	KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
	KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);

	tb_tunnel_free(tunnel);
}

static void tb_test_tunnel_dma_match(struct kunit *test)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_switch *host;

	host = alloc_host(test);
	nhi = &host->ports[7];
	port = &host->ports[1];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);

	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
	KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
	KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, false);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host(test);
	dev = alloc_dev_default(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}
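
/*
 * Comparing the two legacy cases above, the expectations differ only in the
 * second hop's initial credits: 16 when the link is not bonded and 32 when
 * it is, i.e. bonding the lanes doubles the buffers handed to the upstream
 * PCIe adapter under the legacy allocation scheme.
 */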
static void tb_test_credit_alloc_pcie(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_without_dp(test, host, 0x1, true);

	/*
	 * The device has no DP therefore baMinDPmain = baMinDPaux = 0
	 *
	 * Create PCIe path with buffers less than baMaxPCIe.
	 *
	 * For a device with this buffer configuration:
	 *
	 * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
	 * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
	 *              = Max(6, Min(30, 9)) = 9
	 *
	 * (An illustrative helper spelling out this formula follows this
	 * function.)
	 */
	down = &host->ports[8];
	up = &dev->ports[9];
	tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_TRUE(test, tunnel != NULL);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	/* PCIe downstream path */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);

	/* PCIe upstream path */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	tb_tunnel_free(tunnel);
}
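
/*
 * A minimal sketch (not driver API) of the PCIe credit formula referenced in
 * tb_test_credit_alloc_without_dp() above. The helper name and parameter
 * names are made up for illustration only; the driver computes this
 * internally when the tunnel is allocated. With the values from the comment
 * above, tb_test_example_pcie_credits(120, 2, 0, 109, 30) == 9.
 */
static inline unsigned int tb_test_example_pcie_credits(unsigned int total,
							 unsigned int ctl,
							 unsigned int min_dp,
							 unsigned int max_usb3,
							 unsigned int max_pcie)
{
	/* Remaining Buffers = Total - (CP + DP), e.g. 120 - (2 + 0) = 118 */
	unsigned int remaining = total - (ctl + min_dp);

	/* PCIe Credits = Max(6, Min(baMaxPCIe, Remaining - baMaxUSB3)) */
	return max_t(unsigned int, 6U,
		     min_t(unsigned int, max_pcie, remaining - max_usb3));
}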
static void tb_test_credit_alloc_dp(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *in, *out;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	in = &host->ports[5];
	out = &dev->ports[14];

	tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);

	/* Video (main) path */
	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	/* AUX TX */
	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	/* AUX RX */
	path = tunnel->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *up, *down;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	down = &host->ports[12];
	up = &dev->ports[16];
	tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
{
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel);
	KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);

	path = tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
{
	struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
	struct tb_switch *host, *dev;
	struct tb_port *nhi, *port;
	struct tb_path *path;

	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	nhi = &host->ports[7];
	port = &dev->ports[3];

	/*
	 * Create three DMA tunnels through the same ports. With the
	 * default buffers we should be able to create two and the last
	 * one fails.
	 *
	 * For the default host we have the following buffers for DMA:
	 *
	 *   120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
	 *
	 * For the device we have the following:
	 *
	 *   120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
	 *
	 *   spare = 14 + 1 = 15
	 *
	 * So on the host the first tunnel gets 14 and the second gets the
	 * remaining 1 and then we run out of buffers. (A toy model of this
	 * reservation behaviour follows this function.)
	 */
	tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, tunnel1);
	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);

	path = tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, tunnel2);
	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);

	path = tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NULL(test, tunnel3);

	/*
	 * Release the first DMA tunnel. That should make 14 buffers
	 * available for the next tunnel.
	 */
	tb_tunnel_free(tunnel1);

	tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
	KUNIT_ASSERT_NOT_NULL(test, tunnel3);

	path = tunnel3->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = tunnel3->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	tb_tunnel_free(tunnel3);
	tb_tunnel_free(tunnel2);
}
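
/*
 * A toy model (illustration only, all names made up) of the reservation
 * behaviour exercised in tb_test_credit_alloc_dma_multiple() above: each DMA
 * tunnel draws up to 14 credits from a shared per-port budget, and once the
 * budget is exhausted allocation fails until an existing tunnel is freed.
 */
struct tb_test_example_pool {
	unsigned int available;
};

static inline int tb_test_example_reserve_dma(struct tb_test_example_pool *pool,
					      unsigned int *granted)
{
	if (!pool->available)
		return -ENOSPC;

	/* Grant the full 14 credits if possible, otherwise what is left */
	*granted = min_t(unsigned int, pool->available, 14U);
	pool->available -= *granted;
	return 0;
}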
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *pcie_tunnel;
	struct tb_path *path;

	down = &host->ports[8];
	up = &dev->ports[9];
	pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
	KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
	KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);

	path = pcie_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	path = pcie_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);

	return pcie_tunnel;
}
static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel1;
	struct tb_path *path;

	in = &host->ports[5];
	out = &dev->ports[13];
	dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
	KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);

	path = dp_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel1->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel1;
}
static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *in, *out;
	struct tb_tunnel *dp_tunnel2;
	struct tb_path *path;

	in = &host->ports[6];
	out = &dev->ports[14];
	dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
	KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);

	path = dp_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);

	path = dp_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dp_tunnel2->paths[2];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dp_tunnel2;
}
static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *up, *down;
	struct tb_tunnel *usb3_tunnel;
	struct tb_path *path;

	down = &host->ports[12];
	up = &dev->ports[16];
	usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
	KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
	KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);

	path = usb3_tunnel->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = usb3_tunnel->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);

	return usb3_tunnel;
}
static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel1;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
	KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);

	path = dma_tunnel1->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	path = dma_tunnel1->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);

	return dma_tunnel1;
}
static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
			struct tb_switch *host, struct tb_switch *dev)
{
	struct tb_port *nhi, *port;
	struct tb_tunnel *dma_tunnel2;
	struct tb_path *path;

	nhi = &host->ports[7];
	port = &dev->ports[3];
	dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
	KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
	KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);

	path = dma_tunnel2->paths[0];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	path = dma_tunnel2->paths[1];
	KUNIT_ASSERT_EQ(test, path->path_length, 2);
	KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
	KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);

	return dma_tunnel2;
}
static void tb_test_credit_alloc_all(struct kunit *test)
{
	struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
	struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
	struct tb_switch *host, *dev;

	/*
	 * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
	 * device. Expectation is that all these can be established with
	 * the default credit allocation found in Intel hardware.
	 */
	host = alloc_host_usb4(test);
	dev = alloc_dev_usb4(test, host, 0x1, true);

	pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
	dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
	dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
	usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
	dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
	dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);

	tb_tunnel_free(dma_tunnel2);
	tb_tunnel_free(dma_tunnel1);
	tb_tunnel_free(usb3_tunnel);
	tb_tunnel_free(dp_tunnel2);
	tb_tunnel_free(dp_tunnel1);
	tb_tunnel_free(pcie_tunnel);
}
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, ! Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset, ("Apple Inc.") */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, ! Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset, ("Macintosh") */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset, (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
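
/*
 * Illustration only (helper name made up): how a property entry in the block
 * above is laid out. The key is eight ASCII characters split over two
 * little-endian dwords, followed by a dword whose top byte is the type
 * character ('v' = immediate value, 't' = text, 'D' = directory) and whose
 * lower bits carry the length, e.g. 0x76000001 == ('v' << 24) | 1 and
 * 0x44000014 == ('D' << 24) | 20. Note also that the four directory UUID
 * dwords are the same bytes as network_dir_uuid read as little-endian u32s.
 */
static inline u32 tb_test_example_property_header(char type, u32 length)
{
	return ((u32)type << 24) | length;
}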
static void tb_test_property_parse(struct kunit *test)
{
	struct tb_property_dir *dir, *network_dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

	p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);

	p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NULL(test, p);

	p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
	KUNIT_ASSERT_NOT_NULL(test, p);

	network_dir = p->value.dir;
	KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));

	p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);

	p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	KUNIT_ASSERT_NOT_NULL(test, p);
	KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);

	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	KUNIT_EXPECT_TRUE(test, !p);
	p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	KUNIT_EXPECT_TRUE(test, !p);

	tb_property_free_dir(dir);
}
static void tb_test_property_format(struct kunit *test)
{
	struct tb_property_dir *dir;
	ssize_t block_len;
	u32 *block;
	int ret, i;

	dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, dir);

	ret = tb_property_format_dir(dir, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block_len = ret;

	block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dir, block, block_len);
	KUNIT_EXPECT_EQ(test, ret, 0);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dir);
}
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
			 struct tb_property_dir *d2)
{
	struct tb_property *p1, *p2, *tmp;
	int n1, n2, i;

	if (d1->uuid) {
		KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
		KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
	} else {
		KUNIT_ASSERT_NULL(test, d2->uuid);
	}

	n1 = 0;
	tb_property_for_each(d1, tmp)
		n1++;
	KUNIT_ASSERT_NE(test, n1, 0);

	n2 = 0;
	tb_property_for_each(d2, tmp)
		n2++;
	KUNIT_ASSERT_NE(test, n2, 0);

	KUNIT_ASSERT_EQ(test, n1, n2);

	p1 = NULL;
	p2 = NULL;
	for (i = 0; i < n1; i++) {
		p1 = tb_property_get_next(d1, p1);
		KUNIT_ASSERT_NOT_NULL(test, p1);
		p2 = tb_property_get_next(d2, p2);
		KUNIT_ASSERT_NOT_NULL(test, p2);

		KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
		KUNIT_ASSERT_EQ(test, p1->type, p2->type);
		KUNIT_ASSERT_EQ(test, p1->length, p2->length);

		switch (p1->type) {
		case TB_PROPERTY_TYPE_DIRECTORY:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
			compare_dirs(test, p1->value.dir, p2->value.dir);
			break;

		case TB_PROPERTY_TYPE_DATA:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
			KUNIT_ASSERT_TRUE(test,
				!memcmp(p1->value.data, p2->value.data,
					p1->length * 4));
			break;

		case TB_PROPERTY_TYPE_TEXT:
			KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
			KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
			KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
			break;

		case TB_PROPERTY_TYPE_VALUE:
			KUNIT_ASSERT_EQ(test, p1->value.immediate,
					p2->value.immediate);
			break;

		default:
			KUNIT_FAIL(test, "unexpected property type");
			break;
		}
	}
}
static void tb_test_property_copy(struct kunit *test)
{
	struct tb_property_dir *src, *dst;
	u32 *block;
	int ret, i;

	src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
	KUNIT_ASSERT_NOT_NULL(test, src);

	dst = tb_property_copy_dir(src);
	KUNIT_ASSERT_NOT_NULL(test, dst);

	/* Compare the structures */
	compare_dirs(test, src, dst);

	/* Compare the resulting property block */
	ret = tb_property_format_dir(dst, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));

	block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, block);

	ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
	KUNIT_EXPECT_TRUE(test, !ret);

	for (i = 0; i < ARRAY_SIZE(root_directory); i++)
		KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);

	tb_property_free_dir(dst);
	tb_property_free_dir(src);
}
static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_3dp),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
	KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
	KUNIT_CASE(tb_test_credit_alloc_pcie),
	KUNIT_CASE(tb_test_credit_alloc_without_dp),
	KUNIT_CASE(tb_test_credit_alloc_dp),
	KUNIT_CASE(tb_test_credit_alloc_usb3),
	KUNIT_CASE(tb_test_credit_alloc_dma),
	KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
	KUNIT_CASE(tb_test_credit_alloc_all),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};
static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};

kunit_test_suite(tb_test_suite);
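
/*
 * A typical way to run this suite with the in-tree KUnit tooling (shown for
 * illustration; the config symbol for these tests lives in
 * drivers/thunderbolt/Kconfig):
 *
 *   ./tools/testing/kunit/kunit.py run 'thunderbolt' \
 *           --kconfig_add CONFIG_USB4=y \
 *           --kconfig_add CONFIG_USB4_KUNIT_TEST=y
 */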