// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <isaac.hazan@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
#define DMA_TEST_DATA_PATTERN		0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS		1000

enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};

struct dma_test_frame {
	struct dma_test *dma_test;
	void *data;
	struct ring_frame frame;
};

enum dma_test_test_error {
	DMA_TEST_NO_ERROR,
	DMA_TEST_INTERRUPTED,
	DMA_TEST_BUFFER_ERROR,
	DMA_TEST_DMA_ERROR,
	DMA_TEST_CONFIG_ERROR,
	DMA_TEST_SPEED_ERROR,
	DMA_TEST_WIDTH_ERROR,
	DMA_TEST_BONDING_ERROR,
	DMA_TEST_PACKET_ERROR,
};

static const char * const dma_test_error_names[] = {
	[DMA_TEST_NO_ERROR] = "no errors",
	[DMA_TEST_INTERRUPTED] = "interrupted by signal",
	[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
	[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
	[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
	[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
	[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
	[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
	[DMA_TEST_PACKET_ERROR] = "packet check failed",
};

enum dma_test_result {
	DMA_TEST_NOT_RUN,
	DMA_TEST_SUCCESS,
	DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
	[DMA_TEST_NOT_RUN] = "not run",
	[DMA_TEST_SUCCESS] = "success",
	[DMA_TEST_FAIL] = "failed",
};

/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @rx_hopid: HopID used for receiving frames
 * @tx_ring: Software ring holding TX frames
 * @tx_hopid: HopID used for sending frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *			    run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	int rx_hopid;
	struct tb_ring *tx_ring;
	int tx_hopid;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	enum tb_link_width link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};

/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;

static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
}

static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;

		ret = tb_xdomain_alloc_out_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->tx_hopid = ret;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;

		ret = tb_xdomain_alloc_in_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->rx_hopid = ret;
	}

	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : -1,
				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}

static void dma_test_stop_rings(struct dma_test *dt)
{
	int ret;

	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
				       dt->tx_ring ? dt->tx_ring->hop : -1,
				       dt->rx_hopid,
				       dt->rx_ring ? dt->rx_ring->hop : -1);
	if (ret)
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}

static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}

static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}

static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);
	kfree(tf->data);
	kfree(tf);
}

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}

#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__get(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
static int __fops ## _store(void *data, u64 val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
	ret = __validate(val);						\
	if (ret)							\
		return ret;						\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__set(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,		\
			 __fops ## _store, "%llu\n")

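/*
 * Each DMA_TEST_DEBUGFS_ATTR() invocation below expands into a pair of
 * get/set handlers that take dt->lock around the supplied accessor
 * callbacks, plus the matching <name>_fops instance that is later
 * registered in dma_test_debugfs_init().
 */
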
static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);

static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	/* Accept only known link speeds in Gb/s; 0 means use the negotiated speed */
	switch (val) {
	case 40:
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);

static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);

static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);

static int dma_test_set_bonding(struct dma_test *dt)
{
	switch (dt->link_width) {
	case TB_LINK_WIDTH_DUAL:
		return tb_xdomain_lane_bonding_enable(dt->xd);
	case TB_LINK_WIDTH_SINGLE:
		tb_xdomain_lane_bonding_disable(dt->xd);
		fallthrough;
	default:
		return 0;
	}
}

static bool dma_test_validate_config(struct dma_test *dt)
{
	if (!dt->packets_to_send && !dt->packets_to_receive)
		return false;
	if (dt->packets_to_send && dt->packets_to_receive &&
	    dt->packets_to_send != dt->packets_to_receive)
		return false;
	return true;
}

static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width && dt->link_width != dt->xd->link_width) {
			dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			   dt->packets_to_receive != dt->packets_received ||
			   dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}

static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");

static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);

static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}

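/*
 * Example usage from userspace (a rough sketch; the exact debugfs
 * location depends on the kernel version and the XDomain service path,
 * something like /sys/kernel/debug/thunderbolt/<service>/dma_test/):
 *
 *   # echo 1000 > packets_to_send
 *   # echo 1000 > packets_to_receive
 *   # echo 1 > test
 *   # cat status
 *
 * "lanes" and "speed" may optionally be written first to require a
 * specific negotiated link width/speed for the run.
 */
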
static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	mutex_init(&dt->lock);
	init_completion(&dt->complete);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}

static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}

static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};

static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

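/*
 * The remote host advertises the "dma_test" service in its XDomain
 * property directory (see dma_test_init() below); this ID table lets the
 * Thunderbolt bus bind this driver to the matching tb_service device on
 * the local side.
 */
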
static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};

static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u32 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);

static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");