// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <isaac.hazan@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#define DMA_TEST_HOPID			8
#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
#define DMA_TEST_DATA_PATTERN		0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS		1000
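
/*
 * Each test packet is a single DMA_TEST_FRAME_SIZE (4 KiB) frame. Transmit
 * frames each carry a copy of a pattern buffer filled with 64-bit values
 * counting up from DMA_TEST_DATA_PATTERN, and a run is capped at
 * DMA_TEST_MAX_PACKETS packets in either direction.
 */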
enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};
struct dma_test_frame {
	struct dma_test *dma_test;
	void *data;
	struct ring_frame frame;
};
enum dma_test_test_error {
	DMA_TEST_NO_ERROR,
	DMA_TEST_INTERRUPTED,
	DMA_TEST_BUFFER_ERROR,
	DMA_TEST_DMA_ERROR,
	DMA_TEST_CONFIG_ERROR,
	DMA_TEST_SPEED_ERROR,
	DMA_TEST_WIDTH_ERROR,
	DMA_TEST_BONDING_ERROR,
	DMA_TEST_PACKET_ERROR,
};
static const char * const dma_test_error_names[] = {
	[DMA_TEST_NO_ERROR] = "no errors",
	[DMA_TEST_INTERRUPTED] = "interrupted by signal",
	[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
	[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
	[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
	[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
	[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
	[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
	[DMA_TEST_PACKET_ERROR] = "packet check failed",
};
enum dma_test_result {
	DMA_TEST_NOT_RUN,
	DMA_TEST_SUCCESS,
	DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
	[DMA_TEST_NOT_RUN] = "not run",
	[DMA_TEST_SUCCESS] = "success",
	[DMA_TEST_FAIL] = "failed",
};
/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @tx_ring: Software ring holding TX frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *			    run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	struct tb_ring *tx_ring;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	unsigned int link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};
/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;
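
/*
 * dma_test_dir is registered as an XDomain property directory in
 * dma_test_init(). The remote host then exposes a matching "dma_test"
 * service that this same driver binds to (see dma_test_ids below).
 */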
static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
}
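
/*
 * Allocates the NHI rings and maps them to DMA_TEST_HOPID on the XDomain
 * link. The rings run in frame mode so every descriptor carries one
 * DMA_TEST_FRAME_SIZE frame, and the RX ring only accepts frames whose
 * PDF matches the start/end markers set by the transmit side.
 */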
static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;
	}

	ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      DMA_TEST_HOPID,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}
static void dma_test_stop_rings(struct dma_test *dt)
{
	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	if (tb_xdomain_disable_paths(dt->xd))
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}
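
/*
 * Called once per completed RX frame. Receive buffers are one-shot:
 * dma_test_submit_rx() queues one frame per expected packet up front and
 * this callback releases each buffer after accounting for it.
 */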
static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}
static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}
static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);
	kfree(tf->data);
	kfree(tf);
}
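
/*
 * Queues npackets transmit frames. Each frame carries a copy of the 4 KiB
 * test pattern and is tagged with the FRAME_START/FRAME_END PDFs so that
 * the receiving end's sof/eof masks accept it.
 */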
static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		memcpy(tf->data, dma_test_pattern, DMA_TEST_FRAME_SIZE);

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}
#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
									\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__get(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
static int __fops ## _store(void *data, u64 val)			\
{									\
	struct tb_service *svc = data;					\
	struct dma_test *dt = tb_service_get_drvdata(svc);		\
	int ret;							\
									\
	ret = __validate(val);						\
	if (ret)							\
		return ret;						\
	ret = mutex_lock_interruptible(&dt->lock);			\
	if (ret)							\
		return ret;						\
	__set(dt, val);							\
	mutex_unlock(&dt->lock);					\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,		\
			 __fops ## _store, "%llu\n")
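
/*
 * DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set), for
 * example, expands to lanes_show()/lanes_store() and a lanes_fops suitable
 * for debugfs_create_file(). The __get()/__set() callbacks run with
 * dt->lock held.
 */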
static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);
static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	switch (val) {
	/* Accept the speeds (in Gb/s) the link can negotiate, or 0 */
	case 40:
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);
static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);
static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);
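
/*
 * lanes = 2 requests lane bonding on the XDomain link, lanes = 1 forces a
 * single lane, and lanes = 0 keeps whatever was negotiated.
 */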
static int dma_test_set_bonding(struct dma_test *dt)
{
	switch (dt->link_width) {
	case 2:
		return tb_xdomain_lane_bonding_enable(dt->xd);
	case 1:
		tb_xdomain_lane_bonding_disable(dt->xd);
		fallthrough;
	default:
		return 0;
	}
}
static bool dma_test_validate_config(struct dma_test *dt)
{
	if (!dt->packets_to_send && !dt->packets_to_receive)
		return false;
	if (dt->packets_to_send && dt->packets_to_receive &&
	    dt->packets_to_send != dt->packets_to_receive)
		return false;
	return true;
}
static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width &&
			   dt->xd->link_width != dt->link_width) {
			dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			   dt->packets_to_receive != dt->packets_received ||
			   dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}
static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");
static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}
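
/*
 * A typical run from userspace looks roughly like this (the exact debugfs
 * path depends on where the XDomain service shows up under
 * /sys/kernel/debug/thunderbolt):
 *
 *   # cd /sys/kernel/debug/thunderbolt/<service>/dma_test
 *   # echo 1000 > packets_to_send
 *   # echo 1000 > packets_to_receive
 *   # echo 1 > test
 *   # cat status
 */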
static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	mutex_init(&dt->lock);
	init_completion(&dt->complete);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}
static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}
static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails, tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};
static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};
static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u64 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);
static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_LICENSE("GPL v2");