/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		"Pass -1 for infinite timeout");
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
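
/*
 * For example, with the masks above the source byte at offset 5 inside
 * the region to be copied is initialized to
 *	PATTERN_SRC | PATTERN_COPY | (~5 & PATTERN_COUNT_MASK)
 * i.e. 0x80 | 0x40 | 0x1a = 0xda, so the verify pass can tell apart
 * "never copied", "copied correctly" and "wrong bytes overwritten"
 * from the high bits alone.
 */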
enum dmatest_error_type {
	DMATEST_ET_OK,
	DMATEST_ET_MAP_SRC,
	DMATEST_ET_MAP_DST,
	DMATEST_ET_PREP,
	DMATEST_ET_SUBMIT,
	DMATEST_ET_TIMEOUT,
	DMATEST_ET_DMA_ERROR,
	DMATEST_ET_DMA_IN_PROGRESS,
	DMATEST_ET_VERIFY,
	DMATEST_ET_VERIFY_BUF,
};
struct dmatest_verify_buffer {
	unsigned int	index;
	u8		expected;
	u8		actual;
};

struct dmatest_verify_result {
	unsigned int			error_count;
	struct dmatest_verify_buffer	data[MAX_ERROR_COUNT];
	u8				pattern;
	bool				is_srcbuf;
};
struct dmatest_thread_result {
	struct list_head	node;
	unsigned int		n;
	unsigned int		src_off;
	unsigned int		dst_off;
	unsigned int		len;
	enum dmatest_error_type	type;
	union {
		unsigned long			data;
		dma_cookie_t			cookie;
		enum dma_status			status;
		int				error;
		struct dmatest_verify_result	*vr;
	};
};
struct dmatest_result {
	struct list_head	node;
	char			*name;
	struct list_head	results;
};
struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};
struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};
/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;
	struct dmatest_params	dbgfs_params;

	/* Test results */
	struct list_head	results;
	struct mutex		results_lock;
};

static struct dmatest_info test_info;
static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}
static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}
static unsigned long dmatest_random(void)
{
	unsigned long buf;

	get_random_bytes(&buf, sizeof(buf));
	return buf;
}
static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
	}
}
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}
static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs,
		unsigned int start, unsigned int end, unsigned int counter,
		u8 pattern, bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;
	struct dmatest_verify_buffer *vb;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT && vr) {
					vb = &vr->data[error_count];
					vb->index = i;
					vb->expected = expected;
					vb->actual = actual;
				}
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warning("%s: %u errors suppressed\n",
			   current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}
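
/*
 * For example, min_odd(3, 8) == 3 and min_odd(4, 8) == 3.  The callers
 * below use this to force an odd number of identical source buffers
 * ("force odd to ensure dst = src"): xor over an odd count of equal
 * buffers reproduces the buffer itself.
 */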
static char *verify_result_get_one(struct dmatest_verify_result *vr,
		unsigned int i)
{
	struct dmatest_verify_buffer *vb = &vr->data[i];
	u8 diff = vb->actual ^ vr->pattern;
	static char buf[512];
	char *msg;

	if (vr->is_srcbuf)
		msg = "srcbuf overwritten!";
	else if ((vr->pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		msg = "dstbuf not copied!";
	else if (diff & PATTERN_SRC)
		msg = "dstbuf was copied!";
	else
		msg = "dstbuf mismatch!";

	snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg,
		 vb->index, vb->expected, vb->actual);

	return buf;
}
static char *thread_result_get(const char *name,
		struct dmatest_thread_result *tr)
{
	static const char * const messages[] = {
		[DMATEST_ET_OK]			= "No errors",
		[DMATEST_ET_MAP_SRC]		= "src mapping error",
		[DMATEST_ET_MAP_DST]		= "dst mapping error",
		[DMATEST_ET_PREP]		= "prep error",
		[DMATEST_ET_SUBMIT]		= "submit error",
		[DMATEST_ET_TIMEOUT]		= "test timed out",
		[DMATEST_ET_DMA_ERROR]		=
			"got completion callback (DMA_ERROR)",
		[DMATEST_ET_DMA_IN_PROGRESS]	=
			"got completion callback (DMA_IN_PROGRESS)",
		[DMATEST_ET_VERIFY]		= "errors",
		[DMATEST_ET_VERIFY_BUF]		= "verify errors",
	};
	static char buf[512];

	snprintf(buf, sizeof(buf) - 1,
		 "%s: #%u: %s with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
		 name, tr->n, messages[tr->type], tr->src_off, tr->dst_off,
		 tr->len, tr->data);

	return buf;
}
static int thread_result_add(struct dmatest_info *info,
		struct dmatest_result *r, enum dmatest_error_type type,
		unsigned int n, unsigned int src_off, unsigned int dst_off,
		unsigned int len, unsigned long data)
{
	struct dmatest_thread_result *tr;

	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return -ENOMEM;

	tr->type = type;
	tr->n = n;
	tr->src_off = src_off;
	tr->dst_off = dst_off;
	tr->len = len;
	tr->data = data;

	mutex_lock(&info->results_lock);
	list_add_tail(&tr->node, &r->results);
	mutex_unlock(&info->results_lock);

	pr_warn("%s\n", thread_result_get(r->name, tr));
	return 0;
}
static unsigned int verify_result_add(struct dmatest_info *info,
		struct dmatest_result *r, unsigned int n,
		unsigned int src_off, unsigned int dst_off, unsigned int len,
		u8 **bufs, int whence, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	struct dmatest_verify_result *vr;
	unsigned int error_count;
	unsigned int buf_off = is_srcbuf ? src_off : dst_off;
	unsigned int start, end;

	if (whence < 0) {
		start = 0;
		end = buf_off;
	} else if (whence > 0) {
		start = buf_off + len;
		end = info->params.buf_size;
	} else {
		start = buf_off;
		end = buf_off + len;
	}

	vr = kmalloc(sizeof(*vr), GFP_KERNEL);
	if (!vr) {
		pr_warn("dmatest: No memory to store verify result\n");
		return dmatest_verify(NULL, bufs, start, end, counter, pattern,
				      is_srcbuf);
	}

	vr->pattern = pattern;
	vr->is_srcbuf = is_srcbuf;

	error_count = dmatest_verify(vr, bufs, start, end, counter, pattern,
				     is_srcbuf);
	if (error_count) {
		vr->error_count = error_count;
		thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off,
				  dst_off, len, (unsigned long)vr);
		return error_count;
	}

	kfree(vr);
	return 0;
}
static void result_free(struct dmatest_info *info, const char *name)
{
	struct dmatest_result *r, *_r;

	mutex_lock(&info->results_lock);
	list_for_each_entry_safe(r, _r, &info->results, node) {
		struct dmatest_thread_result *tr, *_tr;

		if (name && strcmp(r->name, name))
			continue;

		list_for_each_entry_safe(tr, _tr, &r->results, node) {
			if (tr->type == DMATEST_ET_VERIFY_BUF)
				kfree(tr->vr);
			list_del(&tr->node);
			kfree(tr);
		}

		kfree(r->name);
		list_del(&r->node);
		kfree(r);
	}

	mutex_unlock(&info->results_lock);
}
static struct dmatest_result *result_init(struct dmatest_info *info,
		const char *name)
{
	struct dmatest_result *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (r) {
		r->name = kstrdup(name, GFP_KERNEL);
		INIT_LIST_HEAD(&r->results);
		mutex_lock(&info->results_lock);
		list_add_tail(&r->node, &info->results);
		mutex_unlock(&info->results_lock);
	}
	return r;
}
/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
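/*
 * Each iteration below checks three regions of every buffer via
 * verify_result_add(): [0, off), [off, off + len) and
 * [off + len, buf_size), so both the transferred window and the bytes
 * that must stay untouched are validated against their patterns.
 */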
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	const char		*thread_name;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;
	struct dmatest_result	*result;

	thread_name = current->comm;
	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	result = result_init(info, thread_name);
	if (!result)
		goto err_srcs;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src buffers are freed by the DMAEngine code with dma_unmap_single()
	 * dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
	      | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		len = dmatest_random() % params->buf_size + 1;
		len = (len >> align) << align;
		if (!len)
			len = 1 << align;
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);

		src_off = (src_off >> align) << align;
		dst_off = (dst_off >> align) << align;

		dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
		dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_SRC,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				thread_result_add(info, result,
						  DMATEST_ET_MAP_DST,
						  total_tests, src_off, dst_off,
						  len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			thread_result_add(info, result, DMATEST_ET_PREP,
					  total_tests, src_off, dst_off,
					  len, 0);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			thread_result_add(info, result, DMATEST_ET_SUBMIT,
					  total_tests, src_off, dst_off,
					  len, cookie);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait,
					     done.done || kthread_should_stop(),
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait.  To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing.  For now, just
			 * leave it dangling.
			 */
			thread_result_add(info, result, DMATEST_ET_TIMEOUT,
					  total_tests, src_off, dst_off,
					  len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			enum dmatest_error_type type = (status == DMA_ERROR) ?
				DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
			thread_result_add(info, result, type,
					  total_tests, src_off, dst_off,
					  len, status);
			failed_tests++;
			continue;
		}

		/* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		error_count = 0;

		pr_debug("%s: verifying source buffer...\n", thread_name);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, -1,
				0, PATTERN_SRC, true);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, 0,
				src_off, PATTERN_SRC | PATTERN_COPY, true);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->srcs, 1,
				src_off + len, PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", thread_name);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, -1,
				0, PATTERN_DST, false);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, 0,
				src_off, PATTERN_SRC | PATTERN_COPY, false);
		error_count += verify_result_add(info, result, total_tests,
				src_off, dst_off, len, thread->dsts, 1,
				dst_off + len, PATTERN_DST, false);

		if (error_count) {
			thread_result_add(info, result, DMATEST_ET_VERIFY,
					  total_tests, src_off, dst_off,
					  len, error_count);
			failed_tests++;
		} else {
			thread_result_add(info, result, DMATEST_ET_OK,
					  total_tests, src_off, dst_off,
					  len, 0);
		}
	}
	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
			thread_name, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}
static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("dmatest: thread %s exited with status %d\n",
				thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}
static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warning("dmatest: No memory for %s-%s%u\n",
				   dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warning("dmatest: Failed to run thread %s-%s%u\n",
					dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}
*info
,
887 struct dma_chan
*chan
)
889 struct dmatest_chan
*dtc
;
890 struct dma_device
*dma_dev
= chan
->device
;
891 unsigned int thread_count
= 0;
894 dtc
= kmalloc(sizeof(struct dmatest_chan
), GFP_KERNEL
);
896 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan
));
901 INIT_LIST_HEAD(&dtc
->threads
);
903 if (dma_has_cap(DMA_MEMCPY
, dma_dev
->cap_mask
)) {
904 cnt
= dmatest_add_threads(info
, dtc
, DMA_MEMCPY
);
905 thread_count
+= cnt
> 0 ? cnt
: 0;
907 if (dma_has_cap(DMA_XOR
, dma_dev
->cap_mask
)) {
908 cnt
= dmatest_add_threads(info
, dtc
, DMA_XOR
);
909 thread_count
+= cnt
> 0 ? cnt
: 0;
911 if (dma_has_cap(DMA_PQ
, dma_dev
->cap_mask
)) {
912 cnt
= dmatest_add_threads(info
, dtc
, DMA_PQ
);
913 thread_count
+= cnt
> 0 ? cnt
: 0;
916 pr_info("dmatest: Started %u threads using %s\n",
917 thread_count
, dma_chan_name(chan
));
919 list_add_tail(&dtc
->node
, &info
->channels
);
925 static bool filter(struct dma_chan
*chan
, void *param
)
927 struct dmatest_params
*params
= param
;
929 if (!dmatest_match_channel(params
, chan
) ||
930 !dmatest_match_device(params
, chan
->device
))
936 static int __run_threaded_test(struct dmatest_info
*info
)
939 struct dma_chan
*chan
;
940 struct dmatest_params
*params
= &info
->params
;
944 dma_cap_set(DMA_MEMCPY
, mask
);
946 chan
= dma_request_channel(mask
, filter
, params
);
948 err
= dmatest_add_channel(info
, chan
);
950 dma_release_channel(chan
);
951 break; /* add_channel failed, punt */
954 break; /* no more channels available */
955 if (params
->max_channels
&&
956 info
->nr_channels
>= params
->max_channels
)
957 break; /* we have all we need */
963 static int run_threaded_test(struct dmatest_info
*info
)
967 mutex_lock(&info
->lock
);
968 ret
= __run_threaded_test(info
);
969 mutex_unlock(&info
->lock
);
974 static void __stop_threaded_test(struct dmatest_info
*info
)
976 struct dmatest_chan
*dtc
, *_dtc
;
977 struct dma_chan
*chan
;
979 list_for_each_entry_safe(dtc
, _dtc
, &info
->channels
, node
) {
980 list_del(&dtc
->node
);
982 dmatest_cleanup_channel(dtc
);
983 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan
));
984 dma_release_channel(chan
);
987 info
->nr_channels
= 0;
990 static void stop_threaded_test(struct dmatest_info
*info
)
992 mutex_lock(&info
->lock
);
993 __stop_threaded_test(info
);
994 mutex_unlock(&info
->lock
);
static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
	struct dmatest_params *params = &info->params;
	int ret;

	/* Stop any running test first */
	__stop_threaded_test(info);

	if (run == false)
		return 0;

	/* Clear results from previous run */
	result_free(info, NULL);

	/* Copy test parameters */
	memcpy(params, &info->dbgfs_params, sizeof(*params));

	/* Run test with new parameters */
	ret = __run_threaded_test(info);
	if (ret) {
		__stop_threaded_test(info);
		pr_err("dmatest: Can't run test\n");
	}

	return ret;
}
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
		const void __user *from, size_t count)
{
	char tmp[20];
	ssize_t len;

	len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count);
	if (len < 0)
		return len;

	tmp[len] = '\0';
	strlcpy(to, strim(tmp), available);

	return len;
}
static ssize_t dtf_read_channel(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return simple_read_from_buffer(buf, count, ppos,
			info->dbgfs_params.channel,
			strlen(info->dbgfs_params.channel));
}
static ssize_t dtf_write_channel(struct file *file, const char __user *buf,
		size_t size, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return dtf_write_string(info->dbgfs_params.channel,
				sizeof(info->dbgfs_params.channel),
				ppos, buf, size);
}

static const struct file_operations dtf_channel_fops = {
	.read	= dtf_read_channel,
	.write	= dtf_write_channel,
	.open	= simple_open,
	.llseek	= default_llseek,
};
static ssize_t dtf_read_device(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return simple_read_from_buffer(buf, count, ppos,
			info->dbgfs_params.device,
			strlen(info->dbgfs_params.device));
}
static ssize_t dtf_write_device(struct file *file, const char __user *buf,
		size_t size, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	return dtf_write_string(info->dbgfs_params.device,
				sizeof(info->dbgfs_params.device),
				ppos, buf, size);
}

static const struct file_operations dtf_device_fops = {
	.read	= dtf_read_device,
	.write	= dtf_write_device,
	.open	= simple_open,
	.llseek	= default_llseek,
};
static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[3];
	struct dmatest_chan *dtc;
	bool alive = false;

	mutex_lock(&info->lock);
	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done) {
				alive = true;
				break;
			}
		}
	}

	if (alive) {
		buf[0] = 'Y';
	} else {
		__stop_threaded_test(info);
		buf[0] = 'N';
	}

	mutex_unlock(&info->lock);
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dmatest_info *info = file->private_data;
	char buf[16];
	bool bv;
	int ret = 0;

	if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
		return -EFAULT;

	if (strtobool(buf, &bv) == 0) {
		mutex_lock(&info->lock);
		ret = __restart_threaded_test(info, bv);
		mutex_unlock(&info->lock);
	}

	return ret ? ret : count;
}

static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};
static int dtf_results_show(struct seq_file *sf, void *data)
{
	struct dmatest_info *info = sf->private;
	struct dmatest_result *result;
	struct dmatest_thread_result *tr;
	unsigned int i;

	mutex_lock(&info->results_lock);
	list_for_each_entry(result, &info->results, node) {
		list_for_each_entry(tr, &result->results, node) {
			seq_printf(sf, "%s\n",
				thread_result_get(result->name, tr));
			if (tr->type == DMATEST_ET_VERIFY_BUF) {
				for (i = 0; i < tr->vr->error_count; i++) {
					seq_printf(sf, "\t%s\n",
						verify_result_get_one(tr->vr, i));
				}
			}
		}
	}

	mutex_unlock(&info->results_lock);
	return 0;
}
static int dtf_results_open(struct inode *inode, struct file *file)
{
	return single_open(file, dtf_results_show, inode->i_private);
}

static const struct file_operations dtf_results_fops = {
	.open		= dtf_results_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dmatest_register_dbgfs(struct dmatest_info *info)
{
	struct dentry *d;
	struct dmatest_params *params = &info->dbgfs_params;
	int ret = -ENOMEM;

	d = debugfs_create_dir("dmatest", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);
	if (!d)
		goto err_root;

	info->root = d;

	/* Copy initial values */
	memcpy(params, &info->params, sizeof(*params));

	/* Test parameters */

	d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->buf_size);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_channel_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root,
				info, &dtf_device_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->threads_per_chan);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->max_channels);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->iterations);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->xor_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->pq_sources);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root,
				(u32 *)&params->timeout);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	/* Run or stop threaded test */
	d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root,
				info, &dtf_run_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	/* Results of test in progress */
	d = debugfs_create_file("results", S_IRUGO, info->root, info,
				&dtf_results_fops);
	if (IS_ERR_OR_NULL(d))
		goto err_node;

	return 0;

err_node:
	debugfs_remove_recursive(info->root);
err_root:
	pr_err("dmatest: Failed to initialize debugfs\n");
	return ret;
}
static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;
	int ret;

	memset(info, 0, sizeof(*info));

	mutex_init(&info->lock);
	INIT_LIST_HEAD(&info->channels);

	mutex_init(&info->results_lock);
	INIT_LIST_HEAD(&info->results);

	/* Set default parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, test_channel, sizeof(params->channel));
	strlcpy(params->device, test_device, sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;

	ret = dmatest_register_dbgfs(info);
	if (ret)
		return ret;

#ifdef MODULE
	return 0;
#else
	return run_threaded_test(info);
#endif
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
	result_free(info, NULL);
}
module_exit(dmatest_exit);
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");