// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
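
/*
 * Sizing note (illustrative, assuming the common 4 KiB PAGE_SIZE):
 * BUFFER_ORDER = 2 makes BUFFER_SIZE = 4096 << 2 = 16 KiB, i.e. 32
 * sectors of 512 bytes, which is the range the prepare/cleanup helpers
 * below walk over.
 */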
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}
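
/*
 * Usage sketch (illustrative only): a single-block read of sector 0
 * through a caller-owned request would be set up as
 *
 *	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, 1, 512, 0);
 *
 * which selects MMC_READ_SINGLE_BLOCK and clears mrq.stop;
 * mmc_test_buffer_transfer() below is the in-tree user of this pattern.
 */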
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
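
/*
 * For reference: R1_CURRENT_STATE() extracts bits 12:9 of the R1 status
 * word.  E.g. a CMD13 response of 0x00000900 decodes as state 4 (tran)
 * with R1_READY_FOR_DATA (bit 8) set, so mmc_test_busy() returns false;
 * state 7 (prg) or a cleared ready bit keeps the busy loop spinning.
 */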
/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
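
/*
 * Worked example (assuming 4 KiB pages): for max_seg_sz = 64 KiB,
 * max_seg_page_cnt is 16 pages and get_order() of those 16 pages is 4,
 * so each pass first attempts an order-4 compound allocation and steps
 * the order down on failure.  The loop ends without error once at least
 * min_sz bytes are gathered, even if max_sz could not be reached.
 */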
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
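
/*
 * Why the walk runs backwards: pages within one alloc_pages() block are
 * physically contiguous, so handing them out from the highest page down
 * (base + PAGE_SIZE * --cnt) guarantees that consecutive segments are
 * never physically adjacent; the last_addr check additionally skips a
 * page that would directly follow the previous one across blocks.  This
 * is the worst-case layout for a host controller's DMA engine.
 */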
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
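
/*
 * Worked example: 1 MiB in 0.5 s gives bytes = 2^20 * 10^9 and
 * ns = 5 * 10^8, so do_div() yields 2 * 2^20 = 2097152 bytes/s.  For
 * longer intervals (ns > UINT_MAX, i.e. beyond ~4.29 s) the paired
 * halvings shrink the divisor into do_div()'s 32-bit range while
 * leaving the quotient unchanged.
 */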
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
			 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%llu.%09u seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (u64)ts.tv_sec, (u32)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
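
/*
 * Example (illustrative CSD values): a byte-addressed card with
 * csd.capacity = 15360 and read_blkbits = 10 reports
 * 15360 << (10 - 9) = 30720 sectors, i.e. 15 MiB; block-addressed
 * (high capacity) cards return ext_csd.sectors directly.
 */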
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}
static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
				     blksz, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		dev_addr += blocks;
	}

	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}
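
/*
 * The two mmc_test_req buffers ping-pong: while the controller works on
 * prev_mrq, the next request is prepared and mmc_pre_req()'d, and
 * swap(mrq, prev_mrq) exchanges the roles each iteration.  The final
 * mmc_test_start_areq(test, NULL, prev_mrq) merely reaps the last
 * in-flight request.
 */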
/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
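
/*
 * The verification pattern is simply scratch[i] = (u8)i, a repeating
 * 0x00..0xFF ramp; on writes, the sectors beyond the transfer must still
 * hold the 0xDF fill laid down by __mmc_test_prepare(), which catches
 * both corrupted data and writes that strayed past the requested length.
 */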
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}
#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}
/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
						 dev_addr, t->blocks, 512,
						 write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}
/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
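
/*
 * Clamping example (illustrative host limits): with pref_erase = 4 MiB,
 * max_blk_count = 65535, max_req_size = 512 KiB, max_segs = 128 and
 * max_seg_sz = 64 KiB, the area stays 4 MiB but max_tfr is cut to
 * 512 KiB by max_req_size (the 128 * 64 KiB = 8 MiB segment budget is
 * not the binding limit), so each timed request moves at most 1024
 * sectors.
 */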
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}
/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
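
/*
 * This is the classic ANSI C rand() linear congruential generator
 * (multiplier 1103515245, increment 12345); (rnd_next >> 16) & 0x7fff
 * keeps 15 of the higher-quality bits and (r * rnd_cnt) >> 15 scales
 * them to [0, rnd_cnt) without a modulo.  The fixed seed (rnd_next = 1)
 * makes every run replay the same "random" address sequence.
 */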
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec64 ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	ktime_get_ts64(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		ktime_get_ts64(&ts2);
		ts = timespec64_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec64 ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	ktime_get_ts64(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
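
/*
 * Address arithmetic, worked through: on a 32 GiB card, capacity / 4 is
 * 16777216 sectors; the & 0xffff0000 mask then clears the low 16 bits of
 * the sector address, aligning the start to a 65536-sector
 * (65536 * 512 byte) boundary.  Capping tot_sz at the quarter-point byte
 * offset keeps the whole sequential pass inside the first half of the
 * card.
 */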
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * eMMC hardware reset.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset(host);
	if (!err) {
		/*
		 * Reset will re-enable the card's command queue, but tests
		 * expect it to be disabled.
		 */
		if (card->ext_csd.cmdq_en)
			mmc_cmdq_disable(card);
		return RESULT_OK;
	} else if (err == -EOPNOTSUPP) {
		return RESULT_UNSUP_HOST;
	}

	return RESULT_FAIL;
}
static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		ret = mmc_test_start_areq(test, mrq, NULL);
		if (ret)
			goto out_free;
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq) {
		ret = mmc_test_start_areq(test, NULL, mrq);
	} else {
		mmc_wait_for_req_done(test->card->host, mrq);
	}

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret) {
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(test->card->host), status, cmd_ret);
	}

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(test->card->host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}
static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}

static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
,
2846 .run
= mmc_test_profile_sglen_wr_blocking_perf
,
2847 .cleanup
= mmc_test_area_cleanup
,
2851 .name
= "Write performance non-blocking req 1 to 512 sg elems",
2852 .prepare
= mmc_test_area_prepare
,
2853 .run
= mmc_test_profile_sglen_wr_nonblock_perf
,
2854 .cleanup
= mmc_test_area_cleanup
,
2858 .name
= "Read performance blocking req 1 to 512 sg elems",
2859 .prepare
= mmc_test_area_prepare
,
2860 .run
= mmc_test_profile_sglen_r_blocking_perf
,
2861 .cleanup
= mmc_test_area_cleanup
,
2865 .name
= "Read performance non-blocking req 1 to 512 sg elems",
2866 .prepare
= mmc_test_area_prepare
,
2867 .run
= mmc_test_profile_sglen_r_nonblock_perf
,
2868 .cleanup
= mmc_test_area_cleanup
,
2872 .name
= "Reset test",
2873 .run
= mmc_test_reset
,
2877 .name
= "Commands during read - no Set Block Count (CMD23)",
2878 .prepare
= mmc_test_area_prepare
,
2879 .run
= mmc_test_cmds_during_read
,
2880 .cleanup
= mmc_test_area_cleanup
,
2884 .name
= "Commands during write - no Set Block Count (CMD23)",
2885 .prepare
= mmc_test_area_prepare
,
2886 .run
= mmc_test_cmds_during_write
,
2887 .cleanup
= mmc_test_area_cleanup
,
2891 .name
= "Commands during read - use Set Block Count (CMD23)",
2892 .prepare
= mmc_test_area_prepare
,
2893 .run
= mmc_test_cmds_during_read_cmd23
,
2894 .cleanup
= mmc_test_area_cleanup
,
2898 .name
= "Commands during write - use Set Block Count (CMD23)",
2899 .prepare
= mmc_test_area_prepare
,
2900 .run
= mmc_test_cmds_during_write_cmd23
,
2901 .cleanup
= mmc_test_area_cleanup
,
2905 .name
= "Commands during non-blocking read - use Set Block Count (CMD23)",
2906 .prepare
= mmc_test_area_prepare
,
2907 .run
= mmc_test_cmds_during_read_cmd23_nonblock
,
2908 .cleanup
= mmc_test_area_cleanup
,
2912 .name
= "Commands during non-blocking write - use Set Block Count (CMD23)",
2913 .prepare
= mmc_test_area_prepare
,
2914 .run
= mmc_test_cmds_during_write_cmd23_nonblock
,
2915 .cleanup
= mmc_test_area_cleanup
,
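
/*
 * A new test is added by appending an entry to the table above. A minimal
 * sketch (mmc_test_my_case is a hypothetical run handler, not part of this
 * driver):
 *
 *	{
 *		.name = "My test",
 *		.prepare = mmc_test_area_prepare,
 *		.run = mmc_test_my_case,
 *		.cleanup = mmc_test_area_cleanup,
 *	},
 *
 * .prepare and .cleanup are optional; .run should return RESULT_OK,
 * RESULT_FAIL, RESULT_UNSUP_HOST, RESULT_UNSUP_CARD or a negative errno,
 * which mmc_test_run() below translates into a log message.
 */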

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append the container to the global list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to the created container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
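
/*
 * Illustrative kernel log produced by the loop above (host and card names
 * depend on the system; values here are made up):
 *
 *   mmc0: Starting tests of card mmc0:0001...
 *   mmc0: Test case 1. Basic write (no data verification)...
 *   mmc0: Result: OK
 *   mmc0: Tests completed.
 */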

static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
				tr->count, tr->sectors,
				(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
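
/*
 * The per-transfer lines printed above are, in order: number of groups
 * transferred, sectors per group, elapsed time (seconds.nanoseconds),
 * transfer rate in bytes per second, and I/O operations per second (stored
 * scaled by 100, hence the two-digit fraction). A made-up sample line for
 * one 32 MiB transfer completing in half a second:
 *
 *   1 65536 0.500000000 67108864 2.00
 */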

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with the given card, so that only
	 * data from the last run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
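
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug (the
 * mmc0/mmc0:0001 path below is an example and varies per host and card):
 *
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/testlist   # enumerate tests
 *   echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test  # run test 1 (0 = all)
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test       # results of last run
 */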

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtf_testlist);

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mtf_testlist_fops);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stale data if a card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");