/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
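/*
 * Worked example (illustrative, assuming 4 KiB pages): BUFFER_SIZE is
 * PAGE_SIZE << 2 = 16 KiB, i.e. 32 sectors of 512 bytes, so the
 * prepare/cleanup helpers below touch the first 32 sectors of the card.
 * TEST_AREA_MAX_SIZE is 128 * 1024 * 1024 = 128 MiB.
 */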
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}
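/*
 * Illustrative note (not part of the driver logic): when host and card both
 * support CMD23 and the command is a multi-block transfer, the request
 * prepared above goes out as CMD23(blocks) followed by the data command,
 * and no open-ended stop is needed; otherwise mrq->sbc is cleared and the
 * open-ended form with a CMD12 stop is used instead.
 */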
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
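/*
 * Worked example (values from the R1 status layout): resp[0] bit 8 is
 * READY_FOR_DATA and bits 12:9 hold the current state.  A response of
 * 0x00000900 decodes as ready-for-data in the Tran state (state 4), so
 * mmc_test_busy() returns false; 0x00000E00 (state 7, Prg) returns true.
 */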
/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
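/*
 * Illustrative example (hypothetical numbers): a request for min_sz = 64 KiB,
 * max_sz = 1 MiB, max_segs = 4 and max_seg_sz = 256 KiB first tries order-6
 * allocations (256 KiB with 4 KiB pages), dropping to smaller orders under
 * memory pressure; it succeeds once at least 64 KiB has been gathered and
 * gives up (freeing everything) if that minimum cannot be met.
 */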
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
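/*
 * Usage note (sketch): with repeat set, a 64 KiB allocation can back a
 * 1 MiB mapping by walking mem->arr sixteen times, which is why
 * mmc_test_area_init() tolerates allocations smaller than the maximum
 * transfer size.
 */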
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
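/*
 * Worked example: 1 MiB moved in 0.25 s.  ns = 250000000 already fits in
 * 32 bits, so no halving is needed and do_div() divides directly:
 * 1048576 * 10^9 / 250000000 = 4194304 bytes/s, i.e. 4 MiB/s.
 */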
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
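/*
 * Worked example (hypothetical CSD values): a standard-capacity card with
 * csd.capacity = 0x3B9F8 and read_blkbits = 10 reports
 * 0x3B9F8 << (10 - 9) sectors, while a block-addressed (high-capacity) MMC
 * simply reports ext_csd.sectors.
 */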
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		swap(cur_areq, other_areq);
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &ret);

err:
	return ret;
}
/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
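/*
 * Worked example for the write-back verification above: blocks = 3 and
 * blksz = 512 give 1536 bytes, so sectors = (1536 + 511) / 512 = 3; because
 * 3 * 512 == 1536 exactly, one extra sector is read back so the 0xDF filler
 * written by __mmc_test_prepare() can be checked after the payload.
 */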
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}
#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));

	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
				 dev_addr, t->blocks, 512, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}
/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
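/*
 * Worked example (hypothetical host limits): with pref_erase = 4 MiB,
 * max_blk_count = 65535, max_req_size = 512 KiB, max_segs = 128 and
 * max_seg_sz = 64 KiB, max_sz settles at 4 MiB while max_tfr is clamped
 * to 512 KiB by max_req_size and stays there, since 128 * 64 KiB = 8 MiB
 * does not constrain it further.
 */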
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
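/*
 * Worked example: 1103515245 and 12345 are the classic C library LCG
 * constants.  Bits 30:16 of rnd_next give r in [0, 32767], and
 * (r * rnd_cnt) >> 15 scales that into [0, rnd_cnt).  E.g. r = 16384 with
 * rnd_cnt = 100 yields (16384 * 100) >> 15 = 50.
 */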
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 0x10000-sector (32MiB) boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
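/*
 * Worked example: for the 10 MiB pass of mmc_test_large_seq_perf() below
 * with max_tfr = 512 KiB, cnt = 10 MiB / 512 KiB = 20 back-to-back
 * transfers, each advancing dev_addr by ssz = 1024 sectors.
 */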
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 0x10000-sector (32MiB) boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}
static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
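/*
 * Usage note (sketch): the sg_len profile tests below always issue 512 KiB
 * requests; with rw->sg_len[i] = 64, mmc_test_map_sg() is asked for at
 * least 64 segments, so each segment carries at most
 * ALIGN(512 KiB / 64, 512) = 8 KiB of the request.
 */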
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * eMMC hardware reset.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset(host);
	if (!err)
		return RESULT_OK;
	else if (err == -EOPNOTSUPP)
		return RESULT_UNSUP_HOST;

	return RESULT_FAIL;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);

	if (rq) {
		rq->mrq.cmd = &rq->cmd;
		rq->mrq.data = &rq->data;
		rq->mrq.stop = &rq->stop;
	}

	return rq;
}

static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_async_req areq;
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	areq.mrq = mrq;
	areq.err_check = mmc_test_check_result_async;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		mmc_start_req(host, &areq, &ret);
		if (ret)
			goto out_free;
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq)
		mmc_start_req(host, NULL, &ret);
	else
		mmc_wait_for_req_done(test->card->host, mrq);

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret)
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(test->card->host), status, cmd_ret);

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(test->card->host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}
static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}

static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},
#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */
2717 .name
= "Best-case read performance",
2718 .prepare
= mmc_test_area_prepare_fill
,
2719 .run
= mmc_test_best_read_performance
,
2720 .cleanup
= mmc_test_area_cleanup
,
2724 .name
= "Best-case write performance",
2725 .prepare
= mmc_test_area_prepare_erase
,
2726 .run
= mmc_test_best_write_performance
,
2727 .cleanup
= mmc_test_area_cleanup
,
2731 .name
= "Best-case read performance into scattered pages",
2732 .prepare
= mmc_test_area_prepare_fill
,
2733 .run
= mmc_test_best_read_perf_max_scatter
,
2734 .cleanup
= mmc_test_area_cleanup
,
2738 .name
= "Best-case write performance from scattered pages",
2739 .prepare
= mmc_test_area_prepare_erase
,
2740 .run
= mmc_test_best_write_perf_max_scatter
,
2741 .cleanup
= mmc_test_area_cleanup
,
2745 .name
= "Single read performance by transfer size",
2746 .prepare
= mmc_test_area_prepare_fill
,
2747 .run
= mmc_test_profile_read_perf
,
2748 .cleanup
= mmc_test_area_cleanup
,
2752 .name
= "Single write performance by transfer size",
2753 .prepare
= mmc_test_area_prepare
,
2754 .run
= mmc_test_profile_write_perf
,
2755 .cleanup
= mmc_test_area_cleanup
,
2759 .name
= "Single trim performance by transfer size",
2760 .prepare
= mmc_test_area_prepare_fill
,
2761 .run
= mmc_test_profile_trim_perf
,
2762 .cleanup
= mmc_test_area_cleanup
,
2766 .name
= "Consecutive read performance by transfer size",
2767 .prepare
= mmc_test_area_prepare_fill
,
2768 .run
= mmc_test_profile_seq_read_perf
,
2769 .cleanup
= mmc_test_area_cleanup
,
2773 .name
= "Consecutive write performance by transfer size",
2774 .prepare
= mmc_test_area_prepare
,
2775 .run
= mmc_test_profile_seq_write_perf
,
2776 .cleanup
= mmc_test_area_cleanup
,
2780 .name
= "Consecutive trim performance by transfer size",
2781 .prepare
= mmc_test_area_prepare
,
2782 .run
= mmc_test_profile_seq_trim_perf
,
2783 .cleanup
= mmc_test_area_cleanup
,
2787 .name
= "Random read performance by transfer size",
2788 .prepare
= mmc_test_area_prepare
,
2789 .run
= mmc_test_random_read_perf
,
2790 .cleanup
= mmc_test_area_cleanup
,
2794 .name
= "Random write performance by transfer size",
2795 .prepare
= mmc_test_area_prepare
,
2796 .run
= mmc_test_random_write_perf
,
2797 .cleanup
= mmc_test_area_cleanup
,
2801 .name
= "Large sequential read into scattered pages",
2802 .prepare
= mmc_test_area_prepare
,
2803 .run
= mmc_test_large_seq_read_perf
,
2804 .cleanup
= mmc_test_area_cleanup
,
2808 .name
= "Large sequential write from scattered pages",
2809 .prepare
= mmc_test_area_prepare
,
2810 .run
= mmc_test_large_seq_write_perf
,
2811 .cleanup
= mmc_test_area_cleanup
,
2815 .name
= "Write performance with blocking req 4k to 4MB",
2816 .prepare
= mmc_test_area_prepare
,
2817 .run
= mmc_test_profile_mult_write_blocking_perf
,
2818 .cleanup
= mmc_test_area_cleanup
,
2822 .name
= "Write performance with non-blocking req 4k to 4MB",
2823 .prepare
= mmc_test_area_prepare
,
2824 .run
= mmc_test_profile_mult_write_nonblock_perf
,
2825 .cleanup
= mmc_test_area_cleanup
,
2829 .name
= "Read performance with blocking req 4k to 4MB",
2830 .prepare
= mmc_test_area_prepare
,
2831 .run
= mmc_test_profile_mult_read_blocking_perf
,
2832 .cleanup
= mmc_test_area_cleanup
,
2836 .name
= "Read performance with non-blocking req 4k to 4MB",
2837 .prepare
= mmc_test_area_prepare
,
2838 .run
= mmc_test_profile_mult_read_nonblock_perf
,
2839 .cleanup
= mmc_test_area_cleanup
,
2843 .name
= "Write performance blocking req 1 to 512 sg elems",
2844 .prepare
= mmc_test_area_prepare
,
2845 .run
= mmc_test_profile_sglen_wr_blocking_perf
,
2846 .cleanup
= mmc_test_area_cleanup
,
2850 .name
= "Write performance non-blocking req 1 to 512 sg elems",
2851 .prepare
= mmc_test_area_prepare
,
2852 .run
= mmc_test_profile_sglen_wr_nonblock_perf
,
2853 .cleanup
= mmc_test_area_cleanup
,
2857 .name
= "Read performance blocking req 1 to 512 sg elems",
2858 .prepare
= mmc_test_area_prepare
,
2859 .run
= mmc_test_profile_sglen_r_blocking_perf
,
2860 .cleanup
= mmc_test_area_cleanup
,
2864 .name
= "Read performance non-blocking req 1 to 512 sg elems",
2865 .prepare
= mmc_test_area_prepare
,
2866 .run
= mmc_test_profile_sglen_r_nonblock_perf
,
2867 .cleanup
= mmc_test_area_cleanup
,
2871 .name
= "Reset test",
2872 .run
= mmc_test_reset
,
2876 .name
= "Commands during read - no Set Block Count (CMD23)",
2877 .prepare
= mmc_test_area_prepare
,
2878 .run
= mmc_test_cmds_during_read
,
2879 .cleanup
= mmc_test_area_cleanup
,
2883 .name
= "Commands during write - no Set Block Count (CMD23)",
2884 .prepare
= mmc_test_area_prepare
,
2885 .run
= mmc_test_cmds_during_write
,
2886 .cleanup
= mmc_test_area_cleanup
,
2890 .name
= "Commands during read - use Set Block Count (CMD23)",
2891 .prepare
= mmc_test_area_prepare
,
2892 .run
= mmc_test_cmds_during_read_cmd23
,
2893 .cleanup
= mmc_test_area_cleanup
,
2897 .name
= "Commands during write - use Set Block Count (CMD23)",
2898 .prepare
= mmc_test_area_prepare
,
2899 .run
= mmc_test_cmds_during_write_cmd23
,
2900 .cleanup
= mmc_test_area_cleanup
,
2904 .name
= "Commands during non-blocking read - use Set Block Count (CMD23)",
2905 .prepare
= mmc_test_area_prepare
,
2906 .run
= mmc_test_cmds_during_read_cmd23_nonblock
,
2907 .cleanup
= mmc_test_area_cleanup
,
2911 .name
= "Commands during non-blocking write - use Set Block Count (CMD23)",
2912 .prepare
= mmc_test_area_prepare
,
2913 .run
= mmc_test_cmds_during_write_cmd23_nonblock
,
2914 .cleanup
= mmc_test_area_cleanup
,

static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);
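
/*
 * Run a single test case (1-based index into mmc_test_cases[]), or every
 * test case when @testcase is 0 - see the (i + 1) != testcase filter below.
 */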
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append the container to the global list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to the created container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
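
/*
 * Free the stored results belonging to @card, or those of every card when
 * @card is NULL (note the "card && gr->card != card" filter below).
 */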
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);
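
/*
 * Reading the "test" debugfs file prints one "Test <n>: <result>" line per
 * stored result, followed by one line per transfer measurement of the form:
 *
 *   <count> <sectors> <seconds>.<nanoseconds> <rate> <iops>
 *
 * where the stored iops value is scaled by 100, hence the %u.%02u below.
 */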
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
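
/*
 * Writing a test number to the "test" debugfs file runs the matching test
 * case(s) synchronously; any results stored for the card are freed first so
 * that only data from the most recent run is reported.
 */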
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card so that only
	 * data from the most recent run remains.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_printf(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}

static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}

static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
			card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
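
/*
 * Illustrative usage from userspace; the exact debugfs directory depends on
 * the host and card (e.g. /sys/kernel/debug/mmc0/mmc0:0001):
 *
 *   cat testlist    # list the available test cases
 *   echo 0 > test   # run all test cases
 *   echo 15 > test  # run only test case 15
 *   cat test        # read back the stored results
 */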

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if the card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");