/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER	2
#define BUFFER_SIZE	(PAGE_SIZE << BUFFER_ORDER)

#define TEST_ALIGN_END	8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/
/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
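/*
 * A minimal caller-side sketch (assumed usage; cf. mmc_test_simple_transfer()
 * below): the command, data and stop structures must be chained into the
 * request before mmc_test_prepare_mrq() is called, e.g.
 *
 *	struct mmc_request mrq = {0};
 *	struct mmc_command cmd = {0}, stop = {0};
 *	struct mmc_data data = {0};
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mrq.stop = &stop;
 *	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, 1, 512, 1);
 */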
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = timespec_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		ns >>= 1;
		bytes >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
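/*
 * Worked example of the arithmetic above: transferring 1 MiB in 0.5 s gives
 * bytes * 10^9 = 1048576 * 10^9 and ns = 5 * 10^8, so the result is
 * 2097152 bytes per second, i.e. 2 MiB/s (reported by mmc_test_print_rate()
 * as 2097 kB/s and 2048 KiB/s).
 */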
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
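/*
 * Worked example: for a byte-addressed card whose CSD advertises
 * read_blkbits = 11 (2048-byte read blocks), each csd.capacity unit covers
 * 2048/512 = 4 sectors, hence the shift by (read_blkbits - 9).
 */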
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/
/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/
/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
		struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &ret);

err:
	return ret;
}
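/*
 * Note on the loop above: the two mmc_test_async_req structures act as a
 * double buffer - while the host works on the request submitted via
 * mmc_start_req(), the request that just completed is re-initialized and
 * prepared, so the next transfer can be issued without waiting for the
 * previous completion path to finish.
 */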
/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}
#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}
/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
				 dev_addr, t->blocks, 512, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}
/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}
/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
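/*
 * Sizing example (a sketch, assuming a card with pref_erase = 6144 sectors,
 * i.e. 3 MiB): sz = 3 MiB, so max_sz grows in 3 MiB steps to 6 MiB, the
 * first multiple of the preferred erase size that is at least 4 MiB, and is
 * then trimmed back only if it exceeded TEST_AREA_MAX_SIZE.
 */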
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}
/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
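/*
 * The generator above is the classic ANSI C linear congruential generator
 * (multiplier 1103515245, increment 12345).  Seeding rnd_next statically
 * keeps the sequence deterministic, so repeated runs visit the same
 * pseudo-random addresses; mmc_test_random_perf() below relies on this to
 * replay identical I/O.
 */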
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}
static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}
static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}
/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
/*
 * eMMC hardware reset.
 */
static int mmc_test_hw_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	if (!mmc_card_mmc(card) || !mmc_can_reset(card))
		return RESULT_UNSUP_CARD;

	err = mmc_hw_reset(host);
	if (!err)
		return RESULT_OK;
	else if (err == -EOPNOTSUPP)
		return RESULT_UNSUP_HOST;

	return RESULT_FAIL;
}
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "eMMC hardware reset",
		.run = mmc_test_hw_reset,
	},
};
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}
static LIST_HEAD(mmc_test_file_test);
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
			card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
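/*
 * Example usage from user space (a sketch - the exact debugfs path depends
 * on the host and card instance):
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	# echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * Writing a test number runs that test case; reading "test" back prints
 * the results collected in mmc_test_result.
 */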
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");