/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
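/*
 * Note: with BUFFER_ORDER 2 the transfer buffers above span four pages,
 * i.e. 16 KiB on systems with 4 KiB pages.
 */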
/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
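/*
 * Illustration: a 16-block, 512-byte write prepared here issues
 * MMC_WRITE_MULTIPLE_BLOCK (CMD25) terminated by MMC_STOP_TRANSMISSION
 * (CMD12); on a byte-addressed card the sector address is first shifted
 * left by 9 into a byte offset.
 */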
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {0};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				printk(KERN_INFO "%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
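/*
 * Sketch of the fallback behaviour above (assuming 4 KiB pages): for
 * max_seg_sz = 64 KiB each attempt starts at order 4 (16 pages) and
 * retries at progressively smaller orders if allocation fails; the
 * function gives up only when fewer than min_sz bytes were gathered.
 */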
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
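/*
 * Note: the walk above starts at the highest page of each allocation and
 * moves downwards, skipping any page that would directly follow the
 * previous entry, so no two scatterlist entries are physically contiguous.
 */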
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
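/*
 * Worked example: 1 MiB transferred in 0.5 s gives bytes = 2^20 * 10^9
 * and ns = 5 * 10^8, so do_div() yields 2097152 bytes/sec; for longer
 * runs both values are halved in lockstep until ns fits in 32 bits,
 * which preserves the quotient within do_div()'s divisor limit.
 */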
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
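/*
 * Example: a byte-addressed card with csd.capacity = 0x1000 and
 * read_blkbits = 11 reports 0x1000 << (11 - 9) = 16384 sectors, while
 * block-addressed (high capacity) MMC uses ext_csd.sectors directly.
 */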
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

static int mmc_test_check_result_async(struct mmc_card *card,
				       struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
	struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_nonblock_reset(struct mmc_request *mrq,
				    struct mmc_command *cmd,
				    struct mmc_command *stop,
				    struct mmc_data *data)
{
	memset(mrq, 0, sizeof(struct mmc_request));
	memset(cmd, 0, sizeof(struct mmc_command));
	memset(data, 0, sizeof(struct mmc_data));
	memset(stop, 0, sizeof(struct mmc_command));

	mrq->cmd = cmd;
	mrq->data = data;
	mrq->stop = stop;
}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      struct scatterlist *sg, unsigned sg_len,
				      unsigned dev_addr, unsigned blocks,
				      unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			return ret;

		if (done_areq) {
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
}
/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
				       blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}
#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec ts1, ts2;
	int ret = 0;
	int i;
	struct mmc_test_area *t = &test->area;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
				dev_addr, t->blocks, 512, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
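/*
 * Sizing sketch (assumed values): with pref_erase = 4096 sectors (2 MiB),
 * max_sz grows in 2 MiB steps until it reaches 4 MiB, and is trimmed back
 * only if it exceeded TEST_AREA_MAX_SIZE; the area is then anchored at
 * mid-card, aligned down to a multiple of max_sz in sectors.
 */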
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	unsigned int r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
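/*
 * This is the classic ANSI C rand() linear congruential generator
 * (multiplier 1103515245, increment 12345): bits 16..30 form a 15-bit
 * value r, and (r * rnd_cnt) >> 15 scales it into [0, rnd_cnt).
 */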
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	printk(KERN_INFO "[%s] error\n", __func__);
	return ret;
}

static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		printk(KERN_INFO "error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},
};
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);

static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}

static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}
static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}

static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	char lbuf[12];
	long testcase;

	if (count >= sizeof(lbuf))
		return -EINVAL;

	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	lbuf[count] = '\0';

	if (strict_strtol(lbuf, 10, &testcase))
		return -EINVAL;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card.  Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}

static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}

static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, mode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
			card, fops);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}

static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}

static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}

static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");
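/*
 * Usage sketch (typical debugfs paths; assumes debugfs is mounted at
 * /sys/kernel/debug and the card enumerated as mmc0:0001):
 *
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/testlist    # list test cases
 *   echo 7 > /sys/kernel/debug/mmc0/mmc0:0001/test   # run test case 7
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test        # show saved results
 *
 * Writing 0 runs every test case in sequence.
 */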