/*
 *  linux/drivers/mmc/card/mmc_test.c
 *
 *  Copyright 2007-2008 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of sector groups checked
 * @sectors: number of sectors in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};
/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card	*card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card	*card;
	struct dentry *file;
};
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};
/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
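
/*
 * For reference: a multi-block request built above uses CMD25
 * (MMC_WRITE_MULTIPLE_BLOCK) or CMD18 (MMC_READ_MULTIPLE_BLOCK) and is
 * terminated by CMD12 (MMC_STOP_TRANSMISSION), while a single-block
 * request uses CMD24/CMD17 and needs no stop command.  Byte-addressed
 * cards take a byte offset in cmd->arg, hence the "<< 9" above;
 * block-addressed (high-capacity) cards take a sector number directly.
 */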
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == 7);
}
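
/*
 * In the R1 card status, bit 8 is READY_FOR_DATA and bits 12:9 encode the
 * current state; state 7 is "prg" (programming).  So the card counts as
 * busy while it is either not ready for data or still programming.
 */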
/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd;

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				printk(KERN_INFO "%s: Warning: Host did not "
					"wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}
/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}
/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
			   GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = timespec_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
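
/*
 * Worked example: 1 MiB (1048576 bytes) transferred in 0.5 s gives
 * 1048576 * 10^9 / 500000000 = 2097152 bytes per second.  The halving
 * loop above only trades precision for range, shrinking ns (and bytes
 * with it) until ns fits in 32 bits so the do_div() cannot overflow.
 */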
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
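
/*
 * Note the two rate columns: rate / 1000 is kB/s (decimal) while
 * rate / 1024 is KiB/s (binary).  IOPS is carried as a fixed-point value
 * scaled by 100, so for example iops == 250 prints as "2.50 IOPS".
 */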
/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec *ts1,
				    struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}
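
/*
 * Worked example (illustrative numbers) for a byte-addressed card: with
 * csd.capacity == 3840 and csd.read_blkbits == 10, the card holds
 * 3840 << (10 - 9) = 7680 sectors of 512 bytes, i.e. 3.75 MiB.
 * Block-addressed (high-capacity) MMC cards instead report their sector
 * count directly in EXT_CSD.
 */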
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/
/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/
/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}
/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	BUG_ON(!mrq || !mrq->cmd || !mrq->data);

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}
/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
		blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}
/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_data data;

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}
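
/*
 * The transfer size chosen above is nominally two pages, clamped by every
 * host limit that could truncate the request: the maximum request size,
 * the maximum size of a single segment, and the maximum block count times
 * the 512-byte block size.  For example (hypothetical host), with 4 KiB
 * pages and a controller limited to 8 blocks per request,
 * size = min(8192, ..., 8 * 512) = 4096.  The same clamping is repeated
 * in the other multi-block tests below.
 */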
static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}
#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */
/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}
/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
/*
 * Map and transfer bytes.
 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}
/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				1, 0, 0);
}
/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
			 MMC_ERASE_ARG);
}
/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	mmc_test_free_mem(t->mem);

	return 0;
}
/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
				write, max_scatter, 1);
}
/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}
/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}
/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
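
/*
 * This is the classic ANSI C rand() linear congruential generator:
 * next = next * 1103515245 + 12345, with bits 30:16 used as a 15-bit
 * random value r.  (r * rnd_cnt) >> 15 then scales r into the range
 * 0..rnd_cnt-1; for example, rnd_cnt == 8 maps r == 0x7fff to 7.
 */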
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = test->area.max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}
/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = test->area.max_tfr;
	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to a 0x10000-sector (32MiB) boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
static const struct mmc_test_case mmc_test_cases[] = {
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}
static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	char lbuf[12];
	long testcase;

	if (count >= sizeof(lbuf))
		return -EINVAL;

	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;
	lbuf[count] = '\0';

	if (strict_strtol(lbuf, 10, &testcase))
		return -EINVAL;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
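
/*
 * Typical usage from userspace (paths are illustrative and depend on the
 * host and card instance): write a test case number to the debugfs file
 * to run that test, or 0 to run them all, then read the same file back
 * for the numeric results, e.g.
 *
 *	echo 1 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */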
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void mmc_test_free_file_test(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
static int mmc_test_register_file_test(struct mmc_card *card)
{
	int ret = 0;
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	mutex_lock(&mmc_test_lock);

	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_file_test(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};

static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}

module_init(mmc_test_init);
module_exit(mmc_test_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");