1 // SPDX-License-Identifier: GPL-2.0-only
3 * ms_block.c - Sony MemoryStick (legacy) storage support
5 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
7 * Minor portions of the driver were copied from mspro_block.c which is
8 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
10 #define DRIVER_NAME "ms_block"
11 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
13 #include <linux/module.h>
14 #include <linux/blk-mq.h>
15 #include <linux/memstick.h>
16 #include <linux/idr.h>
17 #include <linux/hdreg.h>
18 #include <linux/delay.h>
19 #include <linux/slab.h>
20 #include <linux/random.h>
21 #include <linux/bitmap.h>
22 #include <linux/scatterlist.h>
23 #include <linux/jiffies.h>
24 #include <linux/workqueue.h>
25 #include <linux/mutex.h>
29 static int cache_flush_timeout
= 1000;
30 static bool verify_writes
;
33 * Copies section of 'sg_from' starting from offset 'offset' and with length
34 * 'len' To another scatterlist of to_nents enties
36 static size_t msb_sg_copy(struct scatterlist
*sg_from
,
37 struct scatterlist
*sg_to
, int to_nents
, size_t offset
, size_t len
)
42 if (offset
>= sg_from
->length
) {
43 if (sg_is_last(sg_from
))
46 offset
-= sg_from
->length
;
47 sg_from
= sg_next(sg_from
);
51 copied
= min(len
, sg_from
->length
- offset
);
52 sg_set_page(sg_to
, sg_page(sg_from
),
53 copied
, sg_from
->offset
+ offset
);
58 if (sg_is_last(sg_from
) || !len
)
61 sg_to
= sg_next(sg_to
);
63 sg_from
= sg_next(sg_from
);
66 while (len
> sg_from
->length
&& to_nents
--) {
67 len
-= sg_from
->length
;
68 copied
+= sg_from
->length
;
70 sg_set_page(sg_to
, sg_page(sg_from
),
71 sg_from
->length
, sg_from
->offset
);
73 if (sg_is_last(sg_from
) || !len
)
76 sg_from
= sg_next(sg_from
);
77 sg_to
= sg_next(sg_to
);
80 if (len
&& to_nents
) {
81 sg_set_page(sg_to
, sg_page(sg_from
), len
, sg_from
->offset
);
90 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
91 * to linear buffer of length 'len' at address 'buffer'
92 * Returns 0 if equal and -1 otherwice
94 static int msb_sg_compare_to_buffer(struct scatterlist
*sg
,
95 size_t offset
, u8
*buffer
, size_t len
)
97 int retval
= 0, cmplen
;
98 struct sg_mapping_iter miter
;
100 sg_miter_start(&miter
, sg
, sg_nents(sg
),
101 SG_MITER_ATOMIC
| SG_MITER_FROM_SG
);
103 while (sg_miter_next(&miter
) && len
> 0) {
104 if (offset
>= miter
.length
) {
105 offset
-= miter
.length
;
109 cmplen
= min(miter
.length
- offset
, len
);
110 retval
= memcmp(miter
.addr
+ offset
, buffer
, cmplen
) ? -1 : 0;
122 sg_miter_stop(&miter
);
/* Get zone at which block with logical address 'lba' lives.
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which in first
 * zone 494 are used and 496 are for all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	/* First zone holds only 494 logical blocks (0..493) */
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}
140 /* Get zone of physical block. Trivial */
141 static int msb_get_zone_from_pba(int pba
)
143 return pba
/ MS_BLOCKS_IN_ZONE
;
146 /* Debug test to validate free block counts */
147 static int msb_validate_used_block_bitmap(struct msb_data
*msb
)
149 int total_free_blocks
= 0;
155 for (i
= 0; i
< msb
->zone_count
; i
++)
156 total_free_blocks
+= msb
->free_block_count
[i
];
158 if (msb
->block_count
- bitmap_weight(msb
->used_blocks_bitmap
,
159 msb
->block_count
) == total_free_blocks
)
162 pr_err("BUG: free block counts don't match the bitmap");
163 msb
->read_only
= true;
167 /* Mark physical block as used */
168 static void msb_mark_block_used(struct msb_data
*msb
, int pba
)
170 int zone
= msb_get_zone_from_pba(pba
);
172 if (test_bit(pba
, msb
->used_blocks_bitmap
)) {
174 "BUG: attempt to mark already used pba %d as used", pba
);
175 msb
->read_only
= true;
179 if (msb_validate_used_block_bitmap(msb
))
182 /* No races because all IO is single threaded */
183 __set_bit(pba
, msb
->used_blocks_bitmap
);
184 msb
->free_block_count
[zone
]--;
187 /* Mark physical block as free */
188 static void msb_mark_block_unused(struct msb_data
*msb
, int pba
)
190 int zone
= msb_get_zone_from_pba(pba
);
192 if (!test_bit(pba
, msb
->used_blocks_bitmap
)) {
193 pr_err("BUG: attempt to mark already unused pba %d as unused" , pba
);
194 msb
->read_only
= true;
198 if (msb_validate_used_block_bitmap(msb
))
201 /* No races because all IO is single threaded */
202 __clear_bit(pba
, msb
->used_blocks_bitmap
);
203 msb
->free_block_count
[zone
]++;
206 /* Invalidate current register window */
207 static void msb_invalidate_reg_window(struct msb_data
*msb
)
209 msb
->reg_addr
.w_offset
= offsetof(struct ms_register
, id
);
210 msb
->reg_addr
.w_length
= sizeof(struct ms_id_register
);
211 msb
->reg_addr
.r_offset
= offsetof(struct ms_register
, id
);
212 msb
->reg_addr
.r_length
= sizeof(struct ms_id_register
);
213 msb
->addr_valid
= false;
216 /* Start a state machine */
217 static int msb_run_state_machine(struct msb_data
*msb
, int (*state_func
)
218 (struct memstick_dev
*card
, struct memstick_request
**req
))
220 struct memstick_dev
*card
= msb
->card
;
222 WARN_ON(msb
->state
!= -1);
223 msb
->int_polling
= false;
227 memset(&card
->current_mrq
, 0, sizeof(card
->current_mrq
));
229 card
->next_request
= state_func
;
230 memstick_new_req(card
->host
);
231 wait_for_completion(&card
->mrq_complete
);
233 WARN_ON(msb
->state
!= -1);
234 return msb
->exit_error
;
237 /* State machines call that to exit */
238 static int msb_exit_state_machine(struct msb_data
*msb
, int error
)
240 WARN_ON(msb
->state
== -1);
243 msb
->exit_error
= error
;
244 msb
->card
->next_request
= h_msb_default_bad
;
246 /* Invalidate reg window on errors */
248 msb_invalidate_reg_window(msb
);
250 complete(&msb
->card
->mrq_complete
);
254 /* read INT register */
255 static int msb_read_int_reg(struct msb_data
*msb
, long timeout
)
257 struct memstick_request
*mrq
= &msb
->card
->current_mrq
;
259 WARN_ON(msb
->state
== -1);
261 if (!msb
->int_polling
) {
262 msb
->int_timeout
= jiffies
+
263 msecs_to_jiffies(timeout
== -1 ? 500 : timeout
);
264 msb
->int_polling
= true;
265 } else if (time_after(jiffies
, msb
->int_timeout
)) {
266 mrq
->data
[0] = MEMSTICK_INT_CMDNAK
;
270 if ((msb
->caps
& MEMSTICK_CAP_AUTO_GET_INT
) &&
271 mrq
->need_card_int
&& !mrq
->error
) {
272 mrq
->data
[0] = mrq
->int_reg
;
273 mrq
->need_card_int
= false;
276 memstick_init_req(mrq
, MS_TPC_GET_INT
, NULL
, 1);
281 /* Read a register */
282 static int msb_read_regs(struct msb_data
*msb
, int offset
, int len
)
284 struct memstick_request
*req
= &msb
->card
->current_mrq
;
286 if (msb
->reg_addr
.r_offset
!= offset
||
287 msb
->reg_addr
.r_length
!= len
|| !msb
->addr_valid
) {
289 msb
->reg_addr
.r_offset
= offset
;
290 msb
->reg_addr
.r_length
= len
;
291 msb
->addr_valid
= true;
293 memstick_init_req(req
, MS_TPC_SET_RW_REG_ADRS
,
294 &msb
->reg_addr
, sizeof(msb
->reg_addr
));
298 memstick_init_req(req
, MS_TPC_READ_REG
, NULL
, len
);
302 /* Write a card register */
303 static int msb_write_regs(struct msb_data
*msb
, int offset
, int len
, void *buf
)
305 struct memstick_request
*req
= &msb
->card
->current_mrq
;
307 if (msb
->reg_addr
.w_offset
!= offset
||
308 msb
->reg_addr
.w_length
!= len
|| !msb
->addr_valid
) {
310 msb
->reg_addr
.w_offset
= offset
;
311 msb
->reg_addr
.w_length
= len
;
312 msb
->addr_valid
= true;
314 memstick_init_req(req
, MS_TPC_SET_RW_REG_ADRS
,
315 &msb
->reg_addr
, sizeof(msb
->reg_addr
));
319 memstick_init_req(req
, MS_TPC_WRITE_REG
, buf
, len
);
323 /* Handler for absence of IO */
324 static int h_msb_default_bad(struct memstick_dev
*card
,
325 struct memstick_request
**mrq
)
331 * This function is a handler for reads of one page from device.
332 * Writes output to msb->current_sg, takes sector address from msb->reg.param
333 * Can also be used to read extra data only. Set params accordintly.
335 static int h_msb_read_page(struct memstick_dev
*card
,
336 struct memstick_request
**out_mrq
)
338 struct msb_data
*msb
= memstick_get_drvdata(card
);
339 struct memstick_request
*mrq
= *out_mrq
= &card
->current_mrq
;
340 struct scatterlist sg
[2];
344 dbg("read_page, unknown error");
345 return msb_exit_state_machine(msb
, mrq
->error
);
348 switch (msb
->state
) {
349 case MSB_RP_SEND_BLOCK_ADDRESS
:
350 /* msb_write_regs sometimes "fails" because it needs to update
351 the reg window, and thus it returns request for that.
352 Then we stay in this state and retry */
353 if (!msb_write_regs(msb
,
354 offsetof(struct ms_register
, param
),
355 sizeof(struct ms_param_register
),
356 (unsigned char *)&msb
->regs
.param
))
359 msb
->state
= MSB_RP_SEND_READ_COMMAND
;
362 case MSB_RP_SEND_READ_COMMAND
:
363 command
= MS_CMD_BLOCK_READ
;
364 memstick_init_req(mrq
, MS_TPC_SET_CMD
, &command
, 1);
365 msb
->state
= MSB_RP_SEND_INT_REQ
;
368 case MSB_RP_SEND_INT_REQ
:
369 msb
->state
= MSB_RP_RECEIVE_INT_REQ_RESULT
;
370 /* If dont actually need to send the int read request (only in
371 serial mode), then just fall through */
372 if (msb_read_int_reg(msb
, -1))
376 case MSB_RP_RECEIVE_INT_REQ_RESULT
:
377 intreg
= mrq
->data
[0];
378 msb
->regs
.status
.interrupt
= intreg
;
380 if (intreg
& MEMSTICK_INT_CMDNAK
)
381 return msb_exit_state_machine(msb
, -EIO
);
383 if (!(intreg
& MEMSTICK_INT_CED
)) {
384 msb
->state
= MSB_RP_SEND_INT_REQ
;
388 msb
->int_polling
= false;
389 msb
->state
= (intreg
& MEMSTICK_INT_ERR
) ?
390 MSB_RP_SEND_READ_STATUS_REG
: MSB_RP_SEND_OOB_READ
;
393 case MSB_RP_SEND_READ_STATUS_REG
:
394 /* read the status register to understand source of the INT_ERR */
395 if (!msb_read_regs(msb
,
396 offsetof(struct ms_register
, status
),
397 sizeof(struct ms_status_register
)))
400 msb
->state
= MSB_RP_RECEIVE_STATUS_REG
;
403 case MSB_RP_RECEIVE_STATUS_REG
:
404 msb
->regs
.status
= *(struct ms_status_register
*)mrq
->data
;
405 msb
->state
= MSB_RP_SEND_OOB_READ
;
408 case MSB_RP_SEND_OOB_READ
:
409 if (!msb_read_regs(msb
,
410 offsetof(struct ms_register
, extra_data
),
411 sizeof(struct ms_extra_data_register
)))
414 msb
->state
= MSB_RP_RECEIVE_OOB_READ
;
417 case MSB_RP_RECEIVE_OOB_READ
:
418 msb
->regs
.extra_data
=
419 *(struct ms_extra_data_register
*) mrq
->data
;
420 msb
->state
= MSB_RP_SEND_READ_DATA
;
423 case MSB_RP_SEND_READ_DATA
:
424 /* Skip that state if we only read the oob */
425 if (msb
->regs
.param
.cp
== MEMSTICK_CP_EXTRA
) {
426 msb
->state
= MSB_RP_RECEIVE_READ_DATA
;
430 sg_init_table(sg
, ARRAY_SIZE(sg
));
431 msb_sg_copy(msb
->current_sg
, sg
, ARRAY_SIZE(sg
),
432 msb
->current_sg_offset
,
435 memstick_init_req_sg(mrq
, MS_TPC_READ_LONG_DATA
, sg
);
436 msb
->state
= MSB_RP_RECEIVE_READ_DATA
;
439 case MSB_RP_RECEIVE_READ_DATA
:
440 if (!(msb
->regs
.status
.interrupt
& MEMSTICK_INT_ERR
)) {
441 msb
->current_sg_offset
+= msb
->page_size
;
442 return msb_exit_state_machine(msb
, 0);
445 if (msb
->regs
.status
.status1
& MEMSTICK_UNCORR_ERROR
) {
446 dbg("read_page: uncorrectable error");
447 return msb_exit_state_machine(msb
, -EBADMSG
);
450 if (msb
->regs
.status
.status1
& MEMSTICK_CORR_ERROR
) {
451 dbg("read_page: correctable error");
452 msb
->current_sg_offset
+= msb
->page_size
;
453 return msb_exit_state_machine(msb
, -EUCLEAN
);
455 dbg("read_page: INT error, but no status error bits");
456 return msb_exit_state_machine(msb
, -EIO
);
464 * Handler of writes of exactly one block.
465 * Takes address from msb->regs.param.
466 * Writes same extra data to blocks, also taken
467 * from msb->regs.extra
468 * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if
469 * device refuses to take the command or something else
471 static int h_msb_write_block(struct memstick_dev
*card
,
472 struct memstick_request
**out_mrq
)
474 struct msb_data
*msb
= memstick_get_drvdata(card
);
475 struct memstick_request
*mrq
= *out_mrq
= &card
->current_mrq
;
476 struct scatterlist sg
[2];
480 return msb_exit_state_machine(msb
, mrq
->error
);
483 switch (msb
->state
) {
485 /* HACK: Jmicon handling of TPCs between 8 and
486 * sizeof(memstick_request.data) is broken due to hardware
487 * bug in PIO mode that is used for these TPCs
488 * Therefore split the write
491 case MSB_WB_SEND_WRITE_PARAMS
:
492 if (!msb_write_regs(msb
,
493 offsetof(struct ms_register
, param
),
494 sizeof(struct ms_param_register
),
498 msb
->state
= MSB_WB_SEND_WRITE_OOB
;
501 case MSB_WB_SEND_WRITE_OOB
:
502 if (!msb_write_regs(msb
,
503 offsetof(struct ms_register
, extra_data
),
504 sizeof(struct ms_extra_data_register
),
505 &msb
->regs
.extra_data
))
507 msb
->state
= MSB_WB_SEND_WRITE_COMMAND
;
511 case MSB_WB_SEND_WRITE_COMMAND
:
512 command
= MS_CMD_BLOCK_WRITE
;
513 memstick_init_req(mrq
, MS_TPC_SET_CMD
, &command
, 1);
514 msb
->state
= MSB_WB_SEND_INT_REQ
;
517 case MSB_WB_SEND_INT_REQ
:
518 msb
->state
= MSB_WB_RECEIVE_INT_REQ
;
519 if (msb_read_int_reg(msb
, -1))
523 case MSB_WB_RECEIVE_INT_REQ
:
524 intreg
= mrq
->data
[0];
525 msb
->regs
.status
.interrupt
= intreg
;
527 /* errors mean out of here, and fast... */
528 if (intreg
& (MEMSTICK_INT_CMDNAK
))
529 return msb_exit_state_machine(msb
, -EIO
);
531 if (intreg
& MEMSTICK_INT_ERR
)
532 return msb_exit_state_machine(msb
, -EBADMSG
);
535 /* for last page we need to poll CED */
536 if (msb
->current_page
== msb
->pages_in_block
) {
537 if (intreg
& MEMSTICK_INT_CED
)
538 return msb_exit_state_machine(msb
, 0);
539 msb
->state
= MSB_WB_SEND_INT_REQ
;
544 /* for non-last page we need BREQ before writing next chunk */
545 if (!(intreg
& MEMSTICK_INT_BREQ
)) {
546 msb
->state
= MSB_WB_SEND_INT_REQ
;
550 msb
->int_polling
= false;
551 msb
->state
= MSB_WB_SEND_WRITE_DATA
;
554 case MSB_WB_SEND_WRITE_DATA
:
555 sg_init_table(sg
, ARRAY_SIZE(sg
));
557 if (msb_sg_copy(msb
->current_sg
, sg
, ARRAY_SIZE(sg
),
558 msb
->current_sg_offset
,
559 msb
->page_size
) < msb
->page_size
)
560 return msb_exit_state_machine(msb
, -EIO
);
562 memstick_init_req_sg(mrq
, MS_TPC_WRITE_LONG_DATA
, sg
);
563 mrq
->need_card_int
= 1;
564 msb
->state
= MSB_WB_RECEIVE_WRITE_CONFIRMATION
;
567 case MSB_WB_RECEIVE_WRITE_CONFIRMATION
:
569 msb
->current_sg_offset
+= msb
->page_size
;
570 msb
->state
= MSB_WB_SEND_INT_REQ
;
580 * This function is used to send simple IO requests to device that consist
581 * of register write + command
583 static int h_msb_send_command(struct memstick_dev
*card
,
584 struct memstick_request
**out_mrq
)
586 struct msb_data
*msb
= memstick_get_drvdata(card
);
587 struct memstick_request
*mrq
= *out_mrq
= &card
->current_mrq
;
591 dbg("send_command: unknown error");
592 return msb_exit_state_machine(msb
, mrq
->error
);
595 switch (msb
->state
) {
597 /* HACK: see h_msb_write_block */
598 case MSB_SC_SEND_WRITE_PARAMS
: /* write param register*/
599 if (!msb_write_regs(msb
,
600 offsetof(struct ms_register
, param
),
601 sizeof(struct ms_param_register
),
604 msb
->state
= MSB_SC_SEND_WRITE_OOB
;
607 case MSB_SC_SEND_WRITE_OOB
:
608 if (!msb
->command_need_oob
) {
609 msb
->state
= MSB_SC_SEND_COMMAND
;
613 if (!msb_write_regs(msb
,
614 offsetof(struct ms_register
, extra_data
),
615 sizeof(struct ms_extra_data_register
),
616 &msb
->regs
.extra_data
))
619 msb
->state
= MSB_SC_SEND_COMMAND
;
622 case MSB_SC_SEND_COMMAND
:
623 memstick_init_req(mrq
, MS_TPC_SET_CMD
, &msb
->command_value
, 1);
624 msb
->state
= MSB_SC_SEND_INT_REQ
;
627 case MSB_SC_SEND_INT_REQ
:
628 msb
->state
= MSB_SC_RECEIVE_INT_REQ
;
629 if (msb_read_int_reg(msb
, -1))
633 case MSB_SC_RECEIVE_INT_REQ
:
634 intreg
= mrq
->data
[0];
636 if (intreg
& MEMSTICK_INT_CMDNAK
)
637 return msb_exit_state_machine(msb
, -EIO
);
638 if (intreg
& MEMSTICK_INT_ERR
)
639 return msb_exit_state_machine(msb
, -EBADMSG
);
641 if (!(intreg
& MEMSTICK_INT_CED
)) {
642 msb
->state
= MSB_SC_SEND_INT_REQ
;
646 return msb_exit_state_machine(msb
, 0);
652 /* Small handler for card reset */
653 static int h_msb_reset(struct memstick_dev
*card
,
654 struct memstick_request
**out_mrq
)
656 u8 command
= MS_CMD_RESET
;
657 struct msb_data
*msb
= memstick_get_drvdata(card
);
658 struct memstick_request
*mrq
= *out_mrq
= &card
->current_mrq
;
661 return msb_exit_state_machine(msb
, mrq
->error
);
663 switch (msb
->state
) {
665 memstick_init_req(mrq
, MS_TPC_SET_CMD
, &command
, 1);
666 mrq
->need_card_int
= 0;
667 msb
->state
= MSB_RS_CONFIRM
;
670 return msb_exit_state_machine(msb
, 0);
675 /* This handler is used to do serial->parallel switch */
676 static int h_msb_parallel_switch(struct memstick_dev
*card
,
677 struct memstick_request
**out_mrq
)
679 struct msb_data
*msb
= memstick_get_drvdata(card
);
680 struct memstick_request
*mrq
= *out_mrq
= &card
->current_mrq
;
681 struct memstick_host
*host
= card
->host
;
684 dbg("parallel_switch: error");
685 msb
->regs
.param
.system
&= ~MEMSTICK_SYS_PAM
;
686 return msb_exit_state_machine(msb
, mrq
->error
);
689 switch (msb
->state
) {
690 case MSB_PS_SEND_SWITCH_COMMAND
:
691 /* Set the parallel interface on memstick side */
692 msb
->regs
.param
.system
|= MEMSTICK_SYS_PAM
;
694 if (!msb_write_regs(msb
,
695 offsetof(struct ms_register
, param
),
697 (unsigned char *)&msb
->regs
.param
))
700 msb
->state
= MSB_PS_SWICH_HOST
;
703 case MSB_PS_SWICH_HOST
:
704 /* Set parallel interface on our side + send a dummy request
705 to see if card responds */
706 host
->set_param(host
, MEMSTICK_INTERFACE
, MEMSTICK_PAR4
);
707 memstick_init_req(mrq
, MS_TPC_GET_INT
, NULL
, 1);
708 msb
->state
= MSB_PS_CONFIRM
;
712 return msb_exit_state_machine(msb
, 0);
718 static int msb_switch_to_parallel(struct msb_data
*msb
);
720 /* Reset the card, to guard against hw errors beeing treated as bad blocks */
721 static int msb_reset(struct msb_data
*msb
, bool full
)
724 bool was_parallel
= msb
->regs
.param
.system
& MEMSTICK_SYS_PAM
;
725 struct memstick_dev
*card
= msb
->card
;
726 struct memstick_host
*host
= card
->host
;
730 msb
->regs
.param
.system
= MEMSTICK_SYS_BAMD
;
733 error
= host
->set_param(host
,
734 MEMSTICK_POWER
, MEMSTICK_POWER_OFF
);
738 msb_invalidate_reg_window(msb
);
740 error
= host
->set_param(host
,
741 MEMSTICK_POWER
, MEMSTICK_POWER_ON
);
745 error
= host
->set_param(host
,
746 MEMSTICK_INTERFACE
, MEMSTICK_SERIAL
);
749 dbg("Failed to reset the host controller");
750 msb
->read_only
= true;
755 error
= msb_run_state_machine(msb
, h_msb_reset
);
757 dbg("Failed to reset the card");
758 msb
->read_only
= true;
762 /* Set parallel mode */
764 msb_switch_to_parallel(msb
);
768 /* Attempts to switch interface to parallel mode */
769 static int msb_switch_to_parallel(struct msb_data
*msb
)
773 error
= msb_run_state_machine(msb
, h_msb_parallel_switch
);
775 pr_err("Switch to parallel failed");
776 msb
->regs
.param
.system
&= ~MEMSTICK_SYS_PAM
;
777 msb_reset(msb
, true);
781 msb
->caps
|= MEMSTICK_CAP_AUTO_GET_INT
;
785 /* Changes overwrite flag on a page */
786 static int msb_set_overwrite_flag(struct msb_data
*msb
,
787 u16 pba
, u8 page
, u8 flag
)
792 msb
->regs
.param
.block_address
= cpu_to_be16(pba
);
793 msb
->regs
.param
.page_address
= page
;
794 msb
->regs
.param
.cp
= MEMSTICK_CP_OVERWRITE
;
795 msb
->regs
.extra_data
.overwrite_flag
= flag
;
796 msb
->command_value
= MS_CMD_BLOCK_WRITE
;
797 msb
->command_need_oob
= true;
799 dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
801 return msb_run_state_machine(msb
, h_msb_send_command
);
804 static int msb_mark_bad(struct msb_data
*msb
, int pba
)
806 pr_notice("marking pba %d as bad", pba
);
807 msb_reset(msb
, true);
808 return msb_set_overwrite_flag(
809 msb
, pba
, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST
);
812 static int msb_mark_page_bad(struct msb_data
*msb
, int pba
, int page
)
814 dbg("marking page %d of pba %d as bad", page
, pba
);
815 msb_reset(msb
, true);
816 return msb_set_overwrite_flag(msb
,
817 pba
, page
, ~MEMSTICK_OVERWRITE_PGST0
);
820 /* Erases one physical block */
821 static int msb_erase_block(struct msb_data
*msb
, u16 pba
)
827 dbg_verbose("erasing pba %d", pba
);
829 for (try = 1; try < 3; try++) {
830 msb
->regs
.param
.block_address
= cpu_to_be16(pba
);
831 msb
->regs
.param
.page_address
= 0;
832 msb
->regs
.param
.cp
= MEMSTICK_CP_BLOCK
;
833 msb
->command_value
= MS_CMD_BLOCK_ERASE
;
834 msb
->command_need_oob
= false;
837 error
= msb_run_state_machine(msb
, h_msb_send_command
);
838 if (!error
|| msb_reset(msb
, true))
843 pr_err("erase failed, marking pba %d as bad", pba
);
844 msb_mark_bad(msb
, pba
);
847 dbg_verbose("erase success, marking pba %d as unused", pba
);
848 msb_mark_block_unused(msb
, pba
);
849 __set_bit(pba
, msb
->erased_blocks_bitmap
);
853 /* Reads one page from device */
854 static int msb_read_page(struct msb_data
*msb
,
855 u16 pba
, u8 page
, struct ms_extra_data_register
*extra
,
856 struct scatterlist
*sg
, int offset
)
860 if (pba
== MS_BLOCK_INVALID
) {
862 struct sg_mapping_iter miter
;
863 size_t len
= msb
->page_size
;
865 dbg_verbose("read unmapped sector. returning 0xFF");
867 local_irq_save(flags
);
868 sg_miter_start(&miter
, sg
, sg_nents(sg
),
869 SG_MITER_ATOMIC
| SG_MITER_TO_SG
);
871 while (sg_miter_next(&miter
) && len
> 0) {
875 if (offset
&& offset
>= miter
.length
) {
876 offset
-= miter
.length
;
880 chunklen
= min(miter
.length
- offset
, len
);
881 memset(miter
.addr
+ offset
, 0xFF, chunklen
);
886 sg_miter_stop(&miter
);
887 local_irq_restore(flags
);
893 memset(extra
, 0xFF, sizeof(*extra
));
897 if (pba
>= msb
->block_count
) {
898 pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba
);
902 for (try = 1; try < 3; try++) {
903 msb
->regs
.param
.block_address
= cpu_to_be16(pba
);
904 msb
->regs
.param
.page_address
= page
;
905 msb
->regs
.param
.cp
= MEMSTICK_CP_PAGE
;
907 msb
->current_sg
= sg
;
908 msb
->current_sg_offset
= offset
;
909 error
= msb_run_state_machine(msb
, h_msb_read_page
);
912 if (error
== -EUCLEAN
) {
913 pr_notice("correctable error on pba %d, page %d",
919 *extra
= msb
->regs
.extra_data
;
921 if (!error
|| msb_reset(msb
, true))
927 if (error
== -EBADMSG
) {
928 pr_err("uncorrectable error on read of pba %d, page %d",
931 if (msb
->regs
.extra_data
.overwrite_flag
&
932 MEMSTICK_OVERWRITE_PGST0
)
933 msb_mark_page_bad(msb
, pba
, page
);
938 pr_err("read of pba %d, page %d failed with error %d",
943 /* Reads oob of page only */
944 static int msb_read_oob(struct msb_data
*msb
, u16 pba
, u16 page
,
945 struct ms_extra_data_register
*extra
)
950 msb
->regs
.param
.block_address
= cpu_to_be16(pba
);
951 msb
->regs
.param
.page_address
= page
;
952 msb
->regs
.param
.cp
= MEMSTICK_CP_EXTRA
;
954 if (pba
> msb
->block_count
) {
955 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba
);
959 error
= msb_run_state_machine(msb
, h_msb_read_page
);
960 *extra
= msb
->regs
.extra_data
;
962 if (error
== -EUCLEAN
) {
963 pr_notice("correctable error on pba %d, page %d",
971 /* Reads a block and compares it with data contained in scatterlist orig_sg */
972 static int msb_verify_block(struct msb_data
*msb
, u16 pba
,
973 struct scatterlist
*orig_sg
, int offset
)
975 struct scatterlist sg
;
978 sg_init_one(&sg
, msb
->block_buffer
, msb
->block_size
);
980 while (page
< msb
->pages_in_block
) {
982 error
= msb_read_page(msb
, pba
, page
,
983 NULL
, &sg
, page
* msb
->page_size
);
989 if (msb_sg_compare_to_buffer(orig_sg
, offset
,
990 msb
->block_buffer
, msb
->block_size
))
995 /* Writes exectly one block + oob */
996 static int msb_write_block(struct msb_data
*msb
,
997 u16 pba
, u32 lba
, struct scatterlist
*sg
, int offset
)
999 int error
, current_try
= 1;
1000 BUG_ON(sg
->length
< msb
->page_size
);
1005 if (pba
== MS_BLOCK_INVALID
) {
1007 "BUG: write: attempt to write MS_BLOCK_INVALID block");
1011 if (pba
>= msb
->block_count
|| lba
>= msb
->logical_block_count
) {
1013 "BUG: write: attempt to write beyond the end of device");
1017 if (msb_get_zone_from_lba(lba
) != msb_get_zone_from_pba(pba
)) {
1018 pr_err("BUG: write: lba zone mismatch");
1022 if (pba
== msb
->boot_block_locations
[0] ||
1023 pba
== msb
->boot_block_locations
[1]) {
1024 pr_err("BUG: write: attempt to write to boot blocks!");
1033 msb
->regs
.param
.cp
= MEMSTICK_CP_BLOCK
;
1034 msb
->regs
.param
.page_address
= 0;
1035 msb
->regs
.param
.block_address
= cpu_to_be16(pba
);
1037 msb
->regs
.extra_data
.management_flag
= 0xFF;
1038 msb
->regs
.extra_data
.overwrite_flag
= 0xF8;
1039 msb
->regs
.extra_data
.logical_address
= cpu_to_be16(lba
);
1041 msb
->current_sg
= sg
;
1042 msb
->current_sg_offset
= offset
;
1043 msb
->current_page
= 0;
1045 error
= msb_run_state_machine(msb
, h_msb_write_block
);
1047 /* Sector we just wrote to is assumed erased since its pba
1048 was erased. If it wasn't erased, write will succeed
1049 and will just clear the bits that were set in the block
1050 thus test that what we have written,
1051 matches what we expect.
1052 We do trust the blocks that we erased */
1053 if (!error
&& (verify_writes
||
1054 !test_bit(pba
, msb
->erased_blocks_bitmap
)))
1055 error
= msb_verify_block(msb
, pba
, sg
, offset
);
1060 if (current_try
> 1 || msb_reset(msb
, true))
1063 pr_err("write failed, trying to erase the pba %d", pba
);
1064 error
= msb_erase_block(msb
, pba
);
1073 /* Finds a free block for write replacement */
1074 static u16
msb_get_free_block(struct msb_data
*msb
, int zone
)
1077 int pba
= zone
* MS_BLOCKS_IN_ZONE
;
1080 get_random_bytes(&pos
, sizeof(pos
));
1082 if (!msb
->free_block_count
[zone
]) {
1083 pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone
);
1084 msb
->read_only
= true;
1085 return MS_BLOCK_INVALID
;
1088 pos
%= msb
->free_block_count
[zone
];
1090 dbg_verbose("have %d choices for a free block, selected randomly: %d",
1091 msb
->free_block_count
[zone
], pos
);
1093 pba
= find_next_zero_bit(msb
->used_blocks_bitmap
,
1094 msb
->block_count
, pba
);
1095 for (i
= 0; i
< pos
; ++i
)
1096 pba
= find_next_zero_bit(msb
->used_blocks_bitmap
,
1097 msb
->block_count
, pba
+ 1);
1099 dbg_verbose("result of the free blocks scan: pba %d", pba
);
1101 if (pba
== msb
->block_count
|| (msb_get_zone_from_pba(pba
)) != zone
) {
1102 pr_err("BUG: cant get a free block");
1103 msb
->read_only
= true;
1104 return MS_BLOCK_INVALID
;
1107 msb_mark_block_used(msb
, pba
);
1111 static int msb_update_block(struct msb_data
*msb
, u16 lba
,
1112 struct scatterlist
*sg
, int offset
)
1117 pba
= msb
->lba_to_pba_table
[lba
];
1118 dbg_verbose("start of a block update at lba %d, pba %d", lba
, pba
);
1120 if (pba
!= MS_BLOCK_INVALID
) {
1121 dbg_verbose("setting the update flag on the block");
1122 msb_set_overwrite_flag(msb
, pba
, 0,
1123 0xFF & ~MEMSTICK_OVERWRITE_UDST
);
1126 for (try = 0; try < 3; try++) {
1127 new_pba
= msb_get_free_block(msb
,
1128 msb_get_zone_from_lba(lba
));
1130 if (new_pba
== MS_BLOCK_INVALID
) {
1135 dbg_verbose("block update: writing updated block to the pba %d",
1137 error
= msb_write_block(msb
, new_pba
, lba
, sg
, offset
);
1138 if (error
== -EBADMSG
) {
1139 msb_mark_bad(msb
, new_pba
);
1146 dbg_verbose("block update: erasing the old block");
1147 msb_erase_block(msb
, pba
);
1148 msb
->lba_to_pba_table
[lba
] = new_pba
;
1153 pr_err("block update error after %d tries, switching to r/o mode", try);
1154 msb
->read_only
= true;
1159 /* Converts endiannes in the boot block for easy use */
1160 static void msb_fix_boot_page_endianness(struct ms_boot_page
*p
)
1162 p
->header
.block_id
= be16_to_cpu(p
->header
.block_id
);
1163 p
->header
.format_reserved
= be16_to_cpu(p
->header
.format_reserved
);
1164 p
->entry
.disabled_block
.start_addr
1165 = be32_to_cpu(p
->entry
.disabled_block
.start_addr
);
1166 p
->entry
.disabled_block
.data_size
1167 = be32_to_cpu(p
->entry
.disabled_block
.data_size
);
1168 p
->entry
.cis_idi
.start_addr
1169 = be32_to_cpu(p
->entry
.cis_idi
.start_addr
);
1170 p
->entry
.cis_idi
.data_size
1171 = be32_to_cpu(p
->entry
.cis_idi
.data_size
);
1172 p
->attr
.block_size
= be16_to_cpu(p
->attr
.block_size
);
1173 p
->attr
.number_of_blocks
= be16_to_cpu(p
->attr
.number_of_blocks
);
1174 p
->attr
.number_of_effective_blocks
1175 = be16_to_cpu(p
->attr
.number_of_effective_blocks
);
1176 p
->attr
.page_size
= be16_to_cpu(p
->attr
.page_size
);
1177 p
->attr
.memory_manufacturer_code
1178 = be16_to_cpu(p
->attr
.memory_manufacturer_code
);
1179 p
->attr
.memory_device_code
= be16_to_cpu(p
->attr
.memory_device_code
);
1180 p
->attr
.implemented_capacity
1181 = be16_to_cpu(p
->attr
.implemented_capacity
);
1182 p
->attr
.controller_number
= be16_to_cpu(p
->attr
.controller_number
);
1183 p
->attr
.controller_function
= be16_to_cpu(p
->attr
.controller_function
);
1186 static int msb_read_boot_blocks(struct msb_data
*msb
)
1189 struct scatterlist sg
;
1190 struct ms_extra_data_register extra
;
1191 struct ms_boot_page
*page
;
1193 msb
->boot_block_locations
[0] = MS_BLOCK_INVALID
;
1194 msb
->boot_block_locations
[1] = MS_BLOCK_INVALID
;
1195 msb
->boot_block_count
= 0;
1197 dbg_verbose("Start of a scan for the boot blocks");
1199 if (!msb
->boot_page
) {
1200 page
= kmalloc_array(2, sizeof(struct ms_boot_page
),
1205 msb
->boot_page
= page
;
1207 page
= msb
->boot_page
;
1209 msb
->block_count
= MS_BLOCK_MAX_BOOT_ADDR
;
1211 for (pba
= 0; pba
< MS_BLOCK_MAX_BOOT_ADDR
; pba
++) {
1213 sg_init_one(&sg
, page
, sizeof(*page
));
1214 if (msb_read_page(msb
, pba
, 0, &extra
, &sg
, 0)) {
1215 dbg("boot scan: can't read pba %d", pba
);
1219 if (extra
.management_flag
& MEMSTICK_MANAGEMENT_SYSFLG
) {
1220 dbg("management flag doesn't indicate boot block %d",
1225 if (be16_to_cpu(page
->header
.block_id
) != MS_BLOCK_BOOT_ID
) {
1226 dbg("the pba at %d doesn't contain boot block ID", pba
);
1230 msb_fix_boot_page_endianness(page
);
1231 msb
->boot_block_locations
[msb
->boot_block_count
] = pba
;
1234 msb
->boot_block_count
++;
1236 if (msb
->boot_block_count
== 2)
1240 if (!msb
->boot_block_count
) {
1241 pr_err("media doesn't contain master page, aborting");
1245 dbg_verbose("End of scan for boot blocks");
1249 static int msb_read_bad_block_table(struct msb_data
*msb
, int block_nr
)
1251 struct ms_boot_page
*boot_block
;
1252 struct scatterlist sg
;
1256 int data_size
, data_offset
, page
, page_offset
, size_to_read
;
1259 BUG_ON(block_nr
> 1);
1260 boot_block
= &msb
->boot_page
[block_nr
];
1261 pba
= msb
->boot_block_locations
[block_nr
];
1263 if (msb
->boot_block_locations
[block_nr
] == MS_BLOCK_INVALID
)
1266 data_size
= boot_block
->entry
.disabled_block
.data_size
;
1267 data_offset
= sizeof(struct ms_boot_page
) +
1268 boot_block
->entry
.disabled_block
.start_addr
;
1272 page
= data_offset
/ msb
->page_size
;
1273 page_offset
= data_offset
% msb
->page_size
;
1275 DIV_ROUND_UP(data_size
+ page_offset
, msb
->page_size
) *
1278 dbg("reading bad block of boot block at pba %d, offset %d len %d",
1279 pba
, data_offset
, data_size
);
1281 buffer
= kzalloc(size_to_read
, GFP_KERNEL
);
1285 /* Read the buffer */
1286 sg_init_one(&sg
, buffer
, size_to_read
);
1288 while (offset
< size_to_read
) {
1289 error
= msb_read_page(msb
, pba
, page
, NULL
, &sg
, offset
);
1294 offset
+= msb
->page_size
;
1296 if (page
== msb
->pages_in_block
) {
1298 "bad block table extends beyond the boot block");
1303 /* Process the bad block table */
1304 for (i
= page_offset
; i
< data_size
/ sizeof(u16
); i
++) {
1306 u16 bad_block
= be16_to_cpu(buffer
[i
]);
1308 if (bad_block
>= msb
->block_count
) {
1309 dbg("bad block table contains invalid block %d",
1314 if (test_bit(bad_block
, msb
->used_blocks_bitmap
)) {
1315 dbg("duplicate bad block %d in the table",
1320 dbg("block %d is marked as factory bad", bad_block
);
1321 msb_mark_block_used(msb
, bad_block
);
1328 static int msb_ftl_initialize(struct msb_data
*msb
)
1332 if (msb
->ftl_initialized
)
1335 msb
->zone_count
= msb
->block_count
/ MS_BLOCKS_IN_ZONE
;
1336 msb
->logical_block_count
= msb
->zone_count
* 496 - 2;
1338 msb
->used_blocks_bitmap
= kzalloc(msb
->block_count
/ 8, GFP_KERNEL
);
1339 msb
->erased_blocks_bitmap
= kzalloc(msb
->block_count
/ 8, GFP_KERNEL
);
1340 msb
->lba_to_pba_table
=
1341 kmalloc_array(msb
->logical_block_count
, sizeof(u16
),
1344 if (!msb
->used_blocks_bitmap
|| !msb
->lba_to_pba_table
||
1345 !msb
->erased_blocks_bitmap
) {
1346 kfree(msb
->used_blocks_bitmap
);
1347 kfree(msb
->lba_to_pba_table
);
1348 kfree(msb
->erased_blocks_bitmap
);
1352 for (i
= 0; i
< msb
->zone_count
; i
++)
1353 msb
->free_block_count
[i
] = MS_BLOCKS_IN_ZONE
;
1355 memset(msb
->lba_to_pba_table
, MS_BLOCK_INVALID
,
1356 msb
->logical_block_count
* sizeof(u16
));
1358 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1359 msb
->zone_count
, msb
->logical_block_count
);
1361 msb
->ftl_initialized
= true;
1365 static int msb_ftl_scan(struct msb_data
*msb
)
1367 u16 pba
, lba
, other_block
;
1368 u8 overwrite_flag
, management_flag
, other_overwrite_flag
;
1370 struct ms_extra_data_register extra
;
1371 u8
*overwrite_flags
= kzalloc(msb
->block_count
, GFP_KERNEL
);
1373 if (!overwrite_flags
)
1376 dbg("Start of media scanning");
1377 for (pba
= 0; pba
< msb
->block_count
; pba
++) {
1379 if (pba
== msb
->boot_block_locations
[0] ||
1380 pba
== msb
->boot_block_locations
[1]) {
1381 dbg_verbose("pba %05d -> [boot block]", pba
);
1382 msb_mark_block_used(msb
, pba
);
1386 if (test_bit(pba
, msb
->used_blocks_bitmap
)) {
1387 dbg_verbose("pba %05d -> [factory bad]", pba
);
1391 memset(&extra
, 0, sizeof(extra
));
1392 error
= msb_read_oob(msb
, pba
, 0, &extra
);
1394 /* can't trust the page if we can't read the oob */
1395 if (error
== -EBADMSG
) {
1397 "oob of pba %d damaged, will try to erase it", pba
);
1398 msb_mark_block_used(msb
, pba
);
1399 msb_erase_block(msb
, pba
);
1402 pr_err("unknown error %d on read of oob of pba %d - aborting",
1405 kfree(overwrite_flags
);
1409 lba
= be16_to_cpu(extra
.logical_address
);
1410 management_flag
= extra
.management_flag
;
1411 overwrite_flag
= extra
.overwrite_flag
;
1412 overwrite_flags
[pba
] = overwrite_flag
;
1414 /* Skip bad blocks */
1415 if (!(overwrite_flag
& MEMSTICK_OVERWRITE_BKST
)) {
1416 dbg("pba %05d -> [BAD]", pba
);
1417 msb_mark_block_used(msb
, pba
);
1421 /* Skip system/drm blocks */
1422 if ((management_flag
& MEMSTICK_MANAGEMENT_FLAG_NORMAL
) !=
1423 MEMSTICK_MANAGEMENT_FLAG_NORMAL
) {
1424 dbg("pba %05d -> [reserved management flag %02x]",
1425 pba
, management_flag
);
1426 msb_mark_block_used(msb
, pba
);
1430 /* Erase temporary tables */
1431 if (!(management_flag
& MEMSTICK_MANAGEMENT_ATFLG
)) {
1432 dbg("pba %05d -> [temp table] - will erase", pba
);
1434 msb_mark_block_used(msb
, pba
);
1435 msb_erase_block(msb
, pba
);
1439 if (lba
== MS_BLOCK_INVALID
) {
1440 dbg_verbose("pba %05d -> [free]", pba
);
1444 msb_mark_block_used(msb
, pba
);
1446 /* Block has LBA not according to zoning*/
1447 if (msb_get_zone_from_lba(lba
) != msb_get_zone_from_pba(pba
)) {
1448 pr_notice("pba %05d -> [bad lba %05d] - will erase",
1450 msb_erase_block(msb
, pba
);
1454 /* No collisions - great */
1455 if (msb
->lba_to_pba_table
[lba
] == MS_BLOCK_INVALID
) {
1456 dbg_verbose("pba %05d -> [lba %05d]", pba
, lba
);
1457 msb
->lba_to_pba_table
[lba
] = pba
;
1461 other_block
= msb
->lba_to_pba_table
[lba
];
1462 other_overwrite_flag
= overwrite_flags
[other_block
];
1464 pr_notice("Collision between pba %d and pba %d",
1467 if (!(overwrite_flag
& MEMSTICK_OVERWRITE_UDST
)) {
1468 pr_notice("pba %d is marked as stable, use it", pba
);
1469 msb_erase_block(msb
, other_block
);
1470 msb
->lba_to_pba_table
[lba
] = pba
;
1474 if (!(other_overwrite_flag
& MEMSTICK_OVERWRITE_UDST
)) {
1475 pr_notice("pba %d is marked as stable, use it",
1477 msb_erase_block(msb
, pba
);
1481 pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
1482 pba
, other_block
, other_block
);
1484 msb_erase_block(msb
, other_block
);
1485 msb
->lba_to_pba_table
[lba
] = pba
;
1488 dbg("End of media scanning");
1489 kfree(overwrite_flags
);
1493 static void msb_cache_flush_timer(struct timer_list
*t
)
1495 struct msb_data
*msb
= from_timer(msb
, t
, cache_flush_timer
);
1496 msb
->need_flush_cache
= true;
1497 queue_work(msb
->io_queue
, &msb
->io_work
);
1501 static void msb_cache_discard(struct msb_data
*msb
)
1503 if (msb
->cache_block_lba
== MS_BLOCK_INVALID
)
1506 del_timer_sync(&msb
->cache_flush_timer
);
1508 dbg_verbose("Discarding the write cache");
1509 msb
->cache_block_lba
= MS_BLOCK_INVALID
;
1510 bitmap_zero(&msb
->valid_cache_bitmap
, msb
->pages_in_block
);
1513 static int msb_cache_init(struct msb_data
*msb
)
1515 timer_setup(&msb
->cache_flush_timer
, msb_cache_flush_timer
, 0);
1518 msb
->cache
= kzalloc(msb
->block_size
, GFP_KERNEL
);
1522 msb_cache_discard(msb
);
1526 static int msb_cache_flush(struct msb_data
*msb
)
1528 struct scatterlist sg
;
1529 struct ms_extra_data_register extra
;
1530 int page
, offset
, error
;
1536 if (msb
->cache_block_lba
== MS_BLOCK_INVALID
)
1539 lba
= msb
->cache_block_lba
;
1540 pba
= msb
->lba_to_pba_table
[lba
];
1542 dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1543 pba
, msb
->cache_block_lba
);
1545 sg_init_one(&sg
, msb
->cache
, msb
->block_size
);
1547 /* Read all missing pages in cache */
1548 for (page
= 0; page
< msb
->pages_in_block
; page
++) {
1550 if (test_bit(page
, &msb
->valid_cache_bitmap
))
1553 offset
= page
* msb
->page_size
;
1555 dbg_verbose("reading non-present sector %d of cache block %d",
1557 error
= msb_read_page(msb
, pba
, page
, &extra
, &sg
, offset
);
1559 /* Bad pages are copied with 00 page status */
1560 if (error
== -EBADMSG
) {
1561 pr_err("read error on sector %d, contents probably damaged", page
);
1568 if ((extra
.overwrite_flag
& MEMSTICK_OV_PG_NORMAL
) !=
1569 MEMSTICK_OV_PG_NORMAL
) {
1570 dbg("page %d is marked as bad", page
);
1574 set_bit(page
, &msb
->valid_cache_bitmap
);
1577 /* Write the cache now */
1578 error
= msb_update_block(msb
, msb
->cache_block_lba
, &sg
, 0);
1579 pba
= msb
->lba_to_pba_table
[msb
->cache_block_lba
];
1581 /* Mark invalid pages */
1583 for (page
= 0; page
< msb
->pages_in_block
; page
++) {
1585 if (test_bit(page
, &msb
->valid_cache_bitmap
))
1588 dbg("marking page %d as containing damaged data",
1590 msb_set_overwrite_flag(msb
,
1591 pba
, page
, 0xFF & ~MEMSTICK_OV_PG_NORMAL
);
1595 msb_cache_discard(msb
);
1599 static int msb_cache_write(struct msb_data
*msb
, int lba
,
1600 int page
, bool add_to_cache_only
, struct scatterlist
*sg
, int offset
)
1603 struct scatterlist sg_tmp
[10];
1608 if (msb
->cache_block_lba
== MS_BLOCK_INVALID
||
1609 lba
!= msb
->cache_block_lba
)
1610 if (add_to_cache_only
)
1613 /* If we need to write different block */
1614 if (msb
->cache_block_lba
!= MS_BLOCK_INVALID
&&
1615 lba
!= msb
->cache_block_lba
) {
1616 dbg_verbose("first flush the cache");
1617 error
= msb_cache_flush(msb
);
1622 if (msb
->cache_block_lba
== MS_BLOCK_INVALID
) {
1623 msb
->cache_block_lba
= lba
;
1624 mod_timer(&msb
->cache_flush_timer
,
1625 jiffies
+ msecs_to_jiffies(cache_flush_timeout
));
1628 dbg_verbose("Write of LBA %d page %d to cache ", lba
, page
);
1630 sg_init_table(sg_tmp
, ARRAY_SIZE(sg_tmp
));
1631 msb_sg_copy(sg
, sg_tmp
, ARRAY_SIZE(sg_tmp
), offset
, msb
->page_size
);
1633 sg_copy_to_buffer(sg_tmp
, sg_nents(sg_tmp
),
1634 msb
->cache
+ page
* msb
->page_size
, msb
->page_size
);
1636 set_bit(page
, &msb
->valid_cache_bitmap
);
1640 static int msb_cache_read(struct msb_data
*msb
, int lba
,
1641 int page
, struct scatterlist
*sg
, int offset
)
1643 int pba
= msb
->lba_to_pba_table
[lba
];
1644 struct scatterlist sg_tmp
[10];
1647 if (lba
== msb
->cache_block_lba
&&
1648 test_bit(page
, &msb
->valid_cache_bitmap
)) {
1650 dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1653 sg_init_table(sg_tmp
, ARRAY_SIZE(sg_tmp
));
1654 msb_sg_copy(sg
, sg_tmp
, ARRAY_SIZE(sg_tmp
),
1655 offset
, msb
->page_size
);
1656 sg_copy_from_buffer(sg_tmp
, sg_nents(sg_tmp
),
1657 msb
->cache
+ msb
->page_size
* page
,
1660 dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1663 error
= msb_read_page(msb
, pba
, page
, NULL
, sg
, offset
);
1667 msb_cache_write(msb
, lba
, page
, true, sg
, offset
);
1672 /* Emulated geometry table
1673 * This table content isn't that importaint,
1674 * One could put here different values, providing that they still
1676 * 64 MB entry is what windows reports for my 64M memstick */
1678 static const struct chs_entry chs_table
[] = {
1679 /* size sectors cylynders heads */
1685 {128, 16, 991, 16 },
1689 /* Load information about the card */
1690 static int msb_init_card(struct memstick_dev
*card
)
1692 struct msb_data
*msb
= memstick_get_drvdata(card
);
1693 struct memstick_host
*host
= card
->host
;
1694 struct ms_boot_page
*boot_block
;
1695 int error
= 0, i
, raw_size_in_megs
;
1699 if (card
->id
.class >= MEMSTICK_CLASS_ROM
&&
1700 card
->id
.class <= MEMSTICK_CLASS_ROM
)
1701 msb
->read_only
= true;
1704 error
= msb_reset(msb
, false);
1708 /* Due to a bug in Jmicron driver written by Alex Dubov,
1709 its serial mode barely works,
1710 so we switch to parallel mode right away */
1711 if (host
->caps
& MEMSTICK_CAP_PAR4
)
1712 msb_switch_to_parallel(msb
);
1714 msb
->page_size
= sizeof(struct ms_boot_page
);
1716 /* Read the boot page */
1717 error
= msb_read_boot_blocks(msb
);
1721 boot_block
= &msb
->boot_page
[0];
1723 /* Save intersting attributes from boot page */
1724 msb
->block_count
= boot_block
->attr
.number_of_blocks
;
1725 msb
->page_size
= boot_block
->attr
.page_size
;
1727 msb
->pages_in_block
= boot_block
->attr
.block_size
* 2;
1728 msb
->block_size
= msb
->page_size
* msb
->pages_in_block
;
1730 if (msb
->page_size
> PAGE_SIZE
) {
1731 /* this isn't supported by linux at all, anyway*/
1732 dbg("device page %d size isn't supported", msb
->page_size
);
1736 msb
->block_buffer
= kzalloc(msb
->block_size
, GFP_KERNEL
);
1737 if (!msb
->block_buffer
)
1740 raw_size_in_megs
= (msb
->block_size
* msb
->block_count
) >> 20;
1742 for (i
= 0; chs_table
[i
].size
; i
++) {
1744 if (chs_table
[i
].size
!= raw_size_in_megs
)
1747 msb
->geometry
.cylinders
= chs_table
[i
].cyl
;
1748 msb
->geometry
.heads
= chs_table
[i
].head
;
1749 msb
->geometry
.sectors
= chs_table
[i
].sec
;
1753 if (boot_block
->attr
.transfer_supporting
== 1)
1754 msb
->caps
|= MEMSTICK_CAP_PAR4
;
1756 if (boot_block
->attr
.device_type
& 0x03)
1757 msb
->read_only
= true;
1759 dbg("Total block count = %d", msb
->block_count
);
1760 dbg("Each block consists of %d pages", msb
->pages_in_block
);
1761 dbg("Page size = %d bytes", msb
->page_size
);
1762 dbg("Parallel mode supported: %d", !!(msb
->caps
& MEMSTICK_CAP_PAR4
));
1763 dbg("Read only: %d", msb
->read_only
);
1766 /* Now we can switch the interface */
1767 if (host
->caps
& msb
->caps
& MEMSTICK_CAP_PAR4
)
1768 msb_switch_to_parallel(msb
);
1771 error
= msb_cache_init(msb
);
1775 error
= msb_ftl_initialize(msb
);
1780 /* Read the bad block table */
1781 error
= msb_read_bad_block_table(msb
, 0);
1783 if (error
&& error
!= -ENOMEM
) {
1784 dbg("failed to read bad block table from primary boot block, trying from backup");
1785 error
= msb_read_bad_block_table(msb
, 1);
1791 /* *drum roll* Scan the media */
1792 error
= msb_ftl_scan(msb
);
1794 pr_err("Scan of media failed");
1802 static int msb_do_write_request(struct msb_data
*msb
, int lba
,
1803 int page
, struct scatterlist
*sg
, size_t len
, int *sucessfuly_written
)
1807 *sucessfuly_written
= 0;
1809 while (offset
< len
) {
1810 if (page
== 0 && len
- offset
>= msb
->block_size
) {
1812 if (msb
->cache_block_lba
== lba
)
1813 msb_cache_discard(msb
);
1815 dbg_verbose("Writing whole lba %d", lba
);
1816 error
= msb_update_block(msb
, lba
, sg
, offset
);
1820 offset
+= msb
->block_size
;
1821 *sucessfuly_written
+= msb
->block_size
;
1826 error
= msb_cache_write(msb
, lba
, page
, false, sg
, offset
);
1830 offset
+= msb
->page_size
;
1831 *sucessfuly_written
+= msb
->page_size
;
1834 if (page
== msb
->pages_in_block
) {
1842 static int msb_do_read_request(struct msb_data
*msb
, int lba
,
1843 int page
, struct scatterlist
*sg
, int len
, int *sucessfuly_read
)
1847 *sucessfuly_read
= 0;
1849 while (offset
< len
) {
1851 error
= msb_cache_read(msb
, lba
, page
, sg
, offset
);
1855 offset
+= msb
->page_size
;
1856 *sucessfuly_read
+= msb
->page_size
;
1859 if (page
== msb
->pages_in_block
) {
1867 static void msb_io_work(struct work_struct
*work
)
1869 struct msb_data
*msb
= container_of(work
, struct msb_data
, io_work
);
1870 int page
, error
, len
;
1872 struct scatterlist
*sg
= msb
->prealloc_sg
;
1873 struct request
*req
;
1875 dbg_verbose("IO: work started");
1878 spin_lock_irq(&msb
->q_lock
);
1880 if (msb
->need_flush_cache
) {
1881 msb
->need_flush_cache
= false;
1882 spin_unlock_irq(&msb
->q_lock
);
1883 msb_cache_flush(msb
);
1889 dbg_verbose("IO: no more requests exiting");
1890 spin_unlock_irq(&msb
->q_lock
);
1894 spin_unlock_irq(&msb
->q_lock
);
1896 /* process the request */
1897 dbg_verbose("IO: processing new request");
1898 blk_rq_map_sg(msb
->queue
, req
, sg
);
1900 lba
= blk_rq_pos(req
);
1902 sector_div(lba
, msb
->page_size
/ 512);
1903 page
= sector_div(lba
, msb
->pages_in_block
);
1905 if (rq_data_dir(msb
->req
) == READ
)
1906 error
= msb_do_read_request(msb
, lba
, page
, sg
,
1907 blk_rq_bytes(req
), &len
);
1909 error
= msb_do_write_request(msb
, lba
, page
, sg
,
1910 blk_rq_bytes(req
), &len
);
1912 if (len
&& !blk_update_request(req
, BLK_STS_OK
, len
)) {
1913 __blk_mq_end_request(req
, BLK_STS_OK
);
1914 spin_lock_irq(&msb
->q_lock
);
1916 spin_unlock_irq(&msb
->q_lock
);
1919 if (error
&& msb
->req
) {
1920 blk_status_t ret
= errno_to_blk_status(error
);
1922 dbg_verbose("IO: ending one sector of the request with error");
1923 blk_mq_end_request(req
, ret
);
1924 spin_lock_irq(&msb
->q_lock
);
1926 spin_unlock_irq(&msb
->q_lock
);
1930 dbg_verbose("IO: request still pending");
1934 static DEFINE_IDR(msb_disk_idr
); /*set of used disk numbers */
1935 static DEFINE_MUTEX(msb_disk_lock
); /* protects against races in open/release */
1937 static int msb_bd_open(struct block_device
*bdev
, fmode_t mode
)
1939 struct gendisk
*disk
= bdev
->bd_disk
;
1940 struct msb_data
*msb
= disk
->private_data
;
1942 dbg_verbose("block device open");
1944 mutex_lock(&msb_disk_lock
);
1946 if (msb
&& msb
->card
)
1949 mutex_unlock(&msb_disk_lock
);
1953 static void msb_data_clear(struct msb_data
*msb
)
1955 kfree(msb
->boot_page
);
1956 kfree(msb
->used_blocks_bitmap
);
1957 kfree(msb
->lba_to_pba_table
);
1962 static int msb_disk_release(struct gendisk
*disk
)
1964 struct msb_data
*msb
= disk
->private_data
;
1966 dbg_verbose("block device release");
1967 mutex_lock(&msb_disk_lock
);
1970 if (msb
->usage_count
)
1973 if (!msb
->usage_count
) {
1974 disk
->private_data
= NULL
;
1975 idr_remove(&msb_disk_idr
, msb
->disk_id
);
1980 mutex_unlock(&msb_disk_lock
);
1984 static void msb_bd_release(struct gendisk
*disk
, fmode_t mode
)
1986 msb_disk_release(disk
);
1989 static int msb_bd_getgeo(struct block_device
*bdev
,
1990 struct hd_geometry
*geo
)
1992 struct msb_data
*msb
= bdev
->bd_disk
->private_data
;
1993 *geo
= msb
->geometry
;
1997 static blk_status_t
msb_queue_rq(struct blk_mq_hw_ctx
*hctx
,
1998 const struct blk_mq_queue_data
*bd
)
2000 struct memstick_dev
*card
= hctx
->queue
->queuedata
;
2001 struct msb_data
*msb
= memstick_get_drvdata(card
);
2002 struct request
*req
= bd
->rq
;
2004 dbg_verbose("Submit request");
2006 spin_lock_irq(&msb
->q_lock
);
2008 if (msb
->card_dead
) {
2009 dbg("Refusing requests on removed card");
2011 WARN_ON(!msb
->io_queue_stopped
);
2013 spin_unlock_irq(&msb
->q_lock
);
2014 blk_mq_start_request(req
);
2015 return BLK_STS_IOERR
;
2019 spin_unlock_irq(&msb
->q_lock
);
2020 return BLK_STS_DEV_RESOURCE
;
2023 blk_mq_start_request(req
);
2026 if (!msb
->io_queue_stopped
)
2027 queue_work(msb
->io_queue
, &msb
->io_work
);
2029 spin_unlock_irq(&msb
->q_lock
);
2033 static int msb_check_card(struct memstick_dev
*card
)
2035 struct msb_data
*msb
= memstick_get_drvdata(card
);
2036 return (msb
->card_dead
== 0);
2039 static void msb_stop(struct memstick_dev
*card
)
2041 struct msb_data
*msb
= memstick_get_drvdata(card
);
2042 unsigned long flags
;
2044 dbg("Stopping all msblock IO");
2046 blk_mq_stop_hw_queues(msb
->queue
);
2047 spin_lock_irqsave(&msb
->q_lock
, flags
);
2048 msb
->io_queue_stopped
= true;
2049 spin_unlock_irqrestore(&msb
->q_lock
, flags
);
2051 del_timer_sync(&msb
->cache_flush_timer
);
2052 flush_workqueue(msb
->io_queue
);
2054 spin_lock_irqsave(&msb
->q_lock
, flags
);
2056 blk_mq_requeue_request(msb
->req
, false);
2059 spin_unlock_irqrestore(&msb
->q_lock
, flags
);
2062 static void msb_start(struct memstick_dev
*card
)
2064 struct msb_data
*msb
= memstick_get_drvdata(card
);
2065 unsigned long flags
;
2067 dbg("Resuming IO from msblock");
2069 msb_invalidate_reg_window(msb
);
2071 spin_lock_irqsave(&msb
->q_lock
, flags
);
2072 if (!msb
->io_queue_stopped
|| msb
->card_dead
) {
2073 spin_unlock_irqrestore(&msb
->q_lock
, flags
);
2076 spin_unlock_irqrestore(&msb
->q_lock
, flags
);
2078 /* Kick cache flush anyway, its harmless */
2079 msb
->need_flush_cache
= true;
2080 msb
->io_queue_stopped
= false;
2082 blk_mq_start_hw_queues(msb
->queue
);
2084 queue_work(msb
->io_queue
, &msb
->io_work
);
2088 static const struct block_device_operations msb_bdops
= {
2089 .open
= msb_bd_open
,
2090 .release
= msb_bd_release
,
2091 .getgeo
= msb_bd_getgeo
,
2092 .owner
= THIS_MODULE
2095 static const struct blk_mq_ops msb_mq_ops
= {
2096 .queue_rq
= msb_queue_rq
,
2099 /* Registers the block device */
2100 static int msb_init_disk(struct memstick_dev
*card
)
2102 struct msb_data
*msb
= memstick_get_drvdata(card
);
2104 unsigned long capacity
;
2106 mutex_lock(&msb_disk_lock
);
2107 msb
->disk_id
= idr_alloc(&msb_disk_idr
, card
, 0, 256, GFP_KERNEL
);
2108 mutex_unlock(&msb_disk_lock
);
2110 if (msb
->disk_id
< 0)
2111 return msb
->disk_id
;
2113 msb
->disk
= alloc_disk(0);
2116 goto out_release_id
;
2119 msb
->queue
= blk_mq_init_sq_queue(&msb
->tag_set
, &msb_mq_ops
, 2,
2120 BLK_MQ_F_SHOULD_MERGE
);
2121 if (IS_ERR(msb
->queue
)) {
2122 rc
= PTR_ERR(msb
->queue
);
2127 msb
->queue
->queuedata
= card
;
2129 blk_queue_max_hw_sectors(msb
->queue
, MS_BLOCK_MAX_PAGES
);
2130 blk_queue_max_segments(msb
->queue
, MS_BLOCK_MAX_SEGS
);
2131 blk_queue_max_segment_size(msb
->queue
,
2132 MS_BLOCK_MAX_PAGES
* msb
->page_size
);
2133 blk_queue_logical_block_size(msb
->queue
, msb
->page_size
);
2135 sprintf(msb
->disk
->disk_name
, "msblk%d", msb
->disk_id
);
2136 msb
->disk
->fops
= &msb_bdops
;
2137 msb
->disk
->private_data
= msb
;
2138 msb
->disk
->queue
= msb
->queue
;
2139 msb
->disk
->flags
|= GENHD_FL_EXT_DEVT
;
2141 capacity
= msb
->pages_in_block
* msb
->logical_block_count
;
2142 capacity
*= (msb
->page_size
/ 512);
2143 set_capacity(msb
->disk
, capacity
);
2144 dbg("Set total disk size to %lu sectors", capacity
);
2146 msb
->usage_count
= 1;
2147 msb
->io_queue
= alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM
);
2148 INIT_WORK(&msb
->io_work
, msb_io_work
);
2149 sg_init_table(msb
->prealloc_sg
, MS_BLOCK_MAX_SEGS
+1);
2152 set_disk_ro(msb
->disk
, 1);
2155 device_add_disk(&card
->dev
, msb
->disk
, NULL
);
2160 put_disk(msb
->disk
);
2162 mutex_lock(&msb_disk_lock
);
2163 idr_remove(&msb_disk_idr
, msb
->disk_id
);
2164 mutex_unlock(&msb_disk_lock
);
2168 static int msb_probe(struct memstick_dev
*card
)
2170 struct msb_data
*msb
;
2173 msb
= kzalloc(sizeof(struct msb_data
), GFP_KERNEL
);
2176 memstick_set_drvdata(card
, msb
);
2178 spin_lock_init(&msb
->q_lock
);
2180 rc
= msb_init_card(card
);
2184 rc
= msb_init_disk(card
);
2186 card
->check
= msb_check_card
;
2187 card
->stop
= msb_stop
;
2188 card
->start
= msb_start
;
2192 memstick_set_drvdata(card
, NULL
);
2193 msb_data_clear(msb
);
2198 static void msb_remove(struct memstick_dev
*card
)
2200 struct msb_data
*msb
= memstick_get_drvdata(card
);
2201 unsigned long flags
;
2203 if (!msb
->io_queue_stopped
)
2206 dbg("Removing the disk device");
2208 /* Take care of unhandled + new requests from now on */
2209 spin_lock_irqsave(&msb
->q_lock
, flags
);
2210 msb
->card_dead
= true;
2211 spin_unlock_irqrestore(&msb
->q_lock
, flags
);
2212 blk_mq_start_hw_queues(msb
->queue
);
2214 /* Remove the disk */
2215 del_gendisk(msb
->disk
);
2216 blk_cleanup_queue(msb
->queue
);
2217 blk_mq_free_tag_set(&msb
->tag_set
);
2220 mutex_lock(&msb_disk_lock
);
2221 msb_data_clear(msb
);
2222 mutex_unlock(&msb_disk_lock
);
2224 msb_disk_release(msb
->disk
);
2225 memstick_set_drvdata(card
, NULL
);
2230 static int msb_suspend(struct memstick_dev
*card
, pm_message_t state
)
2236 static int msb_resume(struct memstick_dev
*card
)
2238 struct msb_data
*msb
= memstick_get_drvdata(card
);
2239 struct msb_data
*new_msb
= NULL
;
2240 bool card_dead
= true;
2242 #ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2243 msb
->card_dead
= true;
2246 mutex_lock(&card
->host
->lock
);
2248 new_msb
= kzalloc(sizeof(struct msb_data
), GFP_KERNEL
);
2252 new_msb
->card
= card
;
2253 memstick_set_drvdata(card
, new_msb
);
2254 spin_lock_init(&new_msb
->q_lock
);
2255 sg_init_table(msb
->prealloc_sg
, MS_BLOCK_MAX_SEGS
+1);
2257 if (msb_init_card(card
))
2260 if (msb
->block_size
!= new_msb
->block_size
)
2263 if (memcmp(msb
->boot_page
, new_msb
->boot_page
,
2264 sizeof(struct ms_boot_page
)))
2267 if (msb
->logical_block_count
!= new_msb
->logical_block_count
||
2268 memcmp(msb
->lba_to_pba_table
, new_msb
->lba_to_pba_table
,
2269 msb
->logical_block_count
))
2272 if (msb
->block_count
!= new_msb
->block_count
||
2273 memcmp(msb
->used_blocks_bitmap
, new_msb
->used_blocks_bitmap
,
2274 msb
->block_count
/ 8))
2280 dbg("Card was removed/replaced during suspend");
2282 msb
->card_dead
= card_dead
;
2283 memstick_set_drvdata(card
, msb
);
2286 msb_data_clear(new_msb
);
2291 mutex_unlock(&card
->host
->lock
);
2296 #define msb_suspend NULL
2297 #define msb_resume NULL
2299 #endif /* CONFIG_PM */
2301 static struct memstick_device_id msb_id_tbl
[] = {
2302 {MEMSTICK_MATCH_ALL
, MEMSTICK_TYPE_LEGACY
, MEMSTICK_CATEGORY_STORAGE
,
2303 MEMSTICK_CLASS_FLASH
},
2305 {MEMSTICK_MATCH_ALL
, MEMSTICK_TYPE_LEGACY
, MEMSTICK_CATEGORY_STORAGE
,
2306 MEMSTICK_CLASS_ROM
},
2308 {MEMSTICK_MATCH_ALL
, MEMSTICK_TYPE_LEGACY
, MEMSTICK_CATEGORY_STORAGE
,
2311 {MEMSTICK_MATCH_ALL
, MEMSTICK_TYPE_LEGACY
, MEMSTICK_CATEGORY_STORAGE
,
2314 {MEMSTICK_MATCH_ALL
, MEMSTICK_TYPE_DUO
, MEMSTICK_CATEGORY_STORAGE_DUO
,
2315 MEMSTICK_CLASS_DUO
},
2318 MODULE_DEVICE_TABLE(memstick
, msb_id_tbl
);
2321 static struct memstick_driver msb_driver
= {
2323 .name
= DRIVER_NAME
,
2324 .owner
= THIS_MODULE
2326 .id_table
= msb_id_tbl
,
2328 .remove
= msb_remove
,
2329 .suspend
= msb_suspend
,
2330 .resume
= msb_resume
2333 static int __init
msb_init(void)
2335 int rc
= memstick_register_driver(&msb_driver
);
2337 pr_err("failed to register memstick driver (error %d)\n", rc
);
2342 static void __exit
msb_exit(void)
2344 memstick_unregister_driver(&msb_driver
);
2345 idr_destroy(&msb_disk_idr
);
2348 module_init(msb_init
);
2349 module_exit(msb_exit
);
2351 module_param(cache_flush_timeout
, int, S_IRUGO
);
2352 MODULE_PARM_DESC(cache_flush_timeout
,
2353 "Cache flush timeout in msec (1000 default)");
2354 module_param(debug
, int, S_IRUGO
| S_IWUSR
);
2355 MODULE_PARM_DESC(debug
, "Debug level (0-2)");
2357 module_param(verify_writes
, bool, S_IRUGO
);
2358 MODULE_PARM_DESC(verify_writes
, "Read back and check all data that is written");
2360 MODULE_LICENSE("GPL");
2361 MODULE_AUTHOR("Maxim Levitsky");
2362 MODULE_DESCRIPTION("Sony MemoryStick block device driver");