2 * Driver for SEGA Dreamcast Visual Memory Unit
4 * Copyright (c) Adrian McMenamin 2002 - 2009
5 * Copyright (c) Paul Mundt 2001
7 * Licensed under version 2 of the
8 * GNU General Public Licence
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/sched.h>
13 #include <linux/delay.h>
14 #include <linux/maple.h>
15 #include <linux/mtd/mtd.h>
16 #include <linux/mtd/map.h>
19 unsigned char *buffer
; /* Cache */
20 unsigned int block
; /* Which block was cached */
21 unsigned long jiffies_atc
; /* When was it cached? */
26 struct maple_device
*mdev
;
35 struct vmu_cache
*pcache
;
48 unsigned char *blockread
;
49 struct vmupart
*parts
;
54 unsigned int num
; /* block number */
55 unsigned int ofs
; /* block offset */
/* Translate a byte offset within an MTD partition into a vmu_block
 * (block number + offset within that block).
 * NOTE(review): this is a partial/mangled extract — braces, the
 * mpart/mdev assignments and several error-return lines are missing
 * from this view; do not treat the span as compilable as-is. */
58 static struct vmu_block
*ofs_to_block(unsigned long src_ofs
,
59 struct mtd_info
*mtd
, int partition
)
61 struct vmu_block
*vblock
;
62 struct maple_device
*mdev
;
64 struct mdev_part
*mpart
;
69 card
= maple_get_drvdata(mdev
);
/* Reject offsets beyond the end of this partition
 * (numblocks * blocklen bytes). */
71 if (src_ofs
>= card
->parts
[partition
].numblocks
* card
->blocklen
)
/* Integer division/modulo split the offset into block and offset. */
74 num
= src_ofs
/ card
->blocklen
;
75 if (num
> card
->parts
[partition
].numblocks
)
78 vblock
= kmalloc(sizeof(struct vmu_block
), GFP_KERNEL
);
83 vblock
->ofs
= src_ofs
% card
->blocklen
;
90 /* Maple bus callback function for reads */
/* Copies one read phase (blocklen/readcnt bytes) from the maple
 * receive buffer into card->blockread, skipping a 12-byte header
 * in recvbuf->buf.
 * NOTE(review): partial extract — the mdev assignment and the body
 * braces are missing from this view. */
91 static void vmu_blockread(struct mapleq
*mq
)
93 struct maple_device
*mdev
;
97 card
= maple_get_drvdata(mdev
);
98 /* copy the read in data */
/* Bail out if no read is in flight (blockread buffer not set). */
100 if (unlikely(!card
->blockread
))
103 memcpy(card
->blockread
, mq
->recvbuf
->buf
+ 12,
104 card
->blocklen
/card
->readcnt
);
108 /* Interface with maple bus to read blocks
109 * caching the results so that other parts
110 * of the driver can access block reads */
/* Reads block `num` into `buf`, one phase at a time, and mirrors the
 * data into the per-partition cache (pcache->buffer).
 * NOTE(review): partial/mangled extract — braces, several assignments
 * (e.g. mpart/mdev) and some error-path lines are missing from view. */
111 static int maple_vmu_read_block(unsigned int num
, unsigned char *buf
,
112 struct mtd_info
*mtd
)
114 struct memcard
*card
;
115 struct mdev_part
*mpart
;
116 struct maple_device
*mdev
;
117 int partition
, error
= 0, x
, wait
;
118 unsigned char *blockread
= NULL
;
119 struct vmu_cache
*pcache
;
124 partition
= mpart
->partition
;
125 card
= maple_get_drvdata(mdev
);
126 pcache
= card
->parts
[partition
].pcache
;
129 /* prepare the cache for this block */
/* Lazily allocate the cache buffer on first use. */
130 if (!pcache
->buffer
) {
131 pcache
->buffer
= kmalloc(card
->blocklen
, GFP_KERNEL
);
132 if (!pcache
->buffer
) {
133 dev_err(&mdev
->dev
, "VMU at (%d, %d) - read fails due"
134 " to lack of memory\n", mdev
->port
,
142 * Reads may be phased - again the hardware spec
143 * supports this - though may not be any devices in
144 * the wild that implement it, but we will here
/* One maple BREAD transaction per phase x of readcnt phases. */
146 for (x
= 0; x
< card
->readcnt
; x
++) {
/* Command word packs partition, phase and block number. */
147 sendbuf
= cpu_to_be32(partition
<< 24 | x
<< 16 | num
);
/* Wait (up to 1s) for any in-flight maple transaction to finish. */
149 if (atomic_read(&mdev
->busy
) == 1) {
150 wait_event_interruptible_timeout(mdev
->maple_wait
,
151 atomic_read(&mdev
->busy
) == 0, HZ
);
152 if (atomic_read(&mdev
->busy
) == 1) {
153 dev_notice(&mdev
->dev
, "VMU at (%d, %d)"
154 " is busy\n", mdev
->port
, mdev
->unit
);
160 atomic_set(&mdev
->busy
, 1);
/* Per-phase receive buffer; vmu_blockread() fills it. */
161 blockread
= kmalloc(card
->blocklen
/card
->readcnt
, GFP_KERNEL
);
164 atomic_set(&mdev
->busy
, 0);
167 card
->blockread
= blockread
;
169 maple_getcond_callback(mdev
, vmu_blockread
, 0,
171 error
= maple_add_packet(mdev
, MAPLE_FUNC_MEMCARD
,
172 MAPLE_COMMAND_BREAD
, 2, &sendbuf
);
173 /* Very long timeouts seem to be needed when box is stressed */
174 wait
= wait_event_interruptible_timeout(mdev
->maple_wait
,
175 (atomic_read(&mdev
->busy
) == 0 ||
176 atomic_read(&mdev
->busy
) == 2), HZ
* 3);
178 * MTD layer does not handle hotplugging well
179 * so have to return errors when VMU is unplugged
180 * in the middle of a read (busy == 2)
/* busy == 2 marks a device unplugged mid-transaction. */
182 if (error
|| atomic_read(&mdev
->busy
) == 2) {
183 if (atomic_read(&mdev
->busy
) == 2)
185 atomic_set(&mdev
->busy
, 0);
186 card
->blockread
= NULL
;
/* Timeout or signal: tear down the queued packet ourselves. */
189 if (wait
== 0 || wait
== -ERESTARTSYS
) {
190 card
->blockread
= NULL
;
191 atomic_set(&mdev
->busy
, 0);
193 list_del_init(&(mdev
->mq
->list
));
194 kfree(mdev
->mq
->sendbuf
);
195 mdev
->mq
->sendbuf
= NULL
;
196 if (wait
== -ERESTARTSYS
) {
197 dev_warn(&mdev
->dev
, "VMU read on (%d, %d)"
198 " interrupted on block 0x%X\n",
199 mdev
->port
, mdev
->unit
, num
);
201 dev_notice(&mdev
->dev
, "VMU read on (%d, %d)"
202 " timed out on block 0x%X\n",
203 mdev
->port
, mdev
->unit
, num
);
/* Copy this phase into the caller's buffer and into the cache. */
207 memcpy(buf
+ (card
->blocklen
/card
->readcnt
) * x
, blockread
,
208 card
->blocklen
/card
->readcnt
);
210 memcpy(pcache
->buffer
+ (card
->blocklen
/card
->readcnt
) * x
,
211 card
->blockread
, card
->blocklen
/card
->readcnt
);
212 card
->blockread
= NULL
;
/* Timestamp the cache fill for the 1s validity check in reads. */
214 pcache
->jiffies_atc
= jiffies
;
227 /* communicate with maple bus for phased writing */
/* Writes block `num` from `buf` in writecnt phases of
 * blocklen/writecnt bytes each; returns card->blocklen on success.
 * NOTE(review): partial/mangled extract — braces, mpart/mdev setup
 * and parts of the error paths are missing from this view. */
228 static int maple_vmu_write_block(unsigned int num
, const unsigned char *buf
,
229 struct mtd_info
*mtd
)
231 struct memcard
*card
;
232 struct mdev_part
*mpart
;
233 struct maple_device
*mdev
;
234 int partition
, error
, locking
, x
, phaselen
, wait
;
239 partition
= mpart
->partition
;
240 card
= maple_get_drvdata(mdev
);
242 phaselen
= card
->blocklen
/card
->writecnt
;
/* One extra 32-bit word in front of the payload for the command. */
244 sendbuf
= kmalloc(phaselen
+ 4, GFP_KERNEL
);
249 for (x
= 0; x
< card
->writecnt
; x
++) {
250 sendbuf
[0] = cpu_to_be32(partition
<< 24 | x
<< 16 | num
);
251 memcpy(&sendbuf
[1], buf
+ phaselen
* x
, phaselen
);
252 /* wait until the device is not busy doing something else
253 * or 1 second - which ever is longer */
254 if (atomic_read(&mdev
->busy
) == 1) {
255 wait_event_interruptible_timeout(mdev
->maple_wait
,
256 atomic_read(&mdev
->busy
) == 0, HZ
);
257 if (atomic_read(&mdev
->busy
) == 1) {
259 dev_notice(&mdev
->dev
, "VMU write at (%d, %d)"
260 "failed - device is busy\n",
261 mdev
->port
, mdev
->unit
);
265 atomic_set(&mdev
->busy
, 1);
/* Packet length is in 32-bit words: payload words + 2. */
267 locking
= maple_add_packet(mdev
, MAPLE_FUNC_MEMCARD
,
268 MAPLE_COMMAND_BWRITE
, phaselen
/ 4 + 2, sendbuf
);
269 wait
= wait_event_interruptible_timeout(mdev
->maple_wait
,
270 atomic_read(&mdev
->busy
) == 0, HZ
/10);
273 atomic_set(&mdev
->busy
, 0);
/* busy == 2: device vanished mid-write; just clear the flag. */
276 if (atomic_read(&mdev
->busy
) == 2) {
277 atomic_set(&mdev
->busy
, 0);
278 } else if (wait
== 0 || wait
== -ERESTARTSYS
) {
280 dev_warn(&mdev
->dev
, "Write at (%d, %d) of block"
281 " 0x%X at phase %d failed: could not"
282 " communicate with VMU", mdev
->port
,
284 atomic_set(&mdev
->busy
, 0);
285 kfree(mdev
->mq
->sendbuf
);
286 mdev
->mq
->sendbuf
= NULL
;
287 list_del_init(&(mdev
->mq
->list
));
/* Success: report a whole block written. */
293 return card
->blocklen
;
298 dev_err(&mdev
->dev
, "VMU (%d, %d): write failed\n", mdev
->port
,
303 /* mtd function to simulate reading byte by byte */
/* Reads the single byte at `ofs` by fetching its whole block via
 * maple_vmu_read_block() into a temporary buffer and indexing it.
 * NOTE(review): partial extract — error handling, kfree of the
 * temporary buffer, braces and the return statement are not visible. */
304 static unsigned char vmu_flash_read_char(unsigned long ofs
, int *retval
,
305 struct mtd_info
*mtd
)
307 struct vmu_block
*vblock
;
308 struct memcard
*card
;
309 struct mdev_part
*mpart
;
310 struct maple_device
*mdev
;
311 unsigned char *buf
, ret
;
312 int partition
, error
;
316 partition
= mpart
->partition
;
317 card
= maple_get_drvdata(mdev
);
320 buf
= kmalloc(card
->blocklen
, GFP_KERNEL
);
/* Map the flat offset to (block, offset-within-block). */
327 vblock
= ofs_to_block(ofs
, mtd
, partition
);
334 error
= maple_vmu_read_block(vblock
->num
, buf
, mtd
);
341 ret
= buf
[vblock
->ofs
];
351 /* mtd higher order function to read flash */
/* MTD .read implementation: serves the request from the 1-second
 * block cache when possible, otherwise falls back to byte reads via
 * vmu_flash_read_char() (which refills the cache as a side effect).
 * NOTE(review): partial/mangled extract — loop braces, retlen
 * bookkeeping and some branches are missing from this view. */
352 static int vmu_flash_read(struct mtd_info
*mtd
, loff_t from
, size_t len
,
353 size_t *retlen
, u_char
*buf
)
355 struct maple_device
*mdev
;
356 struct memcard
*card
;
357 struct mdev_part
*mpart
;
358 struct vmu_cache
*pcache
;
359 struct vmu_block
*vblock
;
360 int index
= 0, retval
, partition
, leftover
, numblocks
;
365 partition
= mpart
->partition
;
366 card
= maple_get_drvdata(mdev
);
/* Clamp the request to the end of the partition. */
368 numblocks
= card
->parts
[partition
].numblocks
;
369 if (from
+ len
> numblocks
* card
->blocklen
)
370 len
= numblocks
* card
->blocklen
- from
;
373 /* Have we cached this bit already? */
374 pcache
= card
->parts
[partition
].pcache
;
376 vblock
= ofs_to_block(from
+ index
, mtd
, partition
);
379 /* Have we cached this and is the cache valid and timely? */
/* Cache hit requires: filled within the last HZ jiffies AND the
 * cached block number matches the block being read. */
381 time_before(jiffies
, pcache
->jiffies_atc
+ HZ
) &&
382 (pcache
->block
== vblock
->num
)) {
383 /* we have cached it, so do necessary copying */
384 leftover
= card
->blocklen
- vblock
->ofs
;
385 if (vblock
->ofs
+ len
- index
< card
->blocklen
) {
386 /* only a bit of this block to copy */
388 pcache
->buffer
+ vblock
->ofs
,
392 /* otherwise copy remainder of whole block */
393 memcpy(buf
+ index
, pcache
->buffer
+
394 vblock
->ofs
, leftover
);
399 * Not cached so read one byte -
400 * but cache the rest of the block
402 cx
= vmu_flash_read_char(from
+ index
, &retval
, mtd
);
408 memset(buf
+ index
, cx
, 1);
412 } while (len
> index
);
/* MTD .write implementation: read-modify-write per block — fetch the
 * existing block, patch it byte by byte from `buf`, write the block
 * back, then invalidate the partition cache.
 * NOTE(review): partial/mangled extract — braces, retlen updates,
 * some loop-control and error-path lines are missing from this view. */
418 static int vmu_flash_write(struct mtd_info
*mtd
, loff_t to
, size_t len
,
419 size_t *retlen
, const u_char
*buf
)
421 struct maple_device
*mdev
;
422 struct memcard
*card
;
423 struct mdev_part
*mpart
;
424 int index
= 0, partition
, error
= 0, numblocks
;
425 struct vmu_cache
*pcache
;
426 struct vmu_block
*vblock
;
427 unsigned char *buffer
;
431 partition
= mpart
->partition
;
432 card
= maple_get_drvdata(mdev
);
/* Clamp the request to the end of the partition. */
434 numblocks
= card
->parts
[partition
].numblocks
;
435 if (to
+ len
> numblocks
* card
->blocklen
)
436 len
= numblocks
* card
->blocklen
- to
;
442 vblock
= ofs_to_block(to
, mtd
, partition
);
/* Scratch buffer holding one whole block for read-modify-write. */
448 buffer
= kmalloc(card
->blocklen
, GFP_KERNEL
);
455 /* Read in the block we are to write to */
456 error
= maple_vmu_read_block(vblock
->num
, buffer
, mtd
);
461 buffer
[vblock
->ofs
] = buf
[index
];
466 } while (vblock
->ofs
< card
->blocklen
);
468 /* write out new buffer */
469 error
= maple_vmu_write_block(vblock
->num
, buffer
, mtd
);
470 /* invalidate the cache */
471 pcache
= card
->parts
[partition
].pcache
;
/* maple_vmu_write_block() returns blocklen on success. */
474 if (error
!= card
->blocklen
)
479 } while (len
> index
);
491 dev_err(&mdev
->dev
, "VMU write failing with error %d\n", error
);
/* MTD .sync hook — intentionally a no-op: writes go straight to the
 * device in vmu_flash_write(), so there is nothing to flush. */
495 static void vmu_flash_sync(struct mtd_info
*mtd
)
497 /* Do nothing here */
500 /* Maple bus callback function to recursively query hardware details */
/* Parses the GETMINFO reply for the current partition, fills in the
 * vmupart and mtd_info for it, registers the MTD device, then (if
 * more partitions remain) re-queues itself for the next partition.
 * NOTE(review): partial/mangled extract — braces, some declarations
 * (res, error, partnum), callback arguments and parts of the failure
 * unwind are missing from this view. */
501 static void vmu_queryblocks(struct mapleq
*mq
)
503 struct maple_device
*mdev
;
505 struct memcard
*card
;
507 struct vmu_cache
*pcache
;
508 struct mdev_part
*mpart
;
509 struct mtd_info
*mtd_cur
;
510 struct vmupart
*part_cur
;
514 card
= maple_get_drvdata(mdev
);
/* Reply buffer is read as 16-bit words: word 12 = user block
 * count, word 6 = root block number. */
515 res
= (unsigned short *) (mq
->recvbuf
->buf
);
516 card
->tempA
= res
[12];
517 card
->tempB
= res
[6];
519 dev_info(&mdev
->dev
, "VMU device at partition %d has %d user "
520 "blocks with a root block at %d\n", card
->partition
,
521 card
->tempA
, card
->tempB
);
523 part_cur
= &card
->parts
[card
->partition
];
524 part_cur
->user_blocks
= card
->tempA
;
525 part_cur
->root_block
= card
->tempB
;
/* Root block is the highest-numbered block, so total = root + 1. */
526 part_cur
->numblocks
= card
->tempB
+ 1;
527 part_cur
->name
= kmalloc(12, GFP_KERNEL
);
/* Device name encodes port, unit and partition: vmuP.U.N. */
531 sprintf(part_cur
->name
, "vmu%d.%d.%d",
532 mdev
->port
, mdev
->unit
, card
->partition
);
533 mtd_cur
= &card
->mtd
[card
->partition
];
534 mtd_cur
->name
= part_cur
->name
;
536 mtd_cur
->flags
= MTD_WRITEABLE
|MTD_NO_ERASE
;
537 mtd_cur
->size
= part_cur
->numblocks
* card
->blocklen
;
538 mtd_cur
->erasesize
= card
->blocklen
;
539 mtd_cur
->_write
= vmu_flash_write
;
540 mtd_cur
->_read
= vmu_flash_read
;
541 mtd_cur
->_sync
= vmu_flash_sync
;
542 mtd_cur
->writesize
= card
->blocklen
;
544 mpart
= kmalloc(sizeof(struct mdev_part
), GFP_KERNEL
);
549 mpart
->partition
= card
->partition
;
550 mtd_cur
->priv
= mpart
;
551 mtd_cur
->owner
= THIS_MODULE
;
553 pcache
= kzalloc(sizeof(struct vmu_cache
), GFP_KERNEL
);
555 goto fail_cache_create
;
556 part_cur
->pcache
= pcache
;
558 error
= mtd_device_register(mtd_cur
, NULL
, 0);
560 goto fail_mtd_register
;
562 maple_getcond_callback(mdev
, NULL
, 0,
566 * Set up a recursive call to the (probably theoretical)
567 * second or more partition
/* Query the next partition, if any, by re-arming this callback. */
569 if (++card
->partition
< card
->partitions
) {
570 partnum
= cpu_to_be32(card
->partition
<< 24);
571 maple_getcond_callback(mdev
, vmu_queryblocks
, 0,
573 maple_add_packet(mdev
, MAPLE_FUNC_MEMCARD
,
574 MAPLE_COMMAND_GETMINFO
, 2, &partnum
);
/* --- failure unwind: free caches and priv data for all
 * partitions initialised so far (error reused as index) --- */
579 dev_err(&mdev
->dev
, "Could not register maple device at (%d, %d)"
580 "error is 0x%X\n", mdev
->port
, mdev
->unit
, error
);
581 for (error
= 0; error
<= card
->partition
; error
++) {
582 kfree(((card
->parts
)[error
]).pcache
);
583 ((card
->parts
)[error
]).pcache
= NULL
;
587 for (error
= 0; error
<= card
->partition
; error
++) {
588 kfree(((card
->mtd
)[error
]).priv
);
589 ((card
->mtd
)[error
]).priv
= NULL
;
591 maple_getcond_callback(mdev
, NULL
, 0,
593 kfree(part_cur
->name
);
598 /* Handles very basic info about the flash, queries for details */
/* Probe-time setup: decodes the packed flash geometry word from the
 * maple devinfo, allocates the memcard / parts / mtd arrays, then
 * fires a GETMINFO query whose reply is handled by vmu_queryblocks().
 * NOTE(review): partial/mangled extract — braces, several
 * declarations (c, partnum, error handling labels) and allocation
 * checks are missing from this view. */
599 static int vmu_connect(struct maple_device
*mdev
)
601 unsigned long test_flash_data
, basic_flash_data
;
603 struct memcard
*card
;
606 test_flash_data
= be32_to_cpu(mdev
->devinfo
.function
);
607 /* Need to count how many bits are set - to find out which
608 * function_data element has details of the memory card
610 c
= hweight_long(test_flash_data
);
612 basic_flash_data
= be32_to_cpu(mdev
->devinfo
.function_data
[c
- 1]);
614 card
= kmalloc(sizeof(struct memcard
), GFP_KERNEL
);
/* Decode the packed geometry word: partitions (byte 3, +1),
 * block length (byte 2, +1, times 32), write/read phase counts
 * (nibbles), removable flag (bit 7). */
620 card
->partitions
= (basic_flash_data
>> 24 & 0xFF) + 1;
621 card
->blocklen
= ((basic_flash_data
>> 16 & 0xFF) + 1) << 5;
622 card
->writecnt
= basic_flash_data
>> 12 & 0xF;
623 card
->readcnt
= basic_flash_data
>> 8 & 0xF;
624 card
->removeable
= basic_flash_data
>> 7 & 1;
629 * Not sure there are actually any multi-partition devices in the
630 * real world, but the hardware supports them, so, so will we
632 card
->parts
= kmalloc(sizeof(struct vmupart
) * card
->partitions
,
636 goto fail_partitions
;
639 card
->mtd
= kmalloc(sizeof(struct mtd_info
) * card
->partitions
,
646 maple_set_drvdata(mdev
, card
);
649 * We want to trap meminfo not get cond
650 * so set interval to zero, but rely on maple bus
651 * driver to pass back the results of the meminfo
653 maple_getcond_callback(mdev
, vmu_queryblocks
, 0,
656 /* Make sure we are clear to go */
657 if (atomic_read(&mdev
->busy
) == 1) {
658 wait_event_interruptible_timeout(mdev
->maple_wait
,
659 atomic_read(&mdev
->busy
) == 0, HZ
);
660 if (atomic_read(&mdev
->busy
) == 1) {
661 dev_notice(&mdev
->dev
, "VMU at (%d, %d) is busy\n",
662 mdev
->port
, mdev
->unit
);
664 goto fail_device_busy
;
668 atomic_set(&mdev
->busy
, 1);
671 * Set up the minfo call: vmu_queryblocks will handle
672 * the information passed back
674 error
= maple_add_packet(mdev
, MAPLE_FUNC_MEMCARD
,
675 MAPLE_COMMAND_GETMINFO
, 2, &partnum
);
677 dev_err(&mdev
->dev
, "Could not lock VMU at (%d, %d)"
678 " error is 0x%X\n", mdev
->port
, mdev
->unit
, error
);
/* Tears down the device on unplug: clears the maple callback, then
 * unregisters each partition's MTD device and frees its name.
 * NOTE(review): partial extract — braces, the declaration of x, and
 * the frees of mpart/parts/mtd/card arrays are missing from view. */
693 static void vmu_disconnect(struct maple_device
*mdev
)
695 struct memcard
*card
;
696 struct mdev_part
*mpart
;
699 mdev
->callback
= NULL
;
700 card
= maple_get_drvdata(mdev
);
701 for (x
= 0; x
< card
->partitions
; x
++) {
702 mpart
= ((card
->mtd
)[x
]).priv
;
704 mtd_device_unregister(&((card
->mtd
)[x
]));
705 kfree(((card
->parts
)[x
]).name
);
712 /* Callback to handle eccentricities of both mtd subsystem
713 * and general flakyness of Dreamcast VMUs
/* Refuses unload while any partition's MTD device is still in use
 * (usecount > 0).
 * NOTE(review): partial extract — braces, the declaration of x, and
 * the return statements are missing from this view. */
715 static int vmu_can_unload(struct maple_device
*mdev
)
717 struct memcard
*card
;
719 struct mtd_info
*mtd
;
721 card
= maple_get_drvdata(mdev
);
722 for (x
= 0; x
< card
->partitions
; x
++) {
723 mtd
= &((card
->mtd
)[x
]);
724 if (mtd
->usecount
> 0)
730 #define ERRSTR "VMU at (%d, %d) file error -"
/* Maple-bus file-error handler: decodes the error word at index 1 of
 * recvbuf and logs a human-readable message per error code.
 * NOTE(review): partial extract — the switch statement itself, braces
 * and the break statements are missing from this view; only the case
 * labels and their dev_notice calls are visible. */
732 static void vmu_file_error(struct maple_device
*mdev
, void *recvbuf
)
734 enum maple_file_errors error
= ((int *)recvbuf
)[1];
738 case MAPLE_FILEERR_INVALID_PARTITION
:
739 dev_notice(&mdev
->dev
, ERRSTR
" invalid partition number\n",
740 mdev
->port
, mdev
->unit
);
743 case MAPLE_FILEERR_PHASE_ERROR
:
744 dev_notice(&mdev
->dev
, ERRSTR
" phase error\n",
745 mdev
->port
, mdev
->unit
);
748 case MAPLE_FILEERR_INVALID_BLOCK
:
749 dev_notice(&mdev
->dev
, ERRSTR
" invalid block number\n",
750 mdev
->port
, mdev
->unit
);
753 case MAPLE_FILEERR_WRITE_ERROR
:
754 dev_notice(&mdev
->dev
, ERRSTR
" write error\n",
755 mdev
->port
, mdev
->unit
);
758 case MAPLE_FILEERR_INVALID_WRITE_LENGTH
:
759 dev_notice(&mdev
->dev
, ERRSTR
" invalid write length\n",
760 mdev
->port
, mdev
->unit
);
763 case MAPLE_FILEERR_BAD_CRC
:
764 dev_notice(&mdev
->dev
, ERRSTR
" bad CRC\n",
765 mdev
->port
, mdev
->unit
);
/* Fallback: log the raw error code. */
769 dev_notice(&mdev
->dev
, ERRSTR
" 0x%X\n",
770 mdev
->port
, mdev
->unit
, error
);
/* Driver-model probe: wires up the unload/file-error callbacks on the
 * maple device, then delegates to vmu_connect().
 * NOTE(review): partial extract — braces, the error declaration and
 * the return statement are missing; mdrv is assigned but its use is
 * not visible in this view. */
775 static int probe_maple_vmu(struct device
*dev
)
778 struct maple_device
*mdev
= to_maple_dev(dev
);
779 struct maple_driver
*mdrv
= to_maple_driver(dev
->driver
);
781 mdev
->can_unload
= vmu_can_unload
;
782 mdev
->fileerr_handler
= vmu_file_error
;
785 error
= vmu_connect(mdev
);
/* Driver-model remove: delegates device teardown to vmu_disconnect().
 * NOTE(review): partial extract — braces and the return statement
 * are missing from this view. */
792 static int remove_maple_vmu(struct device
*dev
)
794 struct maple_device
*mdev
= to_maple_dev(dev
);
796 vmu_disconnect(mdev
);
/* Maple-bus driver descriptor binding the memory-card function code
 * to the probe/remove handlers above.
 * NOTE(review): partial extract — the .drv sub-struct wrapping of
 * .name/.probe/.remove and the closing brace are not visible here. */
800 static struct maple_driver vmu_flash_driver
= {
801 .function
= MAPLE_FUNC_MEMCARD
,
803 .name
= "Dreamcast_visual_memory",
804 .probe
= probe_maple_vmu
,
805 .remove
= remove_maple_vmu
,
/* Module init: register the driver with the maple bus core. */
809 static int __init
vmu_flash_map_init(void)
811 return maple_driver_register(&vmu_flash_driver
);
/* Module exit: unregister the driver from the maple bus core. */
814 static void __exit
vmu_flash_map_exit(void)
816 maple_driver_unregister(&vmu_flash_driver
);
/* Module entry/exit hookup and metadata. */
819 module_init(vmu_flash_map_init
);
820 module_exit(vmu_flash_map_exit
);
822 MODULE_LICENSE("GPL");
823 MODULE_AUTHOR("Adrian McMenamin");
824 MODULE_DESCRIPTION("Flash mapping for Sega Dreamcast visual memory");