/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/async.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_Calc_Used_Bits
 * Inputs:       Power of 2 number
 * Outputs:      Number of used bits
 *               0, if the argument is 0
 * Description:  Calculate the number of bits used by a given power of 2 number
 *               Number can be up to 32 bits
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	int tot_bits = 0;

	/* Binary-search for the most significant set bit,
	 * halving the shift width at each step */
	if (n >= 1 << 16) {
		n >>= 16;
		tot_bits += 16;
	}
	if (n >= 1 << 8) {
		n >>= 8;
		tot_bits += 8;
	}
	if (n >= 1 << 4) {
		n >>= 4;
		tot_bits += 4;
	}
	if (n >= 1 << 2) {
		n >>= 2;
		tot_bits += 2;
	}
	if (n >= 1 << 1)
		tot_bits += 1;

	return (n == 0) ? 0 : tot_bits;
}
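/*
 * Usage sketch (illustration only, not part of the driver): for a
 * power-of-2 input the result is the bit position of the set bit,
 * i.e. a 32-bit floor(log2(n)); n == 0 is special-cased to 0.
 */
#if 0	/* illustrative only -- not compiled */
static void calc_used_bits_examples(void)
{
	BUG_ON(GLOB_Calc_Used_Bits(1) != 0);		/* 2^0 */
	BUG_ON(GLOB_Calc_Used_Bits(8) != 3);		/* 2^3 */
	BUG_ON(GLOB_Calc_Used_Bits(1 << 20) != 20);	/* 2^20 */
	BUG_ON(GLOB_Calc_Used_Bits(0) != 0);		/* special case */
}
#endif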
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Div
 * Inputs:       Dividend of u64 type
 *               A power of 2 number as the divisor
 * Outputs:      Quotient of the division operation
 * Description:  Divides the address by the divisor using a bit shift
 *               (essentially without explicitly using "/").
 *               The divisor is a power of 2 and the dividend is a u64.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
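/*
 * Worked example (illustration only): with a hypothetical 2 KiB page
 * size, GLOB_Calc_Used_Bits(2048) == 11, so
 * GLOB_u64_Div(0x12345, 2048) == 0x12345 >> 11 == 0x24, the same
 * result as the integer division 0x12345 / 2048.
 */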
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_u64_Remainder
 * Inputs:       Dividend of u64 type
 *               Divisor type (1 - page address, 2 - block address)
 * Outputs:      Remainder of the division operation
 * Description:  Calculates the remainder of a u64 number divided by the
 *               divisor (a power of 2 number) using bit shift and multiply
 *               operations (essentially without explicitly using "/").
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	result = addr - result;

	return result;
}
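/*
 * Worked example (illustration only, with an assumed 2 KiB page:
 * wPageDataSize == 2048, nBitsInPageDataSize == 11): for
 * addr == 0x12345, (0x12345 >> 11) * 2048 == 0x12000, so the remainder
 * is 0x345. For a power-of-2 size this equals addr & (2048 - 1).
 */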
#define NUM_DEVICES			1

#define GLOB_SBD_NAME			"nd"
#define GLOB_SBD_IRQ_NUM		(29)

#define GLOB_SBD_IOCTL_GC			(0x7701)
#define GLOB_SBD_IOCTL_WL			(0x7702)
#define GLOB_SBD_IOCTL_FORMAT			(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH		(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE		(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE		(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE	(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO		(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA		(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA		(0x770A)
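/*
 * A minimal userspace sketch of driving the ioctl interface above
 * (illustration only). It assumes the block node is /dev/nda, which
 * follows from the "%s%c" disk_name format used below, and that the
 * command values are shared with userspace; neither is shown by this
 * file itself.
 */
#if 0	/* illustrative only -- not compiled */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int flush_spectra_cache(void)
{
	int fd, ret;

	fd = open("/dev/nda", O_RDWR);
	if (fd < 0)
		return -1;
	/* GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705) takes no argument */
	ret = ioctl(fd, 0x7705, 0);
	close(fd);
	return ret;
}
#endif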
static int reserved_mb = 25;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
MODULE_LICENSE("GPL");
struct spectra_nand_dev {
	u64 size;
	spinlock_t qlock;
	void __iomem *ioaddr;		/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;
};
static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;
static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}

	if (glob_ftl_execute_cmds())
		return -EIO;
	else
		return 0;
}
struct ioctl_rw_page_info {
	u8 *data;
	unsigned int page;
};
static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}
static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = memdup_user((void __user *)info.data,
			  IdentifyDeviceData.PageDataSize);
	if (IS_ERR(buf)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		return PTR_ERR(buf);
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}
/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}
/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserved 1 block for block table */

	return res_blks;
}
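/*
 * Worked example (illustrative numbers, not from this file): with a
 * 2 KiB PageDataSize and 64 PagesPerBlock, blk_size is 128 KiB, so the
 * default reserved_mb of 25 reserves (25 * 1024 * 1024) / (128 * 1024)
 * == 200 blocks for the OS image.
 */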
/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS Image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_flags & REQ_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
		       "capacity! sector %d, current_nr_sectors %d, "
		       "while capacity is %d\n",
		       (int)blk_rq_pos(req),
		       blk_rq_cur_sectors(req),
		       (int)get_capacity(tr->gd));
		return -EIO;
	}

	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
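	/*
	 * Worked example (illustration only): with 2 KiB pages, ratio == 4
	 * (four 512-byte sectors per page). A request starting at logical
	 * sector 6 gives hd_start_sect == 1 and rsect == 2, so the transfer
	 * starts 2 sectors into NAND page 1; with nsect == 7, tsect == 2
	 * sectors complete that first partial page.
	 */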
	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}

		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;

	case WRITE:
		/* Write the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
		}

		if (glob_ftl_execute_cmds())
			return -EIO;
		else
			return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
/* Request function that "handles clustering". */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;
	wake_up_process(pdev->thread);
}
static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}
static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	return ret;
}
static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
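/*
 * Worked example (illustration only): with the fixed 4-head, 16-sector
 * translation above, a device reporting 2097152 sectors (1 GiB of
 * 512-byte sectors) yields 2097152 / 64 == 32768 cylinders.
 */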
int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		   unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}
static DEFINE_MUTEX(ffsport_mutex);
int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&ffsport_mutex);
	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&ffsport_mutex);

	return ret;
}
static struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.ioctl = GLOB_SBD_unlocked_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};
static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		       "for OS image, %d blocks for bad block replacement.\n",
		       get_res_blk_num_os(),
		       get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();

	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);

	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		return -ENOMEM;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		kfree(dev->tmp_buf);
		return -ENOMEM;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support >4KB hardware sectors, */
	/* we force reporting a 512 byte hardware sector size to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_flush(dev->queue, REQ_FLUSH);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
		return PTR_ERR(dev->thread);
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting\n");
		return -ENOMEM;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;
}
static ssize_t show_nand_block_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_page_size.\n");
}
static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
{
	int i;

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		return;
	}

	nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
		       "Num blocks=%d, pagesperblock=%d, "
		       "pagedatasize=%d, ECCBytesPerSector=%d\n",
		       (int)IdentifyDeviceData.NumBlocks,
		       (int)IdentifyDeviceData.PagesPerBlock,
		       (int)IdentifyDeviceData.PageDataSize,
		       (int)IdentifyDeviceData.wECCBytesPerSector);

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra",
		       GLOB_SBD_majornum);
		goto out_ftl_flash_register;
	}

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_blk_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return;

out_blk_register:
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
	printk(KERN_ERR "Spectra: Module load failed.\n");
}
int register_spectra_ftl(void)
{
	async_schedule(register_spectra_ftl_async, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_spectra_ftl);
static int GLOB_SBD_init(void)
{
	/* Set debug output level (0~3) here. 3 is most verbose */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		return -ENODEV;
	}

	return 0;
}
static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];

		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}
module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);