/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/async.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_Calc_Used_Bits
* Inputs:       Power of 2 number
* Outputs:      Number of Used Bits
*               0, if the argument is 0
* Description:  Calculate the number of bits used by a given power of 2 number
*               Number can be up to 32 bit
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_Calc_Used_Bits(u32 n)
{
	/* ilog2() (from linux/log2.h) returns the index of the highest
	 * set bit; it is undefined for 0, so handle that case first. */
	return (n == 0) ? 0 : ilog2(n);
}
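/*
 * Illustrative example: for a 4 KiB page size, GLOB_Calc_Used_Bits(4096)
 * returns 12, since 4096 == 1 << 12.
 */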
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Div
* Inputs:       Number of u64
*               A power of 2 number as divisor
* Outputs:      Quotient of the division operation
* Description:  It divides the address by the divisor by using a bit shift
*               operation (essentially without explicitly using "/").
*               The divisor is a power of 2 number and the dividend is u64
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
}
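/*
 * Illustrative example: with a 4 KiB divisor, GLOB_Calc_Used_Bits(0x1000)
 * is 12, so GLOB_u64_Div(0x12345, 0x1000) computes 0x12345 >> 12 == 0x12,
 * i.e. the same result as 0x12345 / 0x1000.
 */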
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     GLOB_u64_Remainder
* Inputs:       Number of u64
*               Divisor type (1 = page address, 2 = block address)
* Outputs:      Remainder of the division operation
* Description:  It calculates the remainder of a number (of u64) by the
*               divisor (a power of 2 number) by using bit shift and multiply
*               operations (essentially without explicitly using "/").
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 result = 0;

	if (divisor_type == 1) { /* Remainder -- Page */
		result = (addr >> DeviceInfo.nBitsInPageDataSize);
		result = result * DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) { /* Remainder -- Block */
		result = (addr >> DeviceInfo.nBitsInBlockDataSize);
		result = result * DeviceInfo.wBlockDataSize;
	}

	/* addr minus the largest page/block-aligned address below it */
	result = addr - result;

	return result;
}
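/*
 * Worked example (illustrative): with a 4 KiB page (nBitsInPageDataSize
 * == 12, wPageDataSize == 0x1000) and addr == 0x5234:
 * (0x5234 >> 12) * 0x1000 == 0x5000, so the remainder is
 * 0x5234 - 0x5000 == 0x234, i.e. addr % 0x1000.
 */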
#define NUM_DEVICES		1
/* Number of minors per disk; referenced by alloc_disk() below */
#define PARTITIONS		8

#define GLOB_SBD_NAME		"nd"
#define GLOB_SBD_IRQ_NUM	(29)

#define GLOB_SBD_IOCTL_GC				(0x7701)
#define GLOB_SBD_IOCTL_WL				(0x7702)
#define GLOB_SBD_IOCTL_FORMAT				(0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH			(0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE			(0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE			(0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE		(0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO			(0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA			(0x7709)
#define GLOB_SBD_IOCTL_READ_DATA			(0x770A)
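/*
 * Illustrative user-space usage of the ioctls above (a sketch only; the
 * device node name follows the "nd%c" disk naming used further below):
 *
 *	int fd = open("/dev/nda", O_RDWR);
 *	struct spectra_indentfy_dev_tag info;
 *	if (ioctl(fd, GLOB_SBD_IOCTL_GET_NAND_INFO, &info) == 0)
 *		printf("page size: %d\n", (int)info.PageDataSize);
 */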
static int reserved_mb;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB "
		 "(default 0, which reserves a single block)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");
struct spectra_nand_dev {
	u64 size;			/* Usable capacity, in bytes */
	spinlock_t qlock;
	void __iomem *ioaddr;		/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;			/* One-page bounce buffer */
};
static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

static struct mutex spectra_lock;

static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;
static int force_flush_cache(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Flush_Cache()) {
		printk(KERN_ERR "Failed to flush FTL cache!\n");
		return -EFAULT;
	}

	if (glob_ftl_execute_cmds())
		return -EFAULT;

	return 0;
}
struct ioctl_rw_page_info {
	u8 *data;		/* User-space buffer */
	unsigned int page;	/* Logical page number */
};
static int ioctl_read_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Read(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	if (copy_to_user((void __user *)info.data, buf,
			 IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_read_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	kfree(buf);
	return result;
}
static int ioctl_write_page_data(unsigned long arg)
{
	u8 *buf;
	struct ioctl_rw_page_info info;
	int result;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to allocate memory\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, (void __user *)info.data,
			   IdentifyDeviceData.PageDataSize)) {
		printk(KERN_ERR "ioctl_write_page_data: "
		       "failed to copy user data\n");
		kfree(buf);
		return -EFAULT;
	}

	mutex_lock(&spectra_lock);
	result = GLOB_FTL_Page_Write(buf,
		(u64)info.page * IdentifyDeviceData.PageDataSize);
	mutex_unlock(&spectra_lock);

	kfree(buf);
	return result;
}
/* Return how many blocks should be reserved for bad block replacement */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}
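/*
 * Illustrative example: a device with 2048 data blocks reserves
 * 2048 / 10 == 204 blocks for bad block replacement.
 */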
/* Return how many blocks should be reserved for OS image */
static int get_res_blk_num_os(void)
{
	u32 res_blks, blk_size;

	blk_size = IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock;

	res_blks = (reserved_mb * 1024 * 1024) / blk_size;

	if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
		res_blks = 1; /* Reserve 1 block for the block table */

	return res_blks;
}
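/*
 * Worked example (illustrative): with reserved_mb=25, a 2 KiB page size
 * and 64 pages per block, blk_size == 128 KiB, so
 * res_blks == (25 * 1024 * 1024) / (128 * 1024) == 200 blocks.
 */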
/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
	u64 start_addr, addr;
	u32 logical_start_sect, hd_start_sect;
	u32 nsect, hd_sects;
	u32 rsect, tsect = 0;
	char *buf;
	u32 ratio = IdentifyDeviceData.PageDataSize >> 9;

	start_addr = (u64)(blk_rq_pos(req)) << 9;
	/* Add a big enough offset to prevent the OS image from
	 * being accessed or damaged by the file system */
	start_addr += IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		res_blks_os;

	if (req->cmd_flags & REQ_FLUSH) {
		if (force_flush_cache()) /* Failed to flush cache */
			return -EIO;
		else
			return 0;
	}

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
		printk(KERN_ERR "Spectra error: request over the NAND "
		       "capacity! sector %d, current_nr_sectors %d, "
		       "while capacity is %d\n",
		       (int)blk_rq_pos(req),
		       blk_rq_cur_sectors(req),
		       (int)get_capacity(tr->gd));
		return -EIO;
	}

	/* Map 512-byte logical sectors onto NAND-page-sized "hardware
	 * sectors"; rsect is the sector offset within the first page. */
	logical_start_sect = start_addr >> 9;
	hd_start_sect = logical_start_sect / ratio;
	rsect = logical_start_sect - hd_start_sect * ratio;

	addr = (u64)hd_start_sect * ratio * 512;
	buf = req->buffer;
	nsect = blk_rq_cur_sectors(req);

	if (rsect)
		tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
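	/*
	 * Illustrative mapping: with 2 KiB pages, ratio == 4 (four 512-byte
	 * sectors per page). A request starting at logical sector 7 gives
	 * hd_start_sect == 1 and rsect == 3, so the first page is handled
	 * via tr->tmp_buf and at most tsect == 1 sector is copied from it.
	 */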
	switch (rq_data_dir(req)) {
	case READ:
		/* Read the first NAND page */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Read the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Read(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Read the last NAND pages */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
		}

		if (glob_ftl_execute_cmds())
			return -EIO;
		return 0;

	case WRITE:
		/* Write the first NAND page (read-modify-write for a
		 * partially covered leading page) */
		if (rsect) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += tsect << 9;
			nsect -= tsect;
		}

		/* Write the other NAND pages */
		for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
			if (GLOB_FTL_Page_Write(buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			addr += IdentifyDeviceData.PageDataSize;
			buf += IdentifyDeviceData.PageDataSize;
		}

		/* Write the last NAND pages (read-modify-write for a
		 * partially covered trailing page) */
		if (nsect % ratio) {
			if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
			memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
			if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
				printk(KERN_ERR "Error in %s, Line %d\n",
				       __FILE__, __LINE__);
				return -EIO;
			}
		}

		if (glob_ftl_execute_cmds())
			return -EIO;
		return 0;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}
/* This function is copied from drivers/mtd/mtd_blkdevs.c */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				/* No work: sleep until the request
				 * function wakes us up again */
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
/* Request function that "handles clustering". */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;

	wake_up_process(pdev->thread);
}
static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}
static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	return ret;
}
static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		   unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}
static DEFINE_MUTEX(ffsport_mutex);

int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&ffsport_mutex);
	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&ffsport_mutex);

	return ret;
}
static const struct block_device_operations GLOB_SBD_ops = {
	.owner = THIS_MODULE,
	.open = GLOB_SBD_open,
	.release = GLOB_SBD_release,
	.ioctl = GLOB_SBD_unlocked_ioctl,
	.getgeo = GLOB_SBD_getgeo,
};
static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
{
	int res_blks;
	u32 sects;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	memset(dev, 0, sizeof(struct spectra_nand_dev));

	nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
		       "for OS image, %d blocks for bad block replacement.\n",
		       get_res_blk_num_os(),
		       get_res_blk_num_bad_blk());

	res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();

	dev->size = (u64)IdentifyDeviceData.PageDataSize *
		IdentifyDeviceData.PagesPerBlock *
		(IdentifyDeviceData.wDataBlockNum - res_blks);

	res_blks_os = get_res_blk_num_os();

	spin_lock_init(&dev->qlock);

	dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
	if (!dev->tmp_buf) {
		printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
		       __FILE__, __LINE__);
		return -ENOMEM;
	}

	dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
	if (dev->queue == NULL) {
		printk(KERN_ERR
		       "Spectra: Request queue could not be initialized."
		       " Aborting\n");
		kfree(dev->tmp_buf);
		return -ENOMEM;
	}
	dev->queue->queuedata = dev;

	/* As the Linux block layer doesn't support >4KB hardware sectors,
	 * we force-report a 512 byte hardware sector size to the kernel */
	blk_queue_logical_block_size(dev->queue, 512);

	blk_queue_flush(dev->queue, REQ_FLUSH);

	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
	if (IS_ERR(dev->thread)) {
		blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
		unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
		return PTR_ERR(dev->thread);
	}

	dev->gd = alloc_disk(PARTITIONS);
	if (!dev->gd) {
		printk(KERN_ERR
		       "Spectra: Could not allocate disk. Aborting!\n");
		kthread_stop(dev->thread);
		blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
		return -ENOMEM;
	}
	dev->gd->major = GLOB_SBD_majornum;
	dev->gd->first_minor = which * PARTITIONS;
	dev->gd->fops = &GLOB_SBD_ops;
	dev->gd->queue = dev->queue;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');

	sects = dev->size >> 9;
	nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
	set_capacity(dev->gd, sects);

	add_disk(dev->gd);

	return 0;
}
static ssize_t show_nand_block_num(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.wDataBlockNum);
}

static ssize_t show_nand_pages_per_block(struct device *dev,
					 struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.PagesPerBlock);
}

static ssize_t show_nand_page_size(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)IdentifyDeviceData.PageDataSize);
}

static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
static void create_sysfs_entry(struct device *dev)
{
	if (device_create_file(dev, &dev_attr_nand_block_num))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_block_num.\n");
	if (device_create_file(dev, &dev_attr_nand_pages_per_block))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_pages_per_block.\n");
	if (device_create_file(dev, &dev_attr_nand_page_size))
		printk(KERN_ERR "Spectra: "
		       "failed to create sysfs entry nand_page_size.\n");
}
static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
{
	int i;

	/* create_sysfs_entry(&dev->dev); */

	if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
		printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
		       "Aborting\n");
		return;
	}

	nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
		       "Num blocks=%d, pagesperblock=%d, "
		       "pagedatasize=%d, ECCBytesPerSector=%d\n",
		       (int)IdentifyDeviceData.NumBlocks,
		       (int)IdentifyDeviceData.PagesPerBlock,
		       (int)IdentifyDeviceData.PageDataSize,
		       (int)IdentifyDeviceData.wECCBytesPerSector);

	printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
	if (GLOB_FTL_Init() != PASS) {
		printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
		       "Aborting\n");
		goto out_ftl_flash_register;
	}
	printk(KERN_ALERT "Spectra: block table has been found.\n");

	GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
	if (GLOB_SBD_majornum <= 0) {
		printk(KERN_ERR "Unable to get the major %d for Spectra",
		       GLOB_SBD_majornum);
		goto out_ftl_flash_register;
	}

	for (i = 0; i < NUM_DEVICES; i++)
		if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
			goto out_blk_register;

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra: module loaded with major number %d\n",
		       GLOB_SBD_majornum);

	return;

out_blk_register:
	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
out_ftl_flash_register:
	GLOB_FTL_Cache_Release();
	printk(KERN_ERR "Spectra: Module load failed.\n");
}
int register_spectra_ftl(void)
{
	async_schedule(register_spectra_ftl_async, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(register_spectra_ftl);
static int GLOB_SBD_init(void)
{
	/* Set debug output level (0~3) here. 3 is most verbose */
	printk(KERN_ALERT "Spectra: %s\n", GLOB_version);

	mutex_init(&spectra_lock);

	if (PASS != GLOB_FTL_Flash_Init()) {
		printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
		       "Aborting\n");
		return -ENODEV;
	}

	return 0;
}
static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];

		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}

module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);