/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <plat/gpmc.h>
#include <plat/onenand.h>
#include <mach/gpio.h>

#include <plat/dma.h>

#include <plat/board.h>
#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)
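
/*
 * Per-chip driver state: the GPMC chip select and mapped register window,
 * the optional IRQ GPIO and DMA channel, and the MTD/OneNAND structures
 * built on top of them.
 */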
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}
static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING || state == FL_PREPARING_ERASE ||
	    state == FL_VERIFYING_ERASE) {
		int i = 21;
		unsigned int intr_flags = ONENAND_INT_MASTER;

		switch (state) {
		case FL_RESETING:
			intr_flags |= ONENAND_INT_RESET;
			break;
		case FL_PREPARING_ERASE:
			intr_flags |= ONENAND_INT_ERASE;
			break;
		case FL_VERIFYING_ERASE:
			i = 101;
			break;
		}

		while (--i) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if ((intr & intr_flags) != intr_flags) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return this->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}
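
/*
 * OMAP3 bufferram transfers: large, word-aligned requests are moved with
 * the system DMA controller; small, misaligned or interrupt-context
 * requests fall back to memcpy() via out_copy.
 */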
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* Check and later unmap the handle returned by dma_map_single() */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}
#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
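
/*
 * OMAP2 bufferram transfers: the DMA path below is kept but currently
 * forced off by the "if (1 || ...)" tests, so plain memcpy() is used
 * until the PM requirements of DMA have been revisited.
 */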
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* Check and later unmap the handle returned by dma_map_single() */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}
#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif
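
/*
 * Timing re-setup: omap2_onenand_rephase() walks every device bound to
 * this driver and re-runs the board-supplied GPMC timing hook
 * (__adjust_timing -> c->setup).
 */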
static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
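
/*
 * Probe: request the GPMC chip select and I/O region, map the chip,
 * run the board setup hook, claim the IRQ GPIO and DMA channel when
 * configured, scan the OneNAND core and register the MTD device
 * (with partitions when the platform data provides them).
 */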
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}
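
/*
 * Removal mirrors probe: unregister the MTD, release the OneNAND core,
 * DMA channel, IRQ GPIO, register mapping, memory region and GPMC CS.
 */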
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	gpmc_cs_free(c->gpmc_cs);
	kfree(c);

	return 0;
}
static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");