/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>

#include <mach/dma.h>

#include <mach/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE         SZ_128K
#define ONENAND_BUFRAM_SIZE     (1024 * 5)
struct omap2_onenand {
        struct platform_device *pdev;
        int gpmc_cs;
        unsigned long phys_base;
        int gpio_irq;
        struct mtd_info mtd;
        struct mtd_partition *parts;
        struct onenand_chip onenand;
        struct completion irq_done;
        struct completion dma_done;
        int dma_channel;
        int freq;
        int (*setup)(void __iomem *base, int freq);
};
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
        struct omap2_onenand *c = data;

        complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
        struct omap2_onenand *c = dev_id;

        complete(&c->irq_done);
        return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
        return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
                             int reg)
{
        writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
        printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
               msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
                      unsigned int intr)
{
        printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
               "intr 0x%04x\n", msg, state, ctrl, intr);
}
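/*
 * Wait routine hooked into onenand_chip->wait. Three paths:
 *  - FL_RESETING: poll the interrupt register for up to ~20 us, then check
 *    the controller status for errors and for the reset-complete bit.
 *  - other non-read states: enable the device interrupt output (IOBE) and
 *    sleep on c->irq_done, which is completed by the interrupt bound to the
 *    READY GPIO; a 20 ms timeout is retried up to three times while
 *    ONENAND_CTRL_ONGO is still set.
 *  - FL_READING: interrupts are turned off and the interrupt register is
 *    polled with a 20 ms timeout, again retried while CTRL_ONGO is set.
 * Afterwards, read results are checked against the ECC status register and
 * controller errors (including the write-protect LOCK bit) are reported.
 * (Descriptive summary added for readability; it only restates what the
 * function below does.)
 */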
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        unsigned int intr = 0;
        unsigned int ctrl;
        unsigned long timeout;
        u32 syscfg;

        if (state == FL_RESETING) {
                int i;

                for (i = 0; i < 20; i++) {
                        udelay(1);
                        intr = read_reg(c, ONENAND_REG_INTERRUPT);
                        if (intr & ONENAND_INT_MASTER)
                                break;
                }
                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                if (ctrl & ONENAND_CTRL_ERROR) {
                        wait_err("controller error", state, ctrl, intr);
                        return -EIO;
                }
                if (!(intr & ONENAND_INT_RESET)) {
                        wait_err("timeout", state, ctrl, intr);
                        return -EIO;
                }
                return 0;
        }

        if (state != FL_READING) {
                int result;

                /* Turn interrupts on */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
                        syscfg |= ONENAND_SYS_CFG1_IOBE;
                        write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
                        if (cpu_is_omap34xx())
                                /* Add a delay to let GPIO settle */
                                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                }

                INIT_COMPLETION(c->irq_done);
                if (c->gpio_irq) {
                        result = gpio_get_value(c->gpio_irq);
                        if (result == -1) {
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                wait_err("gpio error", state, ctrl, intr);
                                return -EIO;
                        }
                } else
                        result = 0;
                if (result == 0) {
                        int retry_cnt = 0;
retry:
                        result = wait_for_completion_timeout(&c->irq_done,
                                                    msecs_to_jiffies(20));
                        if (result == 0) {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3)
                                                goto retry;
                                        intr = read_reg(c,
                                                        ONENAND_REG_INTERRUPT);
                                        wait_err("timeout", state, ctrl, intr);
                                        return -EIO;
                                }
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if ((intr & ONENAND_INT_MASTER) == 0)
                                        wait_warn("timeout", state, ctrl, intr);
                        }
                }
        } else {
                int retry_cnt = 0;

                /* Turn interrupts off */
                syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
                syscfg &= ~ONENAND_SYS_CFG1_IOBE;
                write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

                timeout = jiffies + msecs_to_jiffies(20);
                while (1) {
                        if (time_before(jiffies, timeout)) {
                                intr = read_reg(c, ONENAND_REG_INTERRUPT);
                                if (intr & ONENAND_INT_MASTER)
                                        break;
                        } else {
                                /* Timeout after 20ms */
                                ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
                                if (ctrl & ONENAND_CTRL_ONGO) {
                                        /*
                                         * The operation seems to be still going
                                         * so give it some more time.
                                         */
                                        retry_cnt += 1;
                                        if (retry_cnt < 3) {
                                                timeout = jiffies +
                                                          msecs_to_jiffies(20);
                                                continue;
                                        }
                                }
                                break;
                        }
                }
        }

        intr = read_reg(c, ONENAND_REG_INTERRUPT);
        ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

        if (intr & ONENAND_INT_READ) {
                int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

                if (ecc) {
                        unsigned int addr1, addr8;

                        addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
                        addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
                        if (ecc & ONENAND_ECC_2BIT_ALL) {
                                printk(KERN_ERR "onenand_wait: ECC error = "
                                       "0x%04x, addr1 %#x, addr8 %#x\n",
                                       ecc, addr1, addr8);
                                mtd->ecc_stats.failed++;
                                return -EBADMSG;
                        } else if (ecc & ONENAND_ECC_1BIT_ALL) {
                                printk(KERN_NOTICE "onenand_wait: correctable "
                                       "ECC error = 0x%04x, addr1 %#x, "
                                       "addr8 %#x\n", ecc, addr1, addr8);
                                mtd->ecc_stats.corrected++;
                        }
                }
        } else if (state == FL_READING) {
                wait_err("timeout", state, ctrl, intr);
                return -EIO;
        }

        if (ctrl & ONENAND_CTRL_ERROR) {
                wait_err("controller error", state, ctrl, intr);
                if (ctrl & ONENAND_CTRL_LOCK)
                        printk(KERN_ERR "onenand_wait: "
                                        "Device is write protected!!!\n");
                return -EIO;
        }

        if (ctrl & 0xFE9F)
                wait_warn("unexpected controller status", state, ctrl, intr);

        return 0;
}
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
        struct onenand_chip *this = mtd->priv;

        if (ONENAND_CURRENT_BUFFERRAM(this)) {
                if (area == ONENAND_DATARAM)
                        return this->writesize;
                if (area == ONENAND_SPARERAM)
                        return mtd->oobsize;
        }

        return 0;
}
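/*
 * The OMAP3 bufferram accessors below use the system DMA controller to move
 * data between the GPMC-mapped OneNAND buffer RAM and memory. The DMA path
 * is only taken when both addresses are 32-bit aligned, the transfer is at
 * least 384 bytes, and the caller is not in interrupt context (panic_write()
 * may be); vmalloc'ed buffers are translated to their backing page, and on
 * the read side an unaligned tail is handled by memcpy(). Completion is
 * polled for up to 20 ms via c->dma_done; on timeout the code falls back to
 * a plain memcpy() of the whole range.
 * (Summary comment added for readability; the functions below are the
 * authoritative behaviour.)
 */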
#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                        unsigned char *buffer, int offset,
                                        size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;
        unsigned long timeout;
        void *buf = (void *)buffer;
        size_t xtra;
        volatile unsigned *done;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
                goto out_copy;

        /* panic_write() may be in an interrupt context */
        if (in_interrupt())
                goto out_copy;

        if (buf >= high_memory) {
                struct page *p1;

                if (((size_t)buf & PAGE_MASK) !=
                    ((size_t)(buf + count - 1) & PAGE_MASK))
                        goto out_copy;
                p1 = vmalloc_to_page(buf);
                if (!p1)
                        goto out_copy;
                buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
        }

        xtra = count & 3;
        if (xtra) {
                count -= xtra;
                memcpy(buf + count, this->base + bram_offset + count, xtra);
        }

        dma_src = c->phys_base + bram_offset;
        dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
        if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                goto out_copy;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count >> 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);

        timeout = jiffies + msecs_to_jiffies(20);
        done = &c->dma_done.done;
        while (time_before(jiffies, timeout))
                if (*done)
                        break;

        dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

        if (!*done) {
                dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
                goto out_copy;
        }

        return 0;

out_copy:
        memcpy(buf, this->base + bram_offset, count);
        return 0;
}
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                         const unsigned char *buffer,
                                         int offset, size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;
        unsigned long timeout;
        void *buf = (void *)buffer;
        volatile unsigned *done;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
                goto out_copy;

        /* panic_write() may be in an interrupt context */
        if (in_interrupt())
                goto out_copy;

        if (buf >= high_memory) {
                struct page *p1;

                if (((size_t)buf & PAGE_MASK) !=
                    ((size_t)(buf + count - 1) & PAGE_MASK))
                        goto out_copy;
                p1 = vmalloc_to_page(buf);
                if (!p1)
                        goto out_copy;
                buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
        }

        dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
        /* Check the mapping that was actually created (the source buffer). */
        if (dma_mapping_error(&c->pdev->dev, dma_src)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count >> 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);

        timeout = jiffies + msecs_to_jiffies(20);
        done = &c->dma_done.done;
        while (time_before(jiffies, timeout))
                if (*done)
                        break;

        dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

        if (!*done) {
                dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
                goto out_copy;
        }

        return 0;

out_copy:
        memcpy(this->base + bram_offset, buf, count);
        return 0;
}
#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                 unsigned char *buffer, int offset,
                                 size_t count);
int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                  const unsigned char *buffer,
                                  int offset, size_t count);

#endif
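/*
 * The OMAP2 bufferram accessors below keep their DMA setup code, but the
 * "if (1 || ...)" guard (see the "DMA is not used" comment inside) forces
 * the memcpy() path unconditionally until the power-management implications
 * of enabling DMA on OMAP2 have been revisited.
 */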
#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                        unsigned char *buffer, int offset,
                                        size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /* DMA is not used. Revisit PM requirements before enabling it. */
        if (1 || (c->dma_channel < 0) ||
            ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
            (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
                memcpy(buffer, (__force void *)(this->base + bram_offset),
                       count);
                return 0;
        }

        dma_src = c->phys_base + bram_offset;
        dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
                                     count / 4, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);
        wait_for_completion(&c->dma_done);

        dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

        return 0;
}
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                         const unsigned char *buffer,
                                         int offset, size_t count)
{
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
        dma_addr_t dma_src, dma_dst;
        int bram_offset;

        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
        /* DMA is not used. Revisit PM requirements before enabling it. */
        if (1 || (c->dma_channel < 0) ||
            ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
            (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
                memcpy((__force void *)(this->base + bram_offset), buffer,
                       count);
                return 0;
        }

        dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
                                 DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
        /* Check the mapping that was actually created (the source buffer). */
        if (dma_mapping_error(&c->pdev->dev, dma_src)) {
                dev_err(&c->pdev->dev,
                        "Couldn't DMA map a %d byte buffer\n",
                        count);
                return -1;
        }

        omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
                                     count / 2, 1, 0, 0, 0);
        omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                dma_src, 0, 0);
        omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
                                 dma_dst, 0, 0);

        INIT_COMPLETION(c->dma_done);
        omap_start_dma(c->dma_channel);
        wait_for_completion(&c->dma_done);

        dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

        return 0;
}
#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
                                 unsigned char *buffer, int offset,
                                 size_t count);
int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
                                  const unsigned char *buffer,
                                  int offset, size_t count);

#endif
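/*
 * omap2_onenand_rephase() walks every device bound to this driver and
 * re-runs the board-supplied onenand_setup() callback (stored in c->setup)
 * with the interface frequency detected at probe time, so GPMC timings can
 * be re-tuned at run time. As the comments in __adjust_timing() note, this
 * is sufficient only while DMA is unused and needs revisiting for OMAP3.
 * (Descriptive comment added; it summarizes the two functions below.)
 */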
static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
        int ret = 0;
        struct omap2_onenand *c;

        c = dev_get_drvdata(dev);

        BUG_ON(c->setup == NULL);

        /* DMA is not in use so this is all that is needed */
        /* Revisit for OMAP3! */
        ret = c->setup(c->onenand.base, c->freq);

        return ret;
}

int omap2_onenand_rephase(void)
{
        return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
                                      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        /* With certain content in the buffer RAM, the OMAP boot ROM code
         * can recognize the flash chip incorrectly. Zero it out before
         * soft reset.
         */
        memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
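/*
 * Probe expects a matching platform device (named DRIVER_NAME, i.e.
 * "omap2-onenand") whose platform_data is a struct omap_onenand_platform_data.
 * An illustrative sketch of what a board file might provide is shown below;
 * the field names mirror the pdata accesses in omap2_onenand_probe(), while
 * the concrete values and the board_onenand_* identifiers are hypothetical:
 *
 *      static struct omap_onenand_platform_data board_onenand_data = {
 *              .cs             = 0,    // GPMC chip select (pdata->cs)
 *              .gpio_irq       = 65,   // READY/INT GPIO; 0 = not used
 *              .dma_channel    = -1,   // negative value disables DMA
 *              .onenand_setup  = board_onenand_setup,  // GPMC timing hook
 *              .parts          = board_onenand_parts,
 *              .nr_parts       = ARRAY_SIZE(board_onenand_parts),
 *      };
 *
 * Resources are then acquired in the order GPMC chip select, memory region,
 * ioremap(), optional onenand_setup(), GPIO/IRQ, DMA channel, and finally
 * onenand_scan() plus MTD registration; the error labels at the end of the
 * function unwind these in reverse.
 */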
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
        struct omap_onenand_platform_data *pdata;
        struct omap2_onenand *c;
        int r;

        pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENODEV;
        }

        c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        init_completion(&c->irq_done);
        init_completion(&c->dma_done);
        c->gpmc_cs = pdata->cs;
        c->gpio_irq = pdata->gpio_irq;
        c->dma_channel = pdata->dma_channel;
        if (c->dma_channel < 0) {
                /* if -1, don't use DMA */
                c->gpio_irq = 0;
        }

        r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
        if (r < 0) {
                dev_err(&pdev->dev, "Cannot request GPMC CS\n");
                goto err_kfree;
        }

        if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
                               pdev->dev.driver->name) == NULL) {
                dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
                        "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
                r = -EBUSY;
                goto err_free_cs;
        }
        c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
        if (c->onenand.base == NULL) {
                r = -ENOMEM;
                goto err_release_mem_region;
        }

        if (pdata->onenand_setup != NULL) {
                r = pdata->onenand_setup(c->onenand.base, c->freq);
                if (r < 0) {
                        dev_err(&pdev->dev, "Onenand platform setup failed: "
                                "%d\n", r);
                        goto err_iounmap;
                }
                c->setup = pdata->onenand_setup;
        }

        if (c->gpio_irq) {
                if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
                        dev_err(&pdev->dev, "Failed to request GPIO%d for "
                                "OneNAND\n", c->gpio_irq);
                        goto err_iounmap;
                }
                gpio_direction_input(c->gpio_irq);

                if ((r = request_irq(gpio_to_irq(c->gpio_irq),
                                     omap2_onenand_interrupt,
                                     IRQF_TRIGGER_RISING,
                                     pdev->dev.driver->name, c)) < 0)
                        goto err_release_gpio;
        }

        if (c->dma_channel >= 0) {
                r = omap_request_dma(0, pdev->dev.driver->name,
                                     omap2_onenand_dma_cb, (void *) c,
                                     &c->dma_channel);
                if (r == 0) {
                        omap_set_dma_write_mode(c->dma_channel,
                                                OMAP_DMA_WRITE_NON_POSTED);
                        omap_set_dma_src_data_pack(c->dma_channel, 1);
                        omap_set_dma_src_burst_mode(c->dma_channel,
                                                    OMAP_DMA_DATA_BURST_8);
                        omap_set_dma_dest_data_pack(c->dma_channel, 1);
                        omap_set_dma_dest_burst_mode(c->dma_channel,
                                                     OMAP_DMA_DATA_BURST_8);
                } else {
                        dev_info(&pdev->dev,
                                 "failed to allocate DMA for OneNAND, "
                                 "using PIO instead\n");
                        c->dma_channel = -1;
                }
        }

        dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, "
                 "virtual base %p\n", c->gpmc_cs, c->phys_base,
                 c->onenand.base);

        c->pdev = pdev;
        c->mtd.name = dev_name(&pdev->dev);
        c->mtd.priv = &c->onenand;
        c->mtd.owner = THIS_MODULE;

        c->mtd.dev.parent = &pdev->dev;

        if (c->dma_channel >= 0) {
                struct onenand_chip *this = &c->onenand;

                this->wait = omap2_onenand_wait;
                if (cpu_is_omap34xx()) {
                        this->read_bufferram = omap3_onenand_read_bufferram;
                        this->write_bufferram = omap3_onenand_write_bufferram;
                } else {
                        this->read_bufferram = omap2_onenand_read_bufferram;
                        this->write_bufferram = omap2_onenand_write_bufferram;
                }
        }

        if ((r = onenand_scan(&c->mtd, 1)) < 0)
                goto err_release_dma;

        switch ((c->onenand.version_id >> 4) & 0xf) {
        case 0:
                c->freq = 40;
                break;
        case 1:
                c->freq = 54;
                break;
        case 2:
                c->freq = 66;
                break;
        case 3:
                c->freq = 83;
                break;
        }

#ifdef CONFIG_MTD_PARTITIONS
        if (pdata->parts != NULL)
                r = add_mtd_partitions(&c->mtd, pdata->parts,
                                       pdata->nr_parts);
        else
#endif
                r = add_mtd_device(&c->mtd);
        if (r < 0)
                goto err_release_onenand;

        platform_set_drvdata(pdev, c);

        return 0;

err_release_onenand:
        onenand_release(&c->mtd);
err_release_dma:
        if (c->dma_channel != -1)
                omap_free_dma(c->dma_channel);
        if (c->gpio_irq)
                free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
        if (c->gpio_irq)
                gpio_free(c->gpio_irq);
err_iounmap:
        iounmap(c->onenand.base);
err_release_mem_region:
        release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
        gpmc_cs_free(c->gpmc_cs);
err_kfree:
        kfree(c);

        return r;
}
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

        BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
        if (c->parts)
                del_mtd_partitions(&c->mtd);
        else
                del_mtd_device(&c->mtd);
#else
        del_mtd_device(&c->mtd);
#endif

        onenand_release(&c->mtd);
        if (c->dma_channel != -1)
                omap_free_dma(c->dma_channel);
        omap2_onenand_shutdown(pdev);
        platform_set_drvdata(pdev, NULL);
        if (c->gpio_irq) {
                free_irq(gpio_to_irq(c->gpio_irq), c);
                gpio_free(c->gpio_irq);
        }
        iounmap(c->onenand.base);
        release_mem_region(c->phys_base, ONENAND_IO_SIZE);
        gpmc_cs_free(c->gpmc_cs);
        kfree(c);

        return 0;
}
static struct platform_driver omap2_onenand_driver = {
        .probe          = omap2_onenand_probe,
        .remove         = __devexit_p(omap2_onenand_remove),
        .shutdown       = omap2_onenand_shutdown,
        .driver         = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
        },
};

static int __init omap2_onenand_init(void)
{
        printk(KERN_INFO "OneNAND driver initializing\n");
        return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
        platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");