/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include <asm/mach-types.h>
#include <plat/gpmc.h>

#include <plat/sdrc.h>
/* GPMC register offsets */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc
#define GPMC_ECC1_RESULT	0x200

#define GPMC_CS0_OFFSET		0x60
#define GPMC_CS_SIZE		0x30
#define GPMC_MEM_START		0x00000000
#define GPMC_MEM_END		0x3FFFFFFF
#define BOOT_ROM_SPACE		0x100000	/* 1MB */

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 256 MB */

#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2
/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;
};

/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;
	u32 irqenable;
	u32 timeout_ctrl;
	u32 config;
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
};
static struct resource	gpmc_mem_root;
static struct resource	gpmc_cs_mem[GPMC_CS_NUM];
static DEFINE_SPINLOCK(gpmc_mem_lock);
static unsigned int gpmc_cs_map;	/* flag for cs which are initialized */
static int gpmc_ecc_used = -EINVAL;	/* cs using ecc engine */

static void __iomem *gpmc_base;

static struct clk *gpmc_l3_clk;

static irqreturn_t gpmc_handle_irq(int irq, void *dev);
static void gpmc_write_reg(int idx, u32 val)
{
	__raw_writel(val, gpmc_base + idx);
}

static u32 gpmc_read_reg(int idx)
{
	return __raw_readl(gpmc_base + idx);
}
static void gpmc_cs_write_byte(int cs, int idx, u8 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writeb(val, reg_addr);
}

static u8 gpmc_cs_read_byte(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readb(reg_addr);
}
void gpmc_cs_write_reg(int cs, int idx, u32 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writel(val, reg_addr);
}

u32 gpmc_cs_read_reg(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readl(reg_addr);
}
/* TODO: Add support for gpmc_fck to clock framework and use it */
unsigned long gpmc_get_fclk_period(void)
{
	unsigned long rate = clk_get_rate(gpmc_l3_clk);

	if (rate == 0) {
		printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
		return 0;
	}

	rate /= 1000;
	rate = 1000000000 / rate;	/* In picoseconds */

	return rate;
}
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ps + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}

unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long ticks = gpmc_ns_to_ticks(time_ns);

	return ticks * gpmc_get_fclk_period() / 1000;
}
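/*
 * Illustrative example (editorial note, not part of the original driver):
 * with the GPMC functional clock at an assumed 100 MHz,
 * gpmc_get_fclk_period() returns 10000 ps per tick, so gpmc_ns_to_ticks(25)
 * rounds 25000 ps up to (25000 + 10000 - 1) / 10000 = 3 ticks, and
 * gpmc_round_ns_to_ticks(25) returns the corresponding rounded-up delay
 * of 30 ns.
 */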
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
{
	int div;
	u32 l;

	l = sync_clk + (gpmc_get_fclk_period() - 1);
	div = l / gpmc_get_fclk_period();
	if (div > 4)
		return -1;
	if (div <= 0)
		div = 1;

	return div;
}
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_cs_calc_divider(cs, t->sync_clk);
	if (div < 0)
		return -1;

	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	if (cpu_is_omap34xx()) {
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
	}

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	return 0;
}
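/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a board file that owns a chip-select would fill a struct
 * gpmc_timings with nanosecond values taken from the attached device's
 * datasheet and hand it to gpmc_cs_set_timings(). The numbers below are
 * placeholders, not recommendations:
 *
 *	struct gpmc_timings t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.cs_on = 0;
 *	t.cs_rd_off = 90;
 *	t.cs_wr_off = 90;
 *	t.rd_cycle = 120;
 *	t.wr_cycle = 120;
 *	if (gpmc_cs_set_timings(cs, &t) < 0)
 *		pr_err("GPMC CS%d: timings do not fit\n", cs);
 */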
static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
{
	u32 l;
	u32 mask;

	mask = (1 << GPMC_SECTION_SHIFT) - size;
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
	l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
	l |= GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}
static void gpmc_cs_disable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}
static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
{
	u32 l;
	u32 mask;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
	mask = (l >> 8) & 0x0f;
	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
}
static int gpmc_cs_mem_enabled(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	return l & GPMC_CONFIG7_CSVALID;
}
int gpmc_cs_set_reserved(int cs, int reserved)
{
	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	gpmc_cs_map &= ~(1 << cs);
	gpmc_cs_map |= (reserved ? 1 : 0) << cs;

	return 0;
}
int gpmc_cs_reserved(int cs)
{
	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	return gpmc_cs_map & (1 << cs);
}
static unsigned long gpmc_mem_align(unsigned long size)
{
	int order;

	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
	order = GPMC_CHUNK_SHIFT - 1;
	do {
		size >>= 1;
		order++;
	} while (size);
	size = 1 << order;
	return size;
}
static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
{
	struct resource	*res = &gpmc_cs_mem[cs];
	int r;

	size = gpmc_mem_align(size);
	spin_lock(&gpmc_mem_lock);
	res->start = base;
	res->end = base + size - 1;
	r = request_resource(&gpmc_mem_root, res);
	spin_unlock(&gpmc_mem_lock);

	return r;
}
int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
{
	struct resource *res = &gpmc_cs_mem[cs];
	int r = -1;

	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	size = gpmc_mem_align(size);
	if (size > (1 << GPMC_SECTION_SHIFT))
		return -ENOMEM;

	spin_lock(&gpmc_mem_lock);
	if (gpmc_cs_reserved(cs)) {
		r = -EBUSY;
		goto out;
	}
	if (gpmc_cs_mem_enabled(cs))
		r = adjust_resource(res, res->start & ~(size - 1), size);
	if (r < 0)
		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
				      size, NULL, NULL);
	if (r < 0)
		goto out;

	gpmc_cs_enable_mem(cs, res->start, resource_size(res));
	*base = res->start;
	gpmc_cs_set_reserved(cs, 1);
out:
	spin_unlock(&gpmc_mem_lock);
	return r;
}
EXPORT_SYMBOL(gpmc_cs_request);
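/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a child device driver or board file would typically reserve a
 * chip-select and map the returned physical window before touching the
 * attached memory; the chip-select number and SZ_16M window size below are
 * assumptions for the example only:
 *
 *	unsigned long cs_mem_base;
 *	void __iomem *io;
 *
 *	if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) {
 *		pr_err("Cannot request GPMC CS%d\n", cs);
 *		return -EBUSY;
 *	}
 *	io = ioremap(cs_mem_base, SZ_16M);
 *	...
 *	iounmap(io);
 *	gpmc_cs_free(cs);
 */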
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc_mem_lock);
	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc_cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);
/**
 * gpmc_read_status - read access request to get the different gpmc status
 * @cmd: command type
 */
int gpmc_read_status(int cmd)
{
	int	status = -EINVAL;
	u32	regval = 0;

	switch (cmd) {
	case GPMC_GET_IRQ_STATUS:
		status = gpmc_read_reg(GPMC_IRQSTATUS);
		break;

	case GPMC_PREFETCH_FIFO_CNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_FIFO_CNT(regval);
		break;

	case GPMC_PREFETCH_COUNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_COUNT(regval);
		break;

	case GPMC_STATUS_BUFFER:
		regval = gpmc_read_reg(GPMC_STATUS);
		/* 1 : buffer is available to write */
		status = regval & GPMC_STATUS_BUFF_EMPTY;
		break;

	default:
		printk(KERN_ERR "gpmc_read_status: Not supported\n");
	}

	return status;
}
EXPORT_SYMBOL(gpmc_read_status);
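/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a NAND driver can poll the write-buffer status before pushing
 * data to the data register, for example:
 *
 *	while (!gpmc_read_status(GPMC_STATUS_BUFFER))
 *		cpu_relax();
 *
 * A non-zero return here means the write buffer is empty and can accept
 * data.
 */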
/**
 * gpmc_cs_configure - write request to configure gpmc
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 * @return status of the operation
 */
int gpmc_cs_configure(int cs, int cmd, int wval)
{
	int err = 0;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_ENABLE_IRQ:
		gpmc_write_reg(GPMC_IRQENABLE, wval);
		break;

	case GPMC_SET_IRQ_STATUS:
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	case GPMC_CONFIG_RDY_BSY:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		if (wval)
			regval |= WR_RD_PIN_MONITORING;
		else
			regval &= ~WR_RD_PIN_MONITORING;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_SIZE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		regval |= GPMC_CONFIG1_DEVICESIZE(wval);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_TYPE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
		if (wval == GPMC_DEVICETYPE_NOR)
			regval |= GPMC_CONFIG1_MUXADDDATA;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	default:
		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_cs_configure);
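/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a NAND setup path could enable ready/busy pin monitoring and
 * declare a 16-bit device on its chip-select; passing wval = 1 for
 * GPMC_CONFIG_DEV_SIZE is an assumption based on the
 * GPMC_CONFIG1_DEVICESIZE() encoding used above:
 *
 *	gpmc_cs_configure(cs, GPMC_CONFIG_RDY_BSY, 1);
 *	gpmc_cs_configure(cs, GPMC_CONFIG_DEV_SIZE, 1);
 */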
/**
 * gpmc_nand_read - nand specific read access request
 * @cs: chip select number
 * @cmd: command type
 */
int gpmc_nand_read(int cs, int cmd)
{
	int rval = -EINVAL;

	switch (cmd) {
	case GPMC_NAND_DATA:
		rval = gpmc_cs_read_byte(cs, GPMC_CS_NAND_DATA);
		break;

	default:
		printk(KERN_ERR "gpmc_read_nand_ctrl: Not supported\n");
	}

	return rval;
}
EXPORT_SYMBOL(gpmc_nand_read);
/**
 * gpmc_nand_write - nand specific write request
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 */
int gpmc_nand_write(int cs, int cmd, int wval)
{
	int err = 0;

	switch (cmd) {
	case GPMC_NAND_COMMAND:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_COMMAND, wval);
		break;

	case GPMC_NAND_ADDRESS:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_ADDRESS, wval);
		break;

	case GPMC_NAND_DATA:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_DATA, wval);
		break;

	default:
		printk(KERN_ERR "gpmc_write_nand_ctrl: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_nand_write);
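/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): issuing a NAND "read ID" cycle through these helpers would look
 * roughly like the following; the 0x90 opcode and the two data reads are
 * generic NAND conventions, not values taken from this file:
 *
 *	gpmc_nand_write(cs, GPMC_NAND_COMMAND, 0x90);
 *	gpmc_nand_write(cs, GPMC_NAND_ADDRESS, 0x00);
 *	maf_id = gpmc_nand_read(cs, GPMC_NAND_DATA);
 *	dev_id = gpmc_nand_read(cs, GPMC_NAND_DATA);
 */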
/**
 * gpmc_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @fifo_th: fifo threshold to be used for read/write
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read(0) or write post(1) mode
 */
int gpmc_prefetch_enable(int cs, int fifo_th, int dma_mode,
				unsigned int u32_count, int is_write)
{
	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX) {
		pr_err("gpmc: fifo threshold is not supported\n");
		return -1;
	} else if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
		/* Set the amount of bytes to be prefetched */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);

		/* Set dma/mpu mode, the prefetch read / post write and
		 * enable the engine. Set which cs has requested it.
		 */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
					PREFETCH_FIFOTHRESHOLD(fifo_th) |
					ENABLE_PREFETCH |
					(dma_mode << DMA_MPU_MODE) |
					(0x1 & is_write)));

		/* Start the prefetch engine */
		gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
	} else {
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_enable);
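/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a NAND read path using CPU (non-DMA) mode could start the
 * engine, drain the FIFO, then stop it; the 64-word threshold and the
 * assumption that the whole transfer is drained in this loop are examples
 * only:
 *
 *	if (gpmc_prefetch_enable(cs, 64, 0, len, 0) == 0) {
 *		while (gpmc_read_status(GPMC_PREFETCH_COUNT)) {
 *			cnt = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
 *			... read cnt bytes from the chip-select data area ...
 *		}
 *		gpmc_prefetch_reset(cs);
 *	}
 */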
/**
 * gpmc_prefetch_reset - disables and stops the prefetch engine
 */
int gpmc_prefetch_reset(int cs)
{
	u32 config1;

	/* check if the same module/cs is trying to reset */
	config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	if (((config1 >> CS_NUM_SHIFT) & 0x7) != cs)
		return -EINVAL;

	/* Stop the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);

	/* Reset/disable the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_reset);
static void __init gpmc_mem_init(void)
{
	int cs;
	unsigned long boot_rom_space = 0;

	/* never allocate the first page, to facilitate bug detection;
	 * even if we didn't boot from ROM.
	 */
	boot_rom_space = BOOT_ROM_SPACE;
	/* In apollon the CS0 is mapped as 0x0000 0000 */
	if (machine_is_omap_apollon())
		boot_rom_space = 0;
	gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
	gpmc_mem_root.end = GPMC_MEM_END;

	/* Reserve all regions that have been set up by the bootloader */
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		u32 base, size;

		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_get_memconf(cs, &base, &size);
		if (gpmc_cs_insert_mem(cs, base, size) < 0)
			BUG();
	}
}
static int __init gpmc_init(void)
{
	u32 l, irq;
	int cs, ret = -EINVAL;
	int gpmc_irq;
	char *ck = NULL;

	if (cpu_is_omap24xx()) {
		ck = "core_l3_ck";
		if (cpu_is_omap2420())
			l = OMAP2420_GPMC_BASE;
		else
			l = OMAP34XX_GPMC_BASE;
		gpmc_irq = INT_34XX_GPMC_IRQ;
	} else if (cpu_is_omap34xx()) {
		ck = "gpmc_fck";
		l = OMAP34XX_GPMC_BASE;
		gpmc_irq = INT_34XX_GPMC_IRQ;
	} else if (cpu_is_omap44xx()) {
		ck = "gpmc_ck";
		l = OMAP44XX_GPMC_BASE;
		gpmc_irq = OMAP44XX_IRQ_GPMC;
	}

	if (WARN_ON(!ck))
		return ret;

	gpmc_l3_clk = clk_get(NULL, ck);
	if (IS_ERR(gpmc_l3_clk)) {
		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
		BUG();
	}

	gpmc_base = ioremap(l, SZ_4K);
	if (!gpmc_base) {
		clk_put(gpmc_l3_clk);
		printk(KERN_ERR "Could not get GPMC register memory\n");
		BUG();
	}

	clk_enable(gpmc_l3_clk);

	l = gpmc_read_reg(GPMC_REVISION);
	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
	/* Set smart idle mode and automatic L3 clock gating */
	l = gpmc_read_reg(GPMC_SYSCONFIG);
	l &= 0x03 << 3;
	l |= (0x02 << 3) | (1 << 0);
	gpmc_write_reg(GPMC_SYSCONFIG, l);
	gpmc_mem_init();

	/* initialize the irq_chained */
	irq = OMAP_GPMC_IRQ_BASE;
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		irq_set_chip_and_handler(irq, &dummy_irq_chip,
						handle_simple_irq);
		set_irq_flags(irq, IRQF_VALID);
		irq++;
	}

	ret = request_irq(gpmc_irq,
			gpmc_handle_irq, IRQF_SHARED, "gpmc", gpmc_base);
	if (ret)
		pr_err("gpmc: irq-%d could not claim: err %d\n",
				gpmc_irq, ret);

	return ret;
}
postcore_initcall(gpmc_init);
static irqreturn_t gpmc_handle_irq(int irq, void *dev)
{
	u8 cs;

	/* check cs to invoke the irq */
	cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
	if (OMAP_GPMC_IRQ_BASE + cs <= OMAP_GPMC_IRQ_END)
		generic_handle_irq(OMAP_GPMC_IRQ_BASE + cs);

	return IRQ_HANDLED;
}
#ifdef CONFIG_ARCH_OMAP3
static struct omap3_gpmc_regs gpmc_context;

void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}
void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
				gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
				gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
				gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
				gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
				gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
				gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
				gpmc_context.cs_context[i].config7);
		}
	}
}
#endif /* CONFIG_ARCH_OMAP3 */
/**
 * gpmc_enable_hwecc - enable hardware ecc functionality
 * @cs: chip select number
 * @mode: read/write mode
 * @dev_width: device bus width(1 for x16, 0 for x8)
 * @ecc_size: bytes for which ECC will be generated
 */
int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
{
	unsigned int val;

	/* check if ecc module is in use */
	if (gpmc_ecc_used != -EINVAL)
		return -EINVAL;

	gpmc_ecc_used = cs;

	/* clear ecc and enable bits */
	val = ((0x00000001 << 8) | 0x00000001);
	gpmc_write_reg(GPMC_ECC_CONTROL, val);

	/* program ecc and result sizes */
	val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, val);

	switch (mode) {
	case GPMC_ECC_READ:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
		break;
	case GPMC_ECC_READSYN:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
		break;
	case GPMC_ECC_WRITE:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
		break;
	default:
		printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (cs << 1) | (0x1);
	gpmc_write_reg(GPMC_ECC_CONFIG, val);
	return 0;
}
/**
 * gpmc_calculate_ecc - generate non-inverted ecc bytes
 * @cs: chip select number
 * @dat: data pointer over which ecc is computed
 * @ecc_code: ecc code buffer
 *
 * Using non-inverted ECC is considered ugly since writing a blank
 * page (padding) will clear the ECC bytes. This is not a problem as long
 * as no one is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
{
	unsigned int val = 0x0;

	if (gpmc_ecc_used != cs)
		return -EINVAL;

	/* read ecc result */
	val = gpmc_read_reg(GPMC_ECC1_RESULT);
	*ecc_code++ = val;		/* P128e, ..., P1e */
	*ecc_code++ = val >> 16;	/* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	gpmc_ecc_used = -EINVAL;
	return 0;
}
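/*
 * Illustrative usage sketch (editorial note, not part of the original
 * driver): a NAND driver's write path would reserve the ECC engine before
 * transferring a page and collect the three ECC bytes afterwards. The
 * 512-byte ECC size, the x8 width (dev_width = 0), and the page_buf buffer
 * name are example values only:
 *
 *	u_char ecc[3];
 *
 *	if (gpmc_enable_hwecc(cs, GPMC_ECC_WRITE, 0, 512) < 0)
 *		return -EBUSY;
 *	... write the page data to the device ...
 *	gpmc_calculate_ecc(cs, page_buf, ecc);
 */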