2 * GPMC support functions
4 * Copyright (C) 2005-2006 Nokia Corporation
8 * Copyright (C) 2009 Texas Instruments
9 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
17 #include <linux/irq.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/err.h>
21 #include <linux/clk.h>
22 #include <linux/ioport.h>
23 #include <linux/spinlock.h>
25 #include <linux/module.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
29 #include <linux/platform_data/mtd-nand-omap2.h>
31 #include <asm/mach-types.h>
35 #include "omap_device.h"
38 #define DEVICE_NAME "omap-gpmc"
40 /* GPMC register offsets */
41 #define GPMC_REVISION 0x00
42 #define GPMC_SYSCONFIG 0x10
43 #define GPMC_SYSSTATUS 0x14
44 #define GPMC_IRQSTATUS 0x18
45 #define GPMC_IRQENABLE 0x1c
46 #define GPMC_TIMEOUT_CONTROL 0x40
47 #define GPMC_ERR_ADDRESS 0x44
48 #define GPMC_ERR_TYPE 0x48
49 #define GPMC_CONFIG 0x50
50 #define GPMC_STATUS 0x54
51 #define GPMC_PREFETCH_CONFIG1 0x1e0
52 #define GPMC_PREFETCH_CONFIG2 0x1e4
53 #define GPMC_PREFETCH_CONTROL 0x1ec
54 #define GPMC_PREFETCH_STATUS 0x1f0
55 #define GPMC_ECC_CONFIG 0x1f4
56 #define GPMC_ECC_CONTROL 0x1f8
57 #define GPMC_ECC_SIZE_CONFIG 0x1fc
58 #define GPMC_ECC1_RESULT 0x200
59 #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
60 #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
61 #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
62 #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
64 /* GPMC ECC control settings */
65 #define GPMC_ECC_CTRL_ECCCLEAR 0x100
66 #define GPMC_ECC_CTRL_ECCDISABLE 0x000
67 #define GPMC_ECC_CTRL_ECCREG1 0x001
68 #define GPMC_ECC_CTRL_ECCREG2 0x002
69 #define GPMC_ECC_CTRL_ECCREG3 0x003
70 #define GPMC_ECC_CTRL_ECCREG4 0x004
71 #define GPMC_ECC_CTRL_ECCREG5 0x005
72 #define GPMC_ECC_CTRL_ECCREG6 0x006
73 #define GPMC_ECC_CTRL_ECCREG7 0x007
74 #define GPMC_ECC_CTRL_ECCREG8 0x008
75 #define GPMC_ECC_CTRL_ECCREG9 0x009
77 #define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
78 #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
79 #define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
80 #define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
81 #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
82 #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
84 #define GPMC_CS0_OFFSET 0x60
85 #define GPMC_CS_SIZE 0x30
86 #define GPMC_BCH_SIZE 0x10
88 #define GPMC_MEM_START 0x00000000
89 #define GPMC_MEM_END 0x3FFFFFFF
90 #define BOOT_ROM_SPACE 0x100000 /* 1MB */
92 #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
93 #define GPMC_SECTION_SHIFT 28 /* 128 MB */
95 #define CS_NUM_SHIFT 24
96 #define ENABLE_PREFETCH (0x1 << 7)
97 #define DMA_MPU_MODE 2
99 #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
100 #define GPMC_REVISION_MINOR(l) (l & 0xf)
102 #define GPMC_HAS_WR_ACCESS 0x1
103 #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
105 /* XXX: Only NAND irq has been considered; currently these are the only ones used
107 #define GPMC_NR_IRQ 2
109 struct gpmc_client_irq
{
114 /* Structure to save gpmc cs context */
115 struct gpmc_cs_config
{
127 * Structure to save/restore gpmc context
128 * to support core off on OMAP3
130 struct omap3_gpmc_regs
{
135 u32 prefetch_config1
;
136 u32 prefetch_config2
;
137 u32 prefetch_control
;
138 struct gpmc_cs_config cs_context
[GPMC_CS_NUM
];
141 static struct gpmc_client_irq gpmc_client_irq
[GPMC_NR_IRQ
];
142 static struct irq_chip gpmc_irq_chip
;
143 static unsigned gpmc_irq_start
;
145 static struct resource gpmc_mem_root
;
146 static struct resource gpmc_cs_mem
[GPMC_CS_NUM
];
147 static DEFINE_SPINLOCK(gpmc_mem_lock
);
148 static unsigned int gpmc_cs_map
; /* flag for cs which are initialized */
149 static struct device
*gpmc_dev
;
151 static resource_size_t phys_base
, mem_size
;
152 static unsigned gpmc_capability
;
153 static void __iomem
*gpmc_base
;
155 static struct clk
*gpmc_l3_clk
;
157 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
);
159 static void gpmc_write_reg(int idx
, u32 val
)
161 __raw_writel(val
, gpmc_base
+ idx
);
164 static u32
gpmc_read_reg(int idx
)
166 return __raw_readl(gpmc_base
+ idx
);
169 void gpmc_cs_write_reg(int cs
, int idx
, u32 val
)
171 void __iomem
*reg_addr
;
173 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
174 __raw_writel(val
, reg_addr
);
177 u32
gpmc_cs_read_reg(int cs
, int idx
)
179 void __iomem
*reg_addr
;
181 reg_addr
= gpmc_base
+ GPMC_CS0_OFFSET
+ (cs
* GPMC_CS_SIZE
) + idx
;
182 return __raw_readl(reg_addr
);
185 /* TODO: Add support for gpmc_fck to clock framework and use it */
186 unsigned long gpmc_get_fclk_period(void)
188 unsigned long rate
= clk_get_rate(gpmc_l3_clk
);
191 printk(KERN_WARNING
"gpmc_l3_clk not enabled\n");
196 rate
= 1000000000 / rate
; /* In picoseconds */
/*
 * gpmc_ns_to_ticks - convert nanoseconds to GPMC fclk ticks, rounding up
 * @time_ns: duration in nanoseconds
 *
 * The fclk period is obtained in picoseconds so the division loses
 * less precision.
 */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}
/*
 * gpmc_ps_to_ticks - convert picoseconds to GPMC fclk ticks, rounding up
 * @time_ps: duration in picoseconds
 */
unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long tick_ps = gpmc_get_fclk_period();

	return (time_ps + tick_ps - 1) / tick_ps;
}
/* Convert @ticks GPMC fclk ticks to nanoseconds (truncating). */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	unsigned long ps = ticks * gpmc_get_fclk_period();

	return ps / 1000;
}
/*
 * gpmc_round_ns_to_ticks - round @time_ns up to a whole number of fclk
 * ticks and return that duration expressed back in nanoseconds.
 */
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	return gpmc_ticks_to_ns(gpmc_ns_to_ticks(time_ns));
}
/* Convert @ticks GPMC fclk ticks to picoseconds. */
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
	unsigned long period_ps = gpmc_get_fclk_period();

	return ticks * period_ps;
}
/*
 * gpmc_round_ps_to_ticks - round @time_ps up to a whole number of fclk
 * ticks and return that duration expressed back in picoseconds.
 */
static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
{
	return gpmc_ticks_to_ps(gpmc_ps_to_ticks(time_ps));
}
245 static inline void gpmc_cs_modify_reg(int cs
, int reg
, u32 mask
, bool value
)
249 l
= gpmc_cs_read_reg(cs
, reg
);
254 gpmc_cs_write_reg(cs
, reg
, l
);
257 static void gpmc_cs_bool_timings(int cs
, const struct gpmc_bool_timings
*p
)
259 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG1
,
260 GPMC_CONFIG1_TIME_PARA_GRAN
,
261 p
->time_para_granularity
);
262 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG2
,
263 GPMC_CONFIG2_CSEXTRADELAY
, p
->cs_extra_delay
);
264 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG3
,
265 GPMC_CONFIG3_ADVEXTRADELAY
, p
->adv_extra_delay
);
266 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG4
,
267 GPMC_CONFIG4_OEEXTRADELAY
, p
->oe_extra_delay
);
268 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG4
,
269 GPMC_CONFIG4_OEEXTRADELAY
, p
->we_extra_delay
);
270 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG6
,
271 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN
,
272 p
->cycle2cyclesamecsen
);
273 gpmc_cs_modify_reg(cs
, GPMC_CS_CONFIG6
,
274 GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN
,
275 p
->cycle2cyclediffcsen
);
279 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
280 int time
, const char *name
)
282 static int set_gpmc_timing_reg(int cs
, int reg
, int st_bit
, int end_bit
,
287 int ticks
, mask
, nr_bits
;
292 ticks
= gpmc_ns_to_ticks(time
);
293 nr_bits
= end_bit
- st_bit
+ 1;
294 if (ticks
>= 1 << nr_bits
) {
296 printk(KERN_INFO
"GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
297 cs
, name
, time
, ticks
, 1 << nr_bits
);
302 mask
= (1 << nr_bits
) - 1;
303 l
= gpmc_cs_read_reg(cs
, reg
);
306 "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
307 cs
, name
, ticks
, gpmc_get_fclk_period() * ticks
/ 1000,
308 (l
>> st_bit
) & mask
, time
);
310 l
&= ~(mask
<< st_bit
);
311 l
|= ticks
<< st_bit
;
312 gpmc_cs_write_reg(cs
, reg
, l
);
318 #define GPMC_SET_ONE(reg, st, end, field) \
319 if (set_gpmc_timing_reg(cs, (reg), (st), (end), \
320 t->field, #field) < 0) \
323 #define GPMC_SET_ONE(reg, st, end, field) \
324 if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
328 int gpmc_calc_divider(unsigned int sync_clk
)
333 l
= sync_clk
+ (gpmc_get_fclk_period() - 1);
334 div
= l
/ gpmc_get_fclk_period();
343 int gpmc_cs_set_timings(int cs
, const struct gpmc_timings
*t
)
348 div
= gpmc_calc_divider(t
->sync_clk
);
352 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 0, 3, cs_on
);
353 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 8, 12, cs_rd_off
);
354 GPMC_SET_ONE(GPMC_CS_CONFIG2
, 16, 20, cs_wr_off
);
356 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 0, 3, adv_on
);
357 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 8, 12, adv_rd_off
);
358 GPMC_SET_ONE(GPMC_CS_CONFIG3
, 16, 20, adv_wr_off
);
360 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 0, 3, oe_on
);
361 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 8, 12, oe_off
);
362 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 16, 19, we_on
);
363 GPMC_SET_ONE(GPMC_CS_CONFIG4
, 24, 28, we_off
);
365 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 0, 4, rd_cycle
);
366 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 8, 12, wr_cycle
);
367 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 16, 20, access
);
369 GPMC_SET_ONE(GPMC_CS_CONFIG5
, 24, 27, page_burst_access
);
371 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 0, 3, bus_turnaround
);
372 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 8, 11, cycle2cycle_delay
);
374 GPMC_SET_ONE(GPMC_CS_CONFIG1
, 18, 19, wait_monitoring
);
375 GPMC_SET_ONE(GPMC_CS_CONFIG1
, 25, 26, clk_activation
);
377 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
378 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 16, 19, wr_data_mux_bus
);
379 if (gpmc_capability
& GPMC_HAS_WR_ACCESS
)
380 GPMC_SET_ONE(GPMC_CS_CONFIG6
, 24, 28, wr_access
);
382 /* caller is expected to have initialized CONFIG1 to cover
383 * at least sync vs async
385 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
386 if (l
& (GPMC_CONFIG1_READTYPE_SYNC
| GPMC_CONFIG1_WRITETYPE_SYNC
)) {
388 printk(KERN_INFO
"GPMC CS%d CLK period is %lu ns (div %d)\n",
389 cs
, (div
* gpmc_get_fclk_period()) / 1000, div
);
393 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, l
);
396 gpmc_cs_bool_timings(cs
, &t
->bool_timings
);
401 static void gpmc_cs_enable_mem(int cs
, u32 base
, u32 size
)
406 mask
= (1 << GPMC_SECTION_SHIFT
) - size
;
407 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
409 l
= (base
>> GPMC_CHUNK_SHIFT
) & 0x3f;
411 l
|= ((mask
>> GPMC_CHUNK_SHIFT
) & 0x0f) << 8;
412 l
|= GPMC_CONFIG7_CSVALID
;
413 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
416 static void gpmc_cs_disable_mem(int cs
)
420 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
421 l
&= ~GPMC_CONFIG7_CSVALID
;
422 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG7
, l
);
425 static void gpmc_cs_get_memconf(int cs
, u32
*base
, u32
*size
)
430 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
431 *base
= (l
& 0x3f) << GPMC_CHUNK_SHIFT
;
432 mask
= (l
>> 8) & 0x0f;
433 *size
= (1 << GPMC_SECTION_SHIFT
) - (mask
<< GPMC_CHUNK_SHIFT
);
436 static int gpmc_cs_mem_enabled(int cs
)
440 l
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG7
);
441 return l
& GPMC_CONFIG7_CSVALID
;
444 int gpmc_cs_set_reserved(int cs
, int reserved
)
446 if (cs
> GPMC_CS_NUM
)
449 gpmc_cs_map
&= ~(1 << cs
);
450 gpmc_cs_map
|= (reserved
? 1 : 0) << cs
;
455 int gpmc_cs_reserved(int cs
)
457 if (cs
> GPMC_CS_NUM
)
460 return gpmc_cs_map
& (1 << cs
);
463 static unsigned long gpmc_mem_align(unsigned long size
)
467 size
= (size
- 1) >> (GPMC_CHUNK_SHIFT
- 1);
468 order
= GPMC_CHUNK_SHIFT
- 1;
477 static int gpmc_cs_insert_mem(int cs
, unsigned long base
, unsigned long size
)
479 struct resource
*res
= &gpmc_cs_mem
[cs
];
482 size
= gpmc_mem_align(size
);
483 spin_lock(&gpmc_mem_lock
);
485 res
->end
= base
+ size
- 1;
486 r
= request_resource(&gpmc_mem_root
, res
);
487 spin_unlock(&gpmc_mem_lock
);
492 static int gpmc_cs_delete_mem(int cs
)
494 struct resource
*res
= &gpmc_cs_mem
[cs
];
497 spin_lock(&gpmc_mem_lock
);
498 r
= release_resource(&gpmc_cs_mem
[cs
]);
501 spin_unlock(&gpmc_mem_lock
);
506 int gpmc_cs_request(int cs
, unsigned long size
, unsigned long *base
)
508 struct resource
*res
= &gpmc_cs_mem
[cs
];
511 if (cs
> GPMC_CS_NUM
)
514 size
= gpmc_mem_align(size
);
515 if (size
> (1 << GPMC_SECTION_SHIFT
))
518 spin_lock(&gpmc_mem_lock
);
519 if (gpmc_cs_reserved(cs
)) {
523 if (gpmc_cs_mem_enabled(cs
))
524 r
= adjust_resource(res
, res
->start
& ~(size
- 1), size
);
526 r
= allocate_resource(&gpmc_mem_root
, res
, size
, 0, ~0,
531 gpmc_cs_enable_mem(cs
, res
->start
, resource_size(res
));
533 gpmc_cs_set_reserved(cs
, 1);
535 spin_unlock(&gpmc_mem_lock
);
538 EXPORT_SYMBOL(gpmc_cs_request
);
540 void gpmc_cs_free(int cs
)
542 spin_lock(&gpmc_mem_lock
);
543 if (cs
>= GPMC_CS_NUM
|| cs
< 0 || !gpmc_cs_reserved(cs
)) {
544 printk(KERN_ERR
"Trying to free non-reserved GPMC CS%d\n", cs
);
546 spin_unlock(&gpmc_mem_lock
);
549 gpmc_cs_disable_mem(cs
);
550 release_resource(&gpmc_cs_mem
[cs
]);
551 gpmc_cs_set_reserved(cs
, 0);
552 spin_unlock(&gpmc_mem_lock
);
554 EXPORT_SYMBOL(gpmc_cs_free
);
557 * gpmc_cs_configure - write request to configure gpmc
558 * @cs: chip select number
560 * @wval: value to write
561 * @return status of the operation
563 int gpmc_cs_configure(int cs
, int cmd
, int wval
)
569 case GPMC_ENABLE_IRQ
:
570 gpmc_write_reg(GPMC_IRQENABLE
, wval
);
573 case GPMC_SET_IRQ_STATUS
:
574 gpmc_write_reg(GPMC_IRQSTATUS
, wval
);
578 regval
= gpmc_read_reg(GPMC_CONFIG
);
580 regval
&= ~GPMC_CONFIG_WRITEPROTECT
; /* WP is ON */
582 regval
|= GPMC_CONFIG_WRITEPROTECT
; /* WP is OFF */
583 gpmc_write_reg(GPMC_CONFIG
, regval
);
586 case GPMC_CONFIG_RDY_BSY
:
587 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
589 regval
|= WR_RD_PIN_MONITORING
;
591 regval
&= ~WR_RD_PIN_MONITORING
;
592 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
595 case GPMC_CONFIG_DEV_SIZE
:
596 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
598 /* clear 2 target bits */
599 regval
&= ~GPMC_CONFIG1_DEVICESIZE(3);
601 /* set the proper value */
602 regval
|= GPMC_CONFIG1_DEVICESIZE(wval
);
604 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
607 case GPMC_CONFIG_DEV_TYPE
:
608 regval
= gpmc_cs_read_reg(cs
, GPMC_CS_CONFIG1
);
609 regval
|= GPMC_CONFIG1_DEVICETYPE(wval
);
610 if (wval
== GPMC_DEVICETYPE_NOR
)
611 regval
|= GPMC_CONFIG1_MUXADDDATA
;
612 gpmc_cs_write_reg(cs
, GPMC_CS_CONFIG1
, regval
);
616 printk(KERN_ERR
"gpmc_configure_cs: Not supported\n");
622 EXPORT_SYMBOL(gpmc_cs_configure
);
624 void gpmc_update_nand_reg(struct gpmc_nand_regs
*reg
, int cs
)
628 reg
->gpmc_status
= gpmc_base
+ GPMC_STATUS
;
629 reg
->gpmc_nand_command
= gpmc_base
+ GPMC_CS0_OFFSET
+
630 GPMC_CS_NAND_COMMAND
+ GPMC_CS_SIZE
* cs
;
631 reg
->gpmc_nand_address
= gpmc_base
+ GPMC_CS0_OFFSET
+
632 GPMC_CS_NAND_ADDRESS
+ GPMC_CS_SIZE
* cs
;
633 reg
->gpmc_nand_data
= gpmc_base
+ GPMC_CS0_OFFSET
+
634 GPMC_CS_NAND_DATA
+ GPMC_CS_SIZE
* cs
;
635 reg
->gpmc_prefetch_config1
= gpmc_base
+ GPMC_PREFETCH_CONFIG1
;
636 reg
->gpmc_prefetch_config2
= gpmc_base
+ GPMC_PREFETCH_CONFIG2
;
637 reg
->gpmc_prefetch_control
= gpmc_base
+ GPMC_PREFETCH_CONTROL
;
638 reg
->gpmc_prefetch_status
= gpmc_base
+ GPMC_PREFETCH_STATUS
;
639 reg
->gpmc_ecc_config
= gpmc_base
+ GPMC_ECC_CONFIG
;
640 reg
->gpmc_ecc_control
= gpmc_base
+ GPMC_ECC_CONTROL
;
641 reg
->gpmc_ecc_size_config
= gpmc_base
+ GPMC_ECC_SIZE_CONFIG
;
642 reg
->gpmc_ecc1_result
= gpmc_base
+ GPMC_ECC1_RESULT
;
644 for (i
= 0; i
< GPMC_BCH_NUM_REMAINDER
; i
++) {
645 reg
->gpmc_bch_result0
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_0
+
647 reg
->gpmc_bch_result1
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_1
+
649 reg
->gpmc_bch_result2
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_2
+
651 reg
->gpmc_bch_result3
[i
] = gpmc_base
+ GPMC_ECC_BCH_RESULT_3
+
656 int gpmc_get_client_irq(unsigned irq_config
)
660 if (hweight32(irq_config
) > 1)
663 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
664 if (gpmc_client_irq
[i
].bitmask
& irq_config
)
665 return gpmc_client_irq
[i
].irq
;
670 static int gpmc_irq_endis(unsigned irq
, bool endis
)
675 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
676 if (irq
== gpmc_client_irq
[i
].irq
) {
677 regval
= gpmc_read_reg(GPMC_IRQENABLE
);
679 regval
|= gpmc_client_irq
[i
].bitmask
;
681 regval
&= ~gpmc_client_irq
[i
].bitmask
;
682 gpmc_write_reg(GPMC_IRQENABLE
, regval
);
689 static void gpmc_irq_disable(struct irq_data
*p
)
691 gpmc_irq_endis(p
->irq
, false);
694 static void gpmc_irq_enable(struct irq_data
*p
)
696 gpmc_irq_endis(p
->irq
, true);
/* Stub for irq_chip callbacks that need no hardware action. */
static void gpmc_irq_noop(struct irq_data *data) { }
/* Stub for irq_chip callbacks that must return a value; always 0. */
static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
703 static int gpmc_setup_irq(void)
711 gpmc_irq_start
= irq_alloc_descs(-1, 0, GPMC_NR_IRQ
, 0);
712 if (IS_ERR_VALUE(gpmc_irq_start
)) {
713 pr_err("irq_alloc_descs failed\n");
714 return gpmc_irq_start
;
717 gpmc_irq_chip
.name
= "gpmc";
718 gpmc_irq_chip
.irq_startup
= gpmc_irq_noop_ret
;
719 gpmc_irq_chip
.irq_enable
= gpmc_irq_enable
;
720 gpmc_irq_chip
.irq_disable
= gpmc_irq_disable
;
721 gpmc_irq_chip
.irq_shutdown
= gpmc_irq_noop
;
722 gpmc_irq_chip
.irq_ack
= gpmc_irq_noop
;
723 gpmc_irq_chip
.irq_mask
= gpmc_irq_noop
;
724 gpmc_irq_chip
.irq_unmask
= gpmc_irq_noop
;
726 gpmc_client_irq
[0].bitmask
= GPMC_IRQ_FIFOEVENTENABLE
;
727 gpmc_client_irq
[1].bitmask
= GPMC_IRQ_COUNT_EVENT
;
729 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
730 gpmc_client_irq
[i
].irq
= gpmc_irq_start
+ i
;
731 irq_set_chip_and_handler(gpmc_client_irq
[i
].irq
,
732 &gpmc_irq_chip
, handle_simple_irq
);
733 set_irq_flags(gpmc_client_irq
[i
].irq
,
734 IRQF_VALID
| IRQF_NOAUTOEN
);
737 /* Disable interrupts */
738 gpmc_write_reg(GPMC_IRQENABLE
, 0);
740 /* clear interrupts */
741 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
742 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
744 return request_irq(gpmc_irq
, gpmc_handle_irq
, 0, "gpmc", NULL
);
747 static int gpmc_free_irq(void)
752 free_irq(gpmc_irq
, NULL
);
754 for (i
= 0; i
< GPMC_NR_IRQ
; i
++) {
755 irq_set_handler(gpmc_client_irq
[i
].irq
, NULL
);
756 irq_set_chip(gpmc_client_irq
[i
].irq
, &no_irq_chip
);
757 irq_modify_status(gpmc_client_irq
[i
].irq
, 0, 0);
760 irq_free_descs(gpmc_irq_start
, GPMC_NR_IRQ
);
765 static void gpmc_mem_exit(void)
769 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
770 if (!gpmc_cs_mem_enabled(cs
))
772 gpmc_cs_delete_mem(cs
);
777 static int gpmc_mem_init(void)
780 unsigned long boot_rom_space
= 0;
782 /* never allocate the first page, to facilitate bug detection;
783 * even if we didn't boot from ROM.
785 boot_rom_space
= BOOT_ROM_SPACE
;
786 /* In apollon the CS0 is mapped as 0x0000 0000 */
787 if (machine_is_omap_apollon())
789 gpmc_mem_root
.start
= GPMC_MEM_START
+ boot_rom_space
;
790 gpmc_mem_root
.end
= GPMC_MEM_END
;
792 /* Reserve all regions that has been set up by bootloader */
793 for (cs
= 0; cs
< GPMC_CS_NUM
; cs
++) {
796 if (!gpmc_cs_mem_enabled(cs
))
798 gpmc_cs_get_memconf(cs
, &base
, &size
);
799 rc
= gpmc_cs_insert_mem(cs
, base
, size
);
800 if (IS_ERR_VALUE(rc
)) {
802 if (gpmc_cs_mem_enabled(cs
))
803 gpmc_cs_delete_mem(cs
);
811 static u32
gpmc_round_ps_to_sync_clk(u32 time_ps
, u32 sync_clk
)
816 div
= gpmc_calc_divider(sync_clk
);
817 temp
= gpmc_ps_to_ticks(time_ps
);
818 temp
= (temp
+ div
- 1) / div
;
819 return gpmc_ticks_to_ps(temp
* div
);
822 /* XXX: can the cycles be avoided ? */
823 static int gpmc_calc_sync_read_timings(struct gpmc_timings
*gpmc_t
,
824 struct gpmc_device_timings
*dev_t
)
826 bool mux
= dev_t
->mux
;
830 temp
= dev_t
->t_avdp_r
;
831 /* XXX: mux check required ? */
833 /* XXX: t_avdp not to be required for sync, only added for tusb
834 * this indirectly necessitates requirement of t_avdp_r and
835 * t_avdp_w instead of having a single t_avdp
837 temp
= max_t(u32
, temp
, gpmc_t
->clk_activation
+ dev_t
->t_avdh
);
838 temp
= max_t(u32
, gpmc_t
->adv_on
+ gpmc_ticks_to_ps(1), temp
);
840 gpmc_t
->adv_rd_off
= gpmc_round_ps_to_ticks(temp
);
843 temp
= dev_t
->t_oeasu
; /* XXX: remove this ? */
845 temp
= max_t(u32
, temp
, gpmc_t
->clk_activation
+ dev_t
->t_ach
);
846 temp
= max_t(u32
, temp
, gpmc_t
->adv_rd_off
+
847 gpmc_ticks_to_ps(dev_t
->cyc_aavdh_oe
));
849 gpmc_t
->oe_on
= gpmc_round_ps_to_ticks(temp
);
852 /* XXX: any scope for improvement ?, by combining oe_on
853 * and clk_activation, need to check whether
854 * access = clk_activation + round to sync clk ?
856 temp
= max_t(u32
, dev_t
->t_iaa
, dev_t
->cyc_iaa
* gpmc_t
->sync_clk
);
857 temp
+= gpmc_t
->clk_activation
;
859 temp
= max_t(u32
, temp
, gpmc_t
->oe_on
+
860 gpmc_ticks_to_ps(dev_t
->cyc_oe
));
861 gpmc_t
->access
= gpmc_round_ps_to_ticks(temp
);
863 gpmc_t
->oe_off
= gpmc_t
->access
+ gpmc_ticks_to_ps(1);
864 gpmc_t
->cs_rd_off
= gpmc_t
->oe_off
;
867 temp
= max_t(u32
, dev_t
->t_cez_r
, dev_t
->t_oez
);
868 temp
= gpmc_round_ps_to_sync_clk(temp
, gpmc_t
->sync_clk
) +
870 /* XXX: barter t_ce_rdyz with t_cez_r ? */
871 if (dev_t
->t_ce_rdyz
)
872 temp
= max_t(u32
, temp
, gpmc_t
->cs_rd_off
+ dev_t
->t_ce_rdyz
);
873 gpmc_t
->rd_cycle
= gpmc_round_ps_to_ticks(temp
);
878 static int gpmc_calc_sync_write_timings(struct gpmc_timings
*gpmc_t
,
879 struct gpmc_device_timings
*dev_t
)
881 bool mux
= dev_t
->mux
;
885 temp
= dev_t
->t_avdp_w
;
887 temp
= max_t(u32
, temp
,
888 gpmc_t
->clk_activation
+ dev_t
->t_avdh
);
889 temp
= max_t(u32
, gpmc_t
->adv_on
+ gpmc_ticks_to_ps(1), temp
);
891 gpmc_t
->adv_wr_off
= gpmc_round_ps_to_ticks(temp
);
893 /* wr_data_mux_bus */
894 temp
= max_t(u32
, dev_t
->t_weasu
,
895 gpmc_t
->clk_activation
+ dev_t
->t_rdyo
);
896 /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
897 * and in that case remember to handle we_on properly
900 temp
= max_t(u32
, temp
,
901 gpmc_t
->adv_wr_off
+ dev_t
->t_aavdh
);
902 temp
= max_t(u32
, temp
, gpmc_t
->adv_wr_off
+
903 gpmc_ticks_to_ps(dev_t
->cyc_aavdh_we
));
905 gpmc_t
->wr_data_mux_bus
= gpmc_round_ps_to_ticks(temp
);
908 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
909 gpmc_t
->we_on
= gpmc_round_ps_to_ticks(dev_t
->t_weasu
);
911 gpmc_t
->we_on
= gpmc_t
->wr_data_mux_bus
;
914 /* XXX: gpmc_capability check reqd ? , even if not, will not harm */
915 gpmc_t
->wr_access
= gpmc_t
->access
;
918 temp
= gpmc_t
->we_on
+ dev_t
->t_wpl
;
919 temp
= max_t(u32
, temp
,
920 gpmc_t
->wr_access
+ gpmc_ticks_to_ps(1));
921 temp
= max_t(u32
, temp
,
922 gpmc_t
->we_on
+ gpmc_ticks_to_ps(dev_t
->cyc_wpl
));
923 gpmc_t
->we_off
= gpmc_round_ps_to_ticks(temp
);
925 gpmc_t
->cs_wr_off
= gpmc_round_ps_to_ticks(gpmc_t
->we_off
+
929 temp
= gpmc_round_ps_to_sync_clk(dev_t
->t_cez_w
, gpmc_t
->sync_clk
);
930 temp
+= gpmc_t
->wr_access
;
931 /* XXX: barter t_ce_rdyz with t_cez_w ? */
932 if (dev_t
->t_ce_rdyz
)
933 temp
= max_t(u32
, temp
,
934 gpmc_t
->cs_wr_off
+ dev_t
->t_ce_rdyz
);
935 gpmc_t
->wr_cycle
= gpmc_round_ps_to_ticks(temp
);
940 static int gpmc_calc_async_read_timings(struct gpmc_timings
*gpmc_t
,
941 struct gpmc_device_timings
*dev_t
)
943 bool mux
= dev_t
->mux
;
947 temp
= dev_t
->t_avdp_r
;
949 temp
= max_t(u32
, gpmc_t
->adv_on
+ gpmc_ticks_to_ps(1), temp
);
950 gpmc_t
->adv_rd_off
= gpmc_round_ps_to_ticks(temp
);
953 temp
= dev_t
->t_oeasu
;
955 temp
= max_t(u32
, temp
,
956 gpmc_t
->adv_rd_off
+ dev_t
->t_aavdh
);
957 gpmc_t
->oe_on
= gpmc_round_ps_to_ticks(temp
);
960 temp
= max_t(u32
, dev_t
->t_iaa
, /* XXX: remove t_iaa in async ? */
961 gpmc_t
->oe_on
+ dev_t
->t_oe
);
962 temp
= max_t(u32
, temp
,
963 gpmc_t
->cs_on
+ dev_t
->t_ce
);
964 temp
= max_t(u32
, temp
,
965 gpmc_t
->adv_on
+ dev_t
->t_aa
);
966 gpmc_t
->access
= gpmc_round_ps_to_ticks(temp
);
968 gpmc_t
->oe_off
= gpmc_t
->access
+ gpmc_ticks_to_ps(1);
969 gpmc_t
->cs_rd_off
= gpmc_t
->oe_off
;
972 temp
= max_t(u32
, dev_t
->t_rd_cycle
,
973 gpmc_t
->cs_rd_off
+ dev_t
->t_cez_r
);
974 temp
= max_t(u32
, temp
, gpmc_t
->oe_off
+ dev_t
->t_oez
);
975 gpmc_t
->rd_cycle
= gpmc_round_ps_to_ticks(temp
);
980 static int gpmc_calc_async_write_timings(struct gpmc_timings
*gpmc_t
,
981 struct gpmc_device_timings
*dev_t
)
983 bool mux
= dev_t
->mux
;
987 temp
= dev_t
->t_avdp_w
;
989 temp
= max_t(u32
, gpmc_t
->adv_on
+ gpmc_ticks_to_ps(1), temp
);
990 gpmc_t
->adv_wr_off
= gpmc_round_ps_to_ticks(temp
);
992 /* wr_data_mux_bus */
993 temp
= dev_t
->t_weasu
;
995 temp
= max_t(u32
, temp
, gpmc_t
->adv_wr_off
+ dev_t
->t_aavdh
);
996 temp
= max_t(u32
, temp
, gpmc_t
->adv_wr_off
+
997 gpmc_ticks_to_ps(dev_t
->cyc_aavdh_we
));
999 gpmc_t
->wr_data_mux_bus
= gpmc_round_ps_to_ticks(temp
);
1002 if (gpmc_capability
& GPMC_HAS_WR_DATA_MUX_BUS
)
1003 gpmc_t
->we_on
= gpmc_round_ps_to_ticks(dev_t
->t_weasu
);
1005 gpmc_t
->we_on
= gpmc_t
->wr_data_mux_bus
;
1008 temp
= gpmc_t
->we_on
+ dev_t
->t_wpl
;
1009 gpmc_t
->we_off
= gpmc_round_ps_to_ticks(temp
);
1011 gpmc_t
->cs_wr_off
= gpmc_round_ps_to_ticks(gpmc_t
->we_off
+
1015 temp
= max_t(u32
, dev_t
->t_wr_cycle
,
1016 gpmc_t
->cs_wr_off
+ dev_t
->t_cez_w
);
1017 gpmc_t
->wr_cycle
= gpmc_round_ps_to_ticks(temp
);
1022 static int gpmc_calc_sync_common_timings(struct gpmc_timings
*gpmc_t
,
1023 struct gpmc_device_timings
*dev_t
)
1027 gpmc_t
->sync_clk
= gpmc_calc_divider(dev_t
->clk
) *
1028 gpmc_get_fclk_period();
1030 gpmc_t
->page_burst_access
= gpmc_round_ps_to_sync_clk(
1034 temp
= max_t(u32
, dev_t
->t_ces
, dev_t
->t_avds
);
1035 gpmc_t
->clk_activation
= gpmc_round_ps_to_ticks(temp
);
1037 if (gpmc_calc_divider(gpmc_t
->sync_clk
) != 1)
1040 if (dev_t
->ce_xdelay
)
1041 gpmc_t
->bool_timings
.cs_extra_delay
= true;
1042 if (dev_t
->avd_xdelay
)
1043 gpmc_t
->bool_timings
.adv_extra_delay
= true;
1044 if (dev_t
->oe_xdelay
)
1045 gpmc_t
->bool_timings
.oe_extra_delay
= true;
1046 if (dev_t
->we_xdelay
)
1047 gpmc_t
->bool_timings
.we_extra_delay
= true;
1052 static int gpmc_calc_common_timings(struct gpmc_timings
*gpmc_t
,
1053 struct gpmc_device_timings
*dev_t
)
1058 gpmc_t
->cs_on
= gpmc_round_ps_to_ticks(dev_t
->t_ceasu
);
1061 temp
= dev_t
->t_avdasu
;
1062 if (dev_t
->t_ce_avd
)
1063 temp
= max_t(u32
, temp
,
1064 gpmc_t
->cs_on
+ dev_t
->t_ce_avd
);
1065 gpmc_t
->adv_on
= gpmc_round_ps_to_ticks(temp
);
1067 if (dev_t
->sync_write
|| dev_t
->sync_read
)
1068 gpmc_calc_sync_common_timings(gpmc_t
, dev_t
);
1073 /* TODO: remove this function once all peripherals are confirmed to
1074 * work with generic timing. Simultaneously gpmc_cs_set_timings()
1075 * has to be modified to handle timings in ps instead of ns
1077 static void gpmc_convert_ps_to_ns(struct gpmc_timings
*t
)
1080 t
->cs_rd_off
/= 1000;
1081 t
->cs_wr_off
/= 1000;
1083 t
->adv_rd_off
/= 1000;
1084 t
->adv_wr_off
/= 1000;
1089 t
->page_burst_access
/= 1000;
1091 t
->rd_cycle
/= 1000;
1092 t
->wr_cycle
/= 1000;
1093 t
->bus_turnaround
/= 1000;
1094 t
->cycle2cycle_delay
/= 1000;
1095 t
->wait_monitoring
/= 1000;
1096 t
->clk_activation
/= 1000;
1097 t
->wr_access
/= 1000;
1098 t
->wr_data_mux_bus
/= 1000;
1101 int gpmc_calc_timings(struct gpmc_timings
*gpmc_t
,
1102 struct gpmc_device_timings
*dev_t
)
1104 memset(gpmc_t
, 0, sizeof(*gpmc_t
));
1106 gpmc_calc_common_timings(gpmc_t
, dev_t
);
1108 if (dev_t
->sync_read
)
1109 gpmc_calc_sync_read_timings(gpmc_t
, dev_t
);
1111 gpmc_calc_async_read_timings(gpmc_t
, dev_t
);
1113 if (dev_t
->sync_write
)
1114 gpmc_calc_sync_write_timings(gpmc_t
, dev_t
);
1116 gpmc_calc_async_write_timings(gpmc_t
, dev_t
);
1118 /* TODO: remove, see function definition */
1119 gpmc_convert_ps_to_ns(gpmc_t
);
1124 static int gpmc_probe(struct platform_device
*pdev
)
1128 struct resource
*res
;
1130 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1134 phys_base
= res
->start
;
1135 mem_size
= resource_size(res
);
1137 gpmc_base
= devm_request_and_ioremap(&pdev
->dev
, res
);
1139 dev_err(&pdev
->dev
, "error: request memory / ioremap\n");
1140 return -EADDRNOTAVAIL
;
1143 res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0);
1145 dev_warn(&pdev
->dev
, "Failed to get resource: irq\n");
1147 gpmc_irq
= res
->start
;
1149 gpmc_l3_clk
= clk_get(&pdev
->dev
, "fck");
1150 if (IS_ERR(gpmc_l3_clk
)) {
1151 dev_err(&pdev
->dev
, "error: clk_get\n");
1153 return PTR_ERR(gpmc_l3_clk
);
1156 clk_prepare_enable(gpmc_l3_clk
);
1158 gpmc_dev
= &pdev
->dev
;
1160 l
= gpmc_read_reg(GPMC_REVISION
);
1161 if (GPMC_REVISION_MAJOR(l
) > 0x4)
1162 gpmc_capability
= GPMC_HAS_WR_ACCESS
| GPMC_HAS_WR_DATA_MUX_BUS
;
1163 dev_info(gpmc_dev
, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l
),
1164 GPMC_REVISION_MINOR(l
));
1166 rc
= gpmc_mem_init();
1167 if (IS_ERR_VALUE(rc
)) {
1168 clk_disable_unprepare(gpmc_l3_clk
);
1169 clk_put(gpmc_l3_clk
);
1170 dev_err(gpmc_dev
, "failed to reserve memory\n");
1174 if (IS_ERR_VALUE(gpmc_setup_irq()))
1175 dev_warn(gpmc_dev
, "gpmc_setup_irq failed\n");
1180 static int gpmc_remove(struct platform_device
*pdev
)
1188 static struct platform_driver gpmc_driver
= {
1189 .probe
= gpmc_probe
,
1190 .remove
= gpmc_remove
,
1192 .name
= DEVICE_NAME
,
1193 .owner
= THIS_MODULE
,
1197 static __init
int gpmc_init(void)
1199 return platform_driver_register(&gpmc_driver
);
1202 static __exit
void gpmc_exit(void)
1204 platform_driver_unregister(&gpmc_driver
);
1208 postcore_initcall(gpmc_init
);
1209 module_exit(gpmc_exit
);
1211 static int __init
omap_gpmc_init(void)
1213 struct omap_hwmod
*oh
;
1214 struct platform_device
*pdev
;
1215 char *oh_name
= "gpmc";
1217 oh
= omap_hwmod_lookup(oh_name
);
1219 pr_err("Could not look up %s\n", oh_name
);
1223 pdev
= omap_device_build(DEVICE_NAME
, -1, oh
, NULL
, 0, NULL
, 0, 0);
1224 WARN(IS_ERR(pdev
), "could not build omap_device for %s\n", oh_name
);
1226 return IS_ERR(pdev
) ? PTR_ERR(pdev
) : 0;
1228 postcore_initcall(omap_gpmc_init
);
1230 static irqreturn_t
gpmc_handle_irq(int irq
, void *dev
)
1235 regval
= gpmc_read_reg(GPMC_IRQSTATUS
);
1240 for (i
= 0; i
< GPMC_NR_IRQ
; i
++)
1241 if (regval
& gpmc_client_irq
[i
].bitmask
)
1242 generic_handle_irq(gpmc_client_irq
[i
].irq
);
1244 gpmc_write_reg(GPMC_IRQSTATUS
, regval
);
1249 #ifdef CONFIG_ARCH_OMAP3
1250 static struct omap3_gpmc_regs gpmc_context
;
1252 void omap3_gpmc_save_context(void)
1256 gpmc_context
.sysconfig
= gpmc_read_reg(GPMC_SYSCONFIG
);
1257 gpmc_context
.irqenable
= gpmc_read_reg(GPMC_IRQENABLE
);
1258 gpmc_context
.timeout_ctrl
= gpmc_read_reg(GPMC_TIMEOUT_CONTROL
);
1259 gpmc_context
.config
= gpmc_read_reg(GPMC_CONFIG
);
1260 gpmc_context
.prefetch_config1
= gpmc_read_reg(GPMC_PREFETCH_CONFIG1
);
1261 gpmc_context
.prefetch_config2
= gpmc_read_reg(GPMC_PREFETCH_CONFIG2
);
1262 gpmc_context
.prefetch_control
= gpmc_read_reg(GPMC_PREFETCH_CONTROL
);
1263 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
1264 gpmc_context
.cs_context
[i
].is_valid
= gpmc_cs_mem_enabled(i
);
1265 if (gpmc_context
.cs_context
[i
].is_valid
) {
1266 gpmc_context
.cs_context
[i
].config1
=
1267 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG1
);
1268 gpmc_context
.cs_context
[i
].config2
=
1269 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG2
);
1270 gpmc_context
.cs_context
[i
].config3
=
1271 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG3
);
1272 gpmc_context
.cs_context
[i
].config4
=
1273 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG4
);
1274 gpmc_context
.cs_context
[i
].config5
=
1275 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG5
);
1276 gpmc_context
.cs_context
[i
].config6
=
1277 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG6
);
1278 gpmc_context
.cs_context
[i
].config7
=
1279 gpmc_cs_read_reg(i
, GPMC_CS_CONFIG7
);
1284 void omap3_gpmc_restore_context(void)
1288 gpmc_write_reg(GPMC_SYSCONFIG
, gpmc_context
.sysconfig
);
1289 gpmc_write_reg(GPMC_IRQENABLE
, gpmc_context
.irqenable
);
1290 gpmc_write_reg(GPMC_TIMEOUT_CONTROL
, gpmc_context
.timeout_ctrl
);
1291 gpmc_write_reg(GPMC_CONFIG
, gpmc_context
.config
);
1292 gpmc_write_reg(GPMC_PREFETCH_CONFIG1
, gpmc_context
.prefetch_config1
);
1293 gpmc_write_reg(GPMC_PREFETCH_CONFIG2
, gpmc_context
.prefetch_config2
);
1294 gpmc_write_reg(GPMC_PREFETCH_CONTROL
, gpmc_context
.prefetch_control
);
1295 for (i
= 0; i
< GPMC_CS_NUM
; i
++) {
1296 if (gpmc_context
.cs_context
[i
].is_valid
) {
1297 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG1
,
1298 gpmc_context
.cs_context
[i
].config1
);
1299 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG2
,
1300 gpmc_context
.cs_context
[i
].config2
);
1301 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG3
,
1302 gpmc_context
.cs_context
[i
].config3
);
1303 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG4
,
1304 gpmc_context
.cs_context
[i
].config4
);
1305 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG5
,
1306 gpmc_context
.cs_context
[i
].config5
);
1307 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG6
,
1308 gpmc_context
.cs_context
[i
].config6
);
1309 gpmc_cs_write_reg(i
, GPMC_CS_CONFIG7
,
1310 gpmc_context
.cs_context
[i
].config7
);
1314 #endif /* CONFIG_ARCH_OMAP3 */