Adding support for MOXA ART SoC. Testing port of linux-2.6.32.60-moxart.
[linux-3.6.7-moxart.git] / arch / arm / mach-exynos / common.c
blob3e02ae6b303fe47a7564a1868ddd57055960ba8d
1 /*
2 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
5 * Common Codes for EXYNOS
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
12 #include <linux/kernel.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/io.h>
16 #include <linux/device.h>
17 #include <linux/gpio.h>
18 #include <linux/sched.h>
19 #include <linux/serial_core.h>
20 #include <linux/of.h>
21 #include <linux/of_irq.h>
22 #include <linux/export.h>
23 #include <linux/irqdomain.h>
24 #include <linux/of_address.h>
26 #include <asm/proc-fns.h>
27 #include <asm/exception.h>
28 #include <asm/hardware/cache-l2x0.h>
29 #include <asm/hardware/gic.h>
30 #include <asm/mach/map.h>
31 #include <asm/mach/irq.h>
32 #include <asm/cacheflush.h>
34 #include <mach/regs-irq.h>
35 #include <mach/regs-pmu.h>
36 #include <mach/regs-gpio.h>
37 #include <mach/pmu.h>
39 #include <plat/cpu.h>
40 #include <plat/clock.h>
41 #include <plat/devs.h>
42 #include <plat/pm.h>
43 #include <plat/sdhci.h>
44 #include <plat/gpio-cfg.h>
45 #include <plat/adc-core.h>
46 #include <plat/fb-core.h>
47 #include <plat/fimc-core.h>
48 #include <plat/iic-core.h>
49 #include <plat/tv-core.h>
50 #include <plat/spi-core.h>
51 #include <plat/regs-serial.h>
53 #include "common.h"
/* PL310 L2 cache auxiliary control value/mask used at init and resume */
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";

/* Forward declarations for the per-SoC cpu_table callbacks below */
static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
69 static struct cpu_table cpu_ids[] __initdata = {
71 .idcode = EXYNOS4210_CPU_ID,
72 .idmask = EXYNOS4_CPU_MASK,
73 .map_io = exynos4_map_io,
74 .init_clocks = exynos4_init_clocks,
75 .init_uarts = exynos_init_uarts,
76 .init = exynos_init,
77 .name = name_exynos4210,
78 }, {
79 .idcode = EXYNOS4212_CPU_ID,
80 .idmask = EXYNOS4_CPU_MASK,
81 .map_io = exynos4_map_io,
82 .init_clocks = exynos4_init_clocks,
83 .init_uarts = exynos_init_uarts,
84 .init = exynos_init,
85 .name = name_exynos4212,
86 }, {
87 .idcode = EXYNOS4412_CPU_ID,
88 .idmask = EXYNOS4_CPU_MASK,
89 .map_io = exynos4_map_io,
90 .init_clocks = exynos4_init_clocks,
91 .init_uarts = exynos_init_uarts,
92 .init = exynos_init,
93 .name = name_exynos4412,
94 }, {
95 .idcode = EXYNOS5250_SOC_ID,
96 .idmask = EXYNOS5_SOC_MASK,
97 .map_io = exynos5_map_io,
98 .init_clocks = exynos5_init_clocks,
99 .init_uarts = exynos_init_uarts,
100 .init = exynos_init,
101 .name = name_exynos5250,
105 /* Initial IO mappings */
107 static struct map_desc exynos_iodesc[] __initdata = {
109 .virtual = (unsigned long)S5P_VA_CHIPID,
110 .pfn = __phys_to_pfn(EXYNOS_PA_CHIPID),
111 .length = SZ_4K,
112 .type = MT_DEVICE,
116 static struct map_desc exynos4_iodesc[] __initdata = {
118 .virtual = (unsigned long)S3C_VA_SYS,
119 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
120 .length = SZ_64K,
121 .type = MT_DEVICE,
122 }, {
123 .virtual = (unsigned long)S3C_VA_TIMER,
124 .pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
125 .length = SZ_16K,
126 .type = MT_DEVICE,
127 }, {
128 .virtual = (unsigned long)S3C_VA_WATCHDOG,
129 .pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
130 .length = SZ_4K,
131 .type = MT_DEVICE,
132 }, {
133 .virtual = (unsigned long)S5P_VA_SROMC,
134 .pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
135 .length = SZ_4K,
136 .type = MT_DEVICE,
137 }, {
138 .virtual = (unsigned long)S5P_VA_SYSTIMER,
139 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
140 .length = SZ_4K,
141 .type = MT_DEVICE,
142 }, {
143 .virtual = (unsigned long)S5P_VA_PMU,
144 .pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
145 .length = SZ_64K,
146 .type = MT_DEVICE,
147 }, {
148 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
149 .pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
150 .length = SZ_4K,
151 .type = MT_DEVICE,
152 }, {
153 .virtual = (unsigned long)S5P_VA_GIC_CPU,
154 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
155 .length = SZ_64K,
156 .type = MT_DEVICE,
157 }, {
158 .virtual = (unsigned long)S5P_VA_GIC_DIST,
159 .pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
160 .length = SZ_64K,
161 .type = MT_DEVICE,
162 }, {
163 .virtual = (unsigned long)S3C_VA_UART,
164 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
165 .length = SZ_512K,
166 .type = MT_DEVICE,
167 }, {
168 .virtual = (unsigned long)S5P_VA_CMU,
169 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
170 .length = SZ_128K,
171 .type = MT_DEVICE,
172 }, {
173 .virtual = (unsigned long)S5P_VA_COREPERI_BASE,
174 .pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
175 .length = SZ_8K,
176 .type = MT_DEVICE,
177 }, {
178 .virtual = (unsigned long)S5P_VA_L2CC,
179 .pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
180 .length = SZ_4K,
181 .type = MT_DEVICE,
182 }, {
183 .virtual = (unsigned long)S5P_VA_DMC0,
184 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
185 .length = SZ_64K,
186 .type = MT_DEVICE,
187 }, {
188 .virtual = (unsigned long)S5P_VA_DMC1,
189 .pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
190 .length = SZ_64K,
191 .type = MT_DEVICE,
192 }, {
193 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
194 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
195 .length = SZ_4K,
196 .type = MT_DEVICE,
200 static struct map_desc exynos4_iodesc0[] __initdata = {
202 .virtual = (unsigned long)S5P_VA_SYSRAM,
203 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
204 .length = SZ_4K,
205 .type = MT_DEVICE,
209 static struct map_desc exynos4_iodesc1[] __initdata = {
211 .virtual = (unsigned long)S5P_VA_SYSRAM,
212 .pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
213 .length = SZ_4K,
214 .type = MT_DEVICE,
218 static struct map_desc exynos5_iodesc[] __initdata = {
220 .virtual = (unsigned long)S3C_VA_SYS,
221 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
222 .length = SZ_64K,
223 .type = MT_DEVICE,
224 }, {
225 .virtual = (unsigned long)S3C_VA_TIMER,
226 .pfn = __phys_to_pfn(EXYNOS5_PA_TIMER),
227 .length = SZ_16K,
228 .type = MT_DEVICE,
229 }, {
230 .virtual = (unsigned long)S3C_VA_WATCHDOG,
231 .pfn = __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
232 .length = SZ_4K,
233 .type = MT_DEVICE,
234 }, {
235 .virtual = (unsigned long)S5P_VA_SROMC,
236 .pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
237 .length = SZ_4K,
238 .type = MT_DEVICE,
239 }, {
240 .virtual = (unsigned long)S5P_VA_SYSTIMER,
241 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
242 .length = SZ_4K,
243 .type = MT_DEVICE,
244 }, {
245 .virtual = (unsigned long)S5P_VA_SYSRAM,
246 .pfn = __phys_to_pfn(EXYNOS5_PA_SYSRAM),
247 .length = SZ_4K,
248 .type = MT_DEVICE,
249 }, {
250 .virtual = (unsigned long)S5P_VA_CMU,
251 .pfn = __phys_to_pfn(EXYNOS5_PA_CMU),
252 .length = 144 * SZ_1K,
253 .type = MT_DEVICE,
254 }, {
255 .virtual = (unsigned long)S5P_VA_PMU,
256 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
257 .length = SZ_64K,
258 .type = MT_DEVICE,
259 }, {
260 .virtual = (unsigned long)S5P_VA_COMBINER_BASE,
261 .pfn = __phys_to_pfn(EXYNOS5_PA_COMBINER),
262 .length = SZ_4K,
263 .type = MT_DEVICE,
264 }, {
265 .virtual = (unsigned long)S3C_VA_UART,
266 .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
267 .length = SZ_512K,
268 .type = MT_DEVICE,
269 }, {
270 .virtual = (unsigned long)S5P_VA_GIC_CPU,
271 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
272 .length = SZ_8K,
273 .type = MT_DEVICE,
274 }, {
275 .virtual = (unsigned long)S5P_VA_GIC_DIST,
276 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
277 .length = SZ_4K,
278 .type = MT_DEVICE,
282 void exynos4_restart(char mode, const char *cmd)
284 __raw_writel(0x1, S5P_SWRESET);
287 void exynos5_restart(char mode, const char *cmd)
289 __raw_writel(0x1, EXYNOS_SWRESET);
292 void __init exynos_init_late(void)
294 exynos_pm_late_initcall();
298 * exynos_map_io
300 * register the standard cpu IO areas
303 void __init exynos_init_io(struct map_desc *mach_desc, int size)
305 /* initialize the io descriptors we need for initialization */
306 iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
307 if (mach_desc)
308 iotable_init(mach_desc, size);
310 /* detect cpu id and rev. */
311 s5p_init_cpu(S5P_VA_CHIPID);
313 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
316 static void __init exynos4_map_io(void)
318 iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
320 if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
321 iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
322 else
323 iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));
325 /* initialize device information early */
326 exynos4_default_sdhci0();
327 exynos4_default_sdhci1();
328 exynos4_default_sdhci2();
329 exynos4_default_sdhci3();
331 s3c_adc_setname("samsung-adc-v3");
333 s3c_fimc_setname(0, "exynos4-fimc");
334 s3c_fimc_setname(1, "exynos4-fimc");
335 s3c_fimc_setname(2, "exynos4-fimc");
336 s3c_fimc_setname(3, "exynos4-fimc");
338 s3c_sdhci_setname(0, "exynos4-sdhci");
339 s3c_sdhci_setname(1, "exynos4-sdhci");
340 s3c_sdhci_setname(2, "exynos4-sdhci");
341 s3c_sdhci_setname(3, "exynos4-sdhci");
343 /* The I2C bus controllers are directly compatible with s3c2440 */
344 s3c_i2c0_setname("s3c2440-i2c");
345 s3c_i2c1_setname("s3c2440-i2c");
346 s3c_i2c2_setname("s3c2440-i2c");
348 s5p_fb_setname(0, "exynos4-fb");
349 s5p_hdmi_setname("exynos4-hdmi");
351 s3c64xx_spi_setname("exynos4210-spi");
354 static void __init exynos5_map_io(void)
356 iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
358 s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
359 s3c_device_i2c0.resource[0].end = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
360 s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
361 s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;
363 s3c_sdhci_setname(0, "exynos4-sdhci");
364 s3c_sdhci_setname(1, "exynos4-sdhci");
365 s3c_sdhci_setname(2, "exynos4-sdhci");
366 s3c_sdhci_setname(3, "exynos4-sdhci");
368 /* The I2C bus controllers are directly compatible with s3c2440 */
369 s3c_i2c0_setname("s3c2440-i2c");
370 s3c_i2c1_setname("s3c2440-i2c");
371 s3c_i2c2_setname("s3c2440-i2c");
373 s3c64xx_spi_setname("exynos4210-spi");
376 static void __init exynos4_init_clocks(int xtal)
378 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
380 s3c24xx_register_baseclocks(xtal);
381 s5p_register_clocks(xtal);
383 if (soc_is_exynos4210())
384 exynos4210_register_clocks();
385 else if (soc_is_exynos4212() || soc_is_exynos4412())
386 exynos4212_register_clocks();
388 exynos4_register_clocks();
389 exynos4_setup_clocks();
392 static void __init exynos5_init_clocks(int xtal)
394 printk(KERN_DEBUG "%s: initializing clocks\n", __func__);
396 s3c24xx_register_baseclocks(xtal);
397 s5p_register_clocks(xtal);
399 exynos5_register_clocks();
400 exynos5_setup_clocks();
403 #define COMBINER_ENABLE_SET 0x0
404 #define COMBINER_ENABLE_CLEAR 0x4
405 #define COMBINER_INT_STATUS 0xC
407 static DEFINE_SPINLOCK(irq_controller_lock);
409 struct combiner_chip_data {
410 unsigned int irq_offset;
411 unsigned int irq_mask;
412 void __iomem *base;
415 static struct irq_domain *combiner_irq_domain;
416 static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
418 static inline void __iomem *combiner_base(struct irq_data *data)
420 struct combiner_chip_data *combiner_data =
421 irq_data_get_irq_chip_data(data);
423 return combiner_data->base;
426 static void combiner_mask_irq(struct irq_data *data)
428 u32 mask = 1 << (data->hwirq % 32);
430 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
433 static void combiner_unmask_irq(struct irq_data *data)
435 u32 mask = 1 << (data->hwirq % 32);
437 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
440 static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
442 struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
443 struct irq_chip *chip = irq_get_chip(irq);
444 unsigned int cascade_irq, combiner_irq;
445 unsigned long status;
447 chained_irq_enter(chip, desc);
449 spin_lock(&irq_controller_lock);
450 status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
451 spin_unlock(&irq_controller_lock);
452 status &= chip_data->irq_mask;
454 if (status == 0)
455 goto out;
457 combiner_irq = __ffs(status);
459 cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
460 if (unlikely(cascade_irq >= NR_IRQS))
461 do_bad_IRQ(cascade_irq, desc);
462 else
463 generic_handle_irq(cascade_irq);
465 out:
466 chained_irq_exit(chip, desc);
469 static struct irq_chip combiner_chip = {
470 .name = "COMBINER",
471 .irq_mask = combiner_mask_irq,
472 .irq_unmask = combiner_unmask_irq,
475 static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
477 unsigned int max_nr;
479 if (soc_is_exynos5250())
480 max_nr = EXYNOS5_MAX_COMBINER_NR;
481 else
482 max_nr = EXYNOS4_MAX_COMBINER_NR;
484 if (combiner_nr >= max_nr)
485 BUG();
486 if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
487 BUG();
488 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
491 static void __init combiner_init_one(unsigned int combiner_nr,
492 void __iomem *base)
494 combiner_data[combiner_nr].base = base;
495 combiner_data[combiner_nr].irq_offset = irq_find_mapping(
496 combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
497 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
499 /* Disable all interrupts */
500 __raw_writel(combiner_data[combiner_nr].irq_mask,
501 base + COMBINER_ENABLE_CLEAR);
504 #ifdef CONFIG_OF
505 static int combiner_irq_domain_xlate(struct irq_domain *d,
506 struct device_node *controller,
507 const u32 *intspec, unsigned int intsize,
508 unsigned long *out_hwirq,
509 unsigned int *out_type)
511 if (d->of_node != controller)
512 return -EINVAL;
514 if (intsize < 2)
515 return -EINVAL;
517 *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
518 *out_type = 0;
520 return 0;
522 #else
523 static int combiner_irq_domain_xlate(struct irq_domain *d,
524 struct device_node *controller,
525 const u32 *intspec, unsigned int intsize,
526 unsigned long *out_hwirq,
527 unsigned int *out_type)
529 return -EINVAL;
531 #endif
533 static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
534 irq_hw_number_t hw)
536 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
537 irq_set_chip_data(irq, &combiner_data[hw >> 3]);
538 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
540 return 0;
543 static struct irq_domain_ops combiner_irq_domain_ops = {
544 .xlate = combiner_irq_domain_xlate,
545 .map = combiner_irq_domain_map,
548 static void __init combiner_init(void __iomem *combiner_base,
549 struct device_node *np)
551 int i, irq, irq_base;
552 unsigned int max_nr, nr_irq;
554 if (np) {
555 if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
556 pr_warning("%s: number of combiners not specified, "
557 "setting default as %d.\n",
558 __func__, EXYNOS4_MAX_COMBINER_NR);
559 max_nr = EXYNOS4_MAX_COMBINER_NR;
561 } else {
562 max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
563 EXYNOS4_MAX_COMBINER_NR;
565 nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
567 irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
568 if (IS_ERR_VALUE(irq_base)) {
569 irq_base = COMBINER_IRQ(0, 0);
570 pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
573 combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
574 &combiner_irq_domain_ops, &combiner_data);
575 if (WARN_ON(!combiner_irq_domain)) {
576 pr_warning("%s: irq domain init failed\n", __func__);
577 return;
580 for (i = 0; i < max_nr; i++) {
581 combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
582 irq = IRQ_SPI(i);
583 #ifdef CONFIG_OF
584 if (np)
585 irq = irq_of_parse_and_map(np, i);
586 #endif
587 combiner_cascade_irq(i, irq);
#ifdef CONFIG_OF
/* DT entry point: map the combiner registers and initialize the block */
int __init combiner_of_init(struct device_node *np, struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	combiner_init(combiner_base, np);

	return 0;
}

static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{},
};
#endif
615 void __init exynos4_init_irq(void)
617 unsigned int gic_bank_offset;
619 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
621 if (!of_have_populated_dt())
622 gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
623 #ifdef CONFIG_OF
624 else
625 of_irq_init(exynos4_dt_irq_match);
626 #endif
628 if (!of_have_populated_dt())
629 combiner_init(S5P_VA_COMBINER_BASE, NULL);
632 * The parameters of s5p_init_irq() are for VIC init.
633 * Theses parameters should be NULL and 0 because EXYNOS4
634 * uses GIC instead of VIC.
636 s5p_init_irq(NULL, 0);
639 void __init exynos5_init_irq(void)
641 #ifdef CONFIG_OF
642 of_irq_init(exynos4_dt_irq_match);
643 #endif
645 * The parameters of s5p_init_irq() are for VIC init.
646 * Theses parameters should be NULL and 0 because EXYNOS4
647 * uses GIC instead of VIC.
649 s5p_init_irq(NULL, 0);
652 struct bus_type exynos_subsys = {
653 .name = "exynos-core",
654 .dev_name = "exynos-core",
657 static struct device exynos4_dev = {
658 .bus = &exynos_subsys,
661 static int __init exynos_core_init(void)
663 return subsys_system_register(&exynos_subsys, NULL);
665 core_initcall(exynos_core_init);
#ifdef CONFIG_CACHE_L2X0
/*
 * Initialize the external PL310 L2 cache on EXYNOS4 and save its
 * register values so they can be restored on resume from suspend.
 * EXYNOS5 has no outer L2X0, so it bails out immediately.
 */
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	/* Prefer DT-driven init; fall back to manual setup below */
	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif
721 static int __init exynos_init(void)
723 printk(KERN_INFO "EXYNOS: Initializing architecture\n");
725 return device_register(&exynos4_dev);
728 /* uart registration process */
730 static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
732 struct s3c2410_uartcfg *tcfg = cfg;
733 u32 ucnt;
735 for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
736 tcfg->has_fracval = 1;
738 if (soc_is_exynos5250())
739 s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
740 else
741 s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
744 static void __iomem *exynos_eint_base;
746 static DEFINE_SPINLOCK(eint_lock);
748 static unsigned int eint0_15_data[16];
750 static inline int exynos4_irq_to_gpio(unsigned int irq)
752 if (irq < IRQ_EINT(0))
753 return -EINVAL;
755 irq -= IRQ_EINT(0);
756 if (irq < 8)
757 return EXYNOS4_GPX0(irq);
759 irq -= 8;
760 if (irq < 8)
761 return EXYNOS4_GPX1(irq);
763 irq -= 8;
764 if (irq < 8)
765 return EXYNOS4_GPX2(irq);
767 irq -= 8;
768 if (irq < 8)
769 return EXYNOS4_GPX3(irq);
771 return -EINVAL;
774 static inline int exynos5_irq_to_gpio(unsigned int irq)
776 if (irq < IRQ_EINT(0))
777 return -EINVAL;
779 irq -= IRQ_EINT(0);
780 if (irq < 8)
781 return EXYNOS5_GPX0(irq);
783 irq -= 8;
784 if (irq < 8)
785 return EXYNOS5_GPX1(irq);
787 irq -= 8;
788 if (irq < 8)
789 return EXYNOS5_GPX2(irq);
791 irq -= 8;
792 if (irq < 8)
793 return EXYNOS5_GPX3(irq);
795 return -EINVAL;
798 static unsigned int exynos4_eint0_15_src_int[16] = {
799 EXYNOS4_IRQ_EINT0,
800 EXYNOS4_IRQ_EINT1,
801 EXYNOS4_IRQ_EINT2,
802 EXYNOS4_IRQ_EINT3,
803 EXYNOS4_IRQ_EINT4,
804 EXYNOS4_IRQ_EINT5,
805 EXYNOS4_IRQ_EINT6,
806 EXYNOS4_IRQ_EINT7,
807 EXYNOS4_IRQ_EINT8,
808 EXYNOS4_IRQ_EINT9,
809 EXYNOS4_IRQ_EINT10,
810 EXYNOS4_IRQ_EINT11,
811 EXYNOS4_IRQ_EINT12,
812 EXYNOS4_IRQ_EINT13,
813 EXYNOS4_IRQ_EINT14,
814 EXYNOS4_IRQ_EINT15,
817 static unsigned int exynos5_eint0_15_src_int[16] = {
818 EXYNOS5_IRQ_EINT0,
819 EXYNOS5_IRQ_EINT1,
820 EXYNOS5_IRQ_EINT2,
821 EXYNOS5_IRQ_EINT3,
822 EXYNOS5_IRQ_EINT4,
823 EXYNOS5_IRQ_EINT5,
824 EXYNOS5_IRQ_EINT6,
825 EXYNOS5_IRQ_EINT7,
826 EXYNOS5_IRQ_EINT8,
827 EXYNOS5_IRQ_EINT9,
828 EXYNOS5_IRQ_EINT10,
829 EXYNOS5_IRQ_EINT11,
830 EXYNOS5_IRQ_EINT12,
831 EXYNOS5_IRQ_EINT13,
832 EXYNOS5_IRQ_EINT14,
833 EXYNOS5_IRQ_EINT15,
835 static inline void exynos_irq_eint_mask(struct irq_data *data)
837 u32 mask;
839 spin_lock(&eint_lock);
840 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
841 mask |= EINT_OFFSET_BIT(data->irq);
842 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
843 spin_unlock(&eint_lock);
846 static void exynos_irq_eint_unmask(struct irq_data *data)
848 u32 mask;
850 spin_lock(&eint_lock);
851 mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
852 mask &= ~(EINT_OFFSET_BIT(data->irq));
853 __raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
854 spin_unlock(&eint_lock);
857 static inline void exynos_irq_eint_ack(struct irq_data *data)
859 __raw_writel(EINT_OFFSET_BIT(data->irq),
860 EINT_PEND(exynos_eint_base, data->irq));
863 static void exynos_irq_eint_maskack(struct irq_data *data)
865 exynos_irq_eint_mask(data);
866 exynos_irq_eint_ack(data);
869 static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
871 int offs = EINT_OFFSET(data->irq);
872 int shift;
873 u32 ctrl, mask;
874 u32 newvalue = 0;
876 switch (type) {
877 case IRQ_TYPE_EDGE_RISING:
878 newvalue = S5P_IRQ_TYPE_EDGE_RISING;
879 break;
881 case IRQ_TYPE_EDGE_FALLING:
882 newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
883 break;
885 case IRQ_TYPE_EDGE_BOTH:
886 newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
887 break;
889 case IRQ_TYPE_LEVEL_LOW:
890 newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
891 break;
893 case IRQ_TYPE_LEVEL_HIGH:
894 newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
895 break;
897 default:
898 printk(KERN_ERR "No such irq type %d", type);
899 return -EINVAL;
902 shift = (offs & 0x7) * 4;
903 mask = 0x7 << shift;
905 spin_lock(&eint_lock);
906 ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
907 ctrl &= ~mask;
908 ctrl |= newvalue << shift;
909 __raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
910 spin_unlock(&eint_lock);
912 if (soc_is_exynos5250())
913 s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
914 else
915 s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
917 return 0;
920 static struct irq_chip exynos_irq_eint = {
921 .name = "exynos-eint",
922 .irq_mask = exynos_irq_eint_mask,
923 .irq_unmask = exynos_irq_eint_unmask,
924 .irq_mask_ack = exynos_irq_eint_maskack,
925 .irq_ack = exynos_irq_eint_ack,
926 .irq_set_type = exynos_irq_eint_set_type,
927 #ifdef CONFIG_PM
928 .irq_set_wake = s3c_irqext_wake,
929 #endif
933 * exynos4_irq_demux_eint
935 * This function demuxes the IRQ from from EINTs 16 to 31.
936 * It is designed to be inlined into the specific handler
937 * s5p_irq_demux_eintX_Y.
939 * Each EINT pend/mask registers handle eight of them.
941 static inline void exynos_irq_demux_eint(unsigned int start)
943 unsigned int irq;
945 u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
946 u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));
948 status &= ~mask;
949 status &= 0xff;
951 while (status) {
952 irq = fls(status) - 1;
953 generic_handle_irq(irq + start);
954 status &= ~(1 << irq);
958 static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
960 struct irq_chip *chip = irq_get_chip(irq);
961 chained_irq_enter(chip, desc);
962 exynos_irq_demux_eint(IRQ_EINT(16));
963 exynos_irq_demux_eint(IRQ_EINT(24));
964 chained_irq_exit(chip, desc);
967 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
969 u32 *irq_data = irq_get_handler_data(irq);
970 struct irq_chip *chip = irq_get_chip(irq);
972 chained_irq_enter(chip, desc);
973 chip->irq_mask(&desc->irq_data);
975 if (chip->irq_ack)
976 chip->irq_ack(&desc->irq_data);
978 generic_handle_irq(*irq_data);
980 chip->irq_unmask(&desc->irq_data);
981 chained_irq_exit(chip, desc);
984 static int __init exynos_init_irq_eint(void)
986 int irq;
988 if (soc_is_exynos5250())
989 exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
990 else
991 exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);
993 if (exynos_eint_base == NULL) {
994 pr_err("unable to ioremap for EINT base address\n");
995 return -ENOMEM;
998 for (irq = 0 ; irq <= 31 ; irq++) {
999 irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
1000 handle_level_irq);
1001 set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
1004 irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);
1006 for (irq = 0 ; irq <= 15 ; irq++) {
1007 eint0_15_data[irq] = IRQ_EINT(irq);
1009 if (soc_is_exynos5250()) {
1010 irq_set_handler_data(exynos5_eint0_15_src_int[irq],
1011 &eint0_15_data[irq]);
1012 irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
1013 exynos_irq_eint0_15);
1014 } else {
1015 irq_set_handler_data(exynos4_eint0_15_src_int[irq],
1016 &eint0_15_data[irq]);
1017 irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
1018 exynos_irq_eint0_15);
1022 return 0;
1024 arch_initcall(exynos_init_irq_eint);