/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:	Nicolas Pitre
 *  Created:	Jun 15, 2001
 *  Copyright:	MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

#include <mach/irqs.h>
/*
 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
 * one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANK 4 and 5 are only available on PXA935.
 */
#define GPLR_OFFSET	0x00
#define GPDR_OFFSET	0x0C
#define GPSR_OFFSET	0x18
#define GPCR_OFFSET	0x24
#define GRER_OFFSET	0x30
#define GFER_OFFSET	0x3C
#define GEDR_OFFSET	0x48
#define GAFR_OFFSET	0x54
#define ED_MASK_OFFSET	0x9C	/* GPIO edge detection for AP side */

#define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
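/*
 * For example, BANK_OFF(1) = 0x004 and BANK_OFF(4) = 0x104, matching the
 * GPLR column of the table above; the per-register offsets (GPDR_OFFSET,
 * GPSR_OFFSET, ...) are then added on top of the bank base.
 */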
int pxa_last_gpio;

struct pxa_gpio_chip {
        struct gpio_chip chip;
        void __iomem *regbase;
        char label[10];

        unsigned long irq_mask;
        unsigned long irq_edge_rise;
        unsigned long irq_edge_fall;
        int (*set_wake)(unsigned int gpio, unsigned int on);

#ifdef CONFIG_PM
        unsigned long saved_gplr;
        unsigned long saved_gpdr;
        unsigned long saved_grer;
        unsigned long saved_gfer;
#endif
};

enum {
        PXA25X_GPIO = 0,
        PXA26X_GPIO,
        PXA27X_GPIO,
        PXA3XX_GPIO,
        PXA93X_GPIO,
        MMP_GPIO = 0x10,
        MMP2_GPIO,
};
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static int gpio_type;
static void __iomem *gpio_reg_base;
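/*
 * Iterate over all banks: one pxa_gpio_chip per 32 GPIOs, from GPIO0 up to
 * and including pxa_last_gpio.
 */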
#define for_each_gpio_chip(i, c)			\
        for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
        return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
        return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
        return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
        return (type & MMP_GPIO) != 0;
}
/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2)
 * inverted, as well as their Alternate Function value being '1' for GPIO
 * in GAFRx.
 */
static inline int __gpio_is_inverted(int gpio)
{
        if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
                return 1;
        return 0;
}
/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(unsigned gpio)
{
        struct pxa_gpio_chip *pxachip;
        void __iomem *base;
        unsigned long gafr = 0, gpdr = 0;
        int ret, af = 0, dir = 0;

        pxachip = gpio_to_pxachip(gpio);
        base = gpio_chip_base(&pxachip->chip);
        gpdr = readl_relaxed(base + GPDR_OFFSET);

        switch (gpio_type) {
        case PXA25X_GPIO:
        case PXA26X_GPIO:
        case PXA27X_GPIO:
                gafr = readl_relaxed(base + GAFR_OFFSET);
                af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
                dir = gpdr & GPIO_bit(gpio);

                if (__gpio_is_inverted(gpio))
                        ret = (af != 1) || (dir == 0);
                else
                        ret = (af != 0) || (dir != 0);
                break;
        default:
                ret = gpdr & GPIO_bit(gpio);
                break;
        }
        return ret;
}
#ifdef CONFIG_ARCH_PXA
static inline int __pxa_gpio_to_irq(int gpio)
{
        if (gpio_is_pxa_type(gpio_type))
                return PXA_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __pxa_irq_to_gpio(int irq)
{
        if (gpio_is_pxa_type(gpio_type))
                return irq - PXA_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
static inline int __pxa_irq_to_gpio(int irq) { return -1; }
#endif
#ifdef CONFIG_ARCH_MMP
static inline int __mmp_gpio_to_irq(int gpio)
{
        if (gpio_is_mmp_type(gpio_type))
                return MMP_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __mmp_irq_to_gpio(int irq)
{
        if (gpio_is_mmp_type(gpio_type))
                return irq - MMP_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
static inline int __mmp_irq_to_gpio(int irq) { return -1; }
#endif
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
        int gpio, ret;

        gpio = chip->base + offset;
        ret = __pxa_gpio_to_irq(gpio);
        if (ret >= 0)
                return ret;
        return __mmp_gpio_to_irq(gpio);
}
int pxa_irq_to_gpio(int irq)
{
        int ret;

        ret = __pxa_irq_to_gpio(irq);
        if (ret >= 0)
                return ret;
        return __mmp_irq_to_gpio(irq);
}
static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t value, mask = 1 << offset;
        unsigned long flags;

        spin_lock_irqsave(&gpio_lock, flags);

        value = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                value |= mask;
        else
                value &= ~mask;
        writel_relaxed(value, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}
static int pxa_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned offset, int value)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t tmp, mask = 1 << offset;
        unsigned long flags;

        writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

        spin_lock_irqsave(&gpio_lock, flags);

        tmp = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                tmp &= ~mask;
        else
                tmp |= mask;
        writel_relaxed(tmp, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}
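/*
 * pxa_gpio_get() returns the raw GPLR bit for the offset (non-zero when the
 * line is high, not normalized to 0/1); pxa_gpio_set() drives the line by
 * writing the bit to GPSR (set high) or GPCR (clear low).
 */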
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        writel_relaxed(1 << offset, gpio_chip_base(chip) +
                                (value ? GPSR_OFFSET : GPCR_OFFSET));
}
static int __devinit pxa_init_gpio_chip(int gpio_end,
                                        int (*set_wake)(unsigned int, unsigned int))
{
        int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
        struct pxa_gpio_chip *chips;

        chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
        if (chips == NULL) {
                pr_err("%s: failed to allocate GPIO chips\n", __func__);
                return -ENOMEM;
        }

        for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
                struct gpio_chip *c = &chips[i].chip;

                sprintf(chips[i].label, "gpio-%d", i);
                chips[i].regbase = gpio_reg_base + BANK_OFF(i);
                chips[i].set_wake = set_wake;

                c->base  = gpio;
                c->label = chips[i].label;

                c->direction_input  = pxa_gpio_direction_input;
                c->direction_output = pxa_gpio_direction_output;
                c->get    = pxa_gpio_get;
                c->set    = pxa_gpio_set;
                c->to_irq = pxa_gpio_to_irq;

                /* number of GPIOs on last bank may be less than 32 */
                c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
                gpiochip_add(c);
        }
        pxa_gpio_chips = chips;
        return 0;
}
/* Update only those GRERx and GFERx edge detection register bits if those
 * bits are set in c->irq_mask
 */
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
        uint32_t grer, gfer;

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
        grer |= c->irq_edge_rise & c->irq_mask;
        gfer |= c->irq_edge_fall & c->irq_mask;
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
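/*
 * For IRQ_TYPE_PROBE the GPIO is armed for both edges, but only if it is not
 * already configured with an edge and not claimed as an alternate function
 * or as an output (__gpio_is_occupied()).
 */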
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
        struct pxa_gpio_chip *c;
        int gpio = pxa_irq_to_gpio(d->irq);
        unsigned long gpdr, mask = GPIO_bit(gpio);

        c = gpio_to_pxachip(gpio);

        if (type == IRQ_TYPE_PROBE) {
                /* Don't mess with enabled GPIOs using preconfigured edges or
                 * GPIOs set to alternate function or to output during probe
                 */
                if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
                        return 0;

                if (__gpio_is_occupied(gpio))
                        return 0;

                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
        }

        gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

        if (__gpio_is_inverted(gpio))
                writel_relaxed(gpdr | mask,  c->regbase + GPDR_OFFSET);
        else
                writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

        if (type & IRQ_TYPE_EDGE_RISING)
                c->irq_edge_rise |= mask;
        else
                c->irq_edge_rise &= ~mask;

        if (type & IRQ_TYPE_EDGE_FALLING)
                c->irq_edge_fall |= mask;
        else
                c->irq_edge_fall &= ~mask;

        update_edge_detect(c);

        pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
                ((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
                ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
        return 0;
}
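/*
 * Chained handler for the multiplexed GPIO interrupt: it keeps re-reading
 * GEDR across all banks and dispatching the per-GPIO interrupts until no
 * unmasked edge bits remain, so edges that arrive mid-scan are not lost.
 */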
static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct pxa_gpio_chip *c;
        int loop, gpio, gpio_base, n;
        unsigned long gedr;

        do {
                loop = 0;
                for_each_gpio_chip(gpio, c) {
                        gpio_base = c->chip.base;

                        gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
                        gedr = gedr & c->irq_mask;
                        writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

                        n = find_first_bit(&gedr, BITS_PER_LONG);
                        while (n < BITS_PER_LONG) {
                                loop = 1;

                                generic_handle_irq(gpio_to_irq(gpio_base + n));
                                n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
                        }
                }
        } while (loop);
}
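/*
 * irq_chip callbacks for the muxed GPIO interrupts: ack clears the edge
 * status bit in GEDR, mask clears both edge-detect enables without touching
 * the stored configuration, and unmask re-applies it via update_edge_detect().
 */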
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
        uint32_t grer, gfer;

        c->irq_mask &= ~GPIO_bit(gpio);

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        if (c->set_wake)
                return c->set_wake(gpio, on);
        else
                return 0;
}
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        c->irq_mask |= GPIO_bit(gpio);
        update_edge_detect(c);
}
static struct irq_chip pxa_muxed_gpio_chip = {
        .name		= "GPIO",
        .irq_ack	= pxa_ack_muxed_gpio,
        .irq_mask	= pxa_mask_muxed_gpio,
        .irq_unmask	= pxa_unmask_muxed_gpio,
        .irq_set_type	= pxa_gpio_irq_type,
        .irq_set_wake	= pxa_gpio_set_wake,
};
static int pxa_gpio_nums(void)
{
        int count = 0;

#ifdef CONFIG_ARCH_PXA
        if (cpu_is_pxa25x()) {
#ifdef CONFIG_CPU_PXA26x
                count = 89;
                gpio_type = PXA26X_GPIO;
#elif defined(CONFIG_PXA25x)
                count = 84;
                gpio_type = PXA26X_GPIO;
#endif /* CONFIG_CPU_PXA26x */
        } else if (cpu_is_pxa27x()) {
                count = 120;
                gpio_type = PXA27X_GPIO;
        } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
                count = 191;
                gpio_type = PXA93X_GPIO;
        } else if (cpu_is_pxa3xx()) {
                count = 127;
                gpio_type = PXA3XX_GPIO;
        }
#endif /* CONFIG_ARCH_PXA */

#ifdef CONFIG_ARCH_MMP
        if (cpu_is_pxa168() || cpu_is_pxa910()) {
                count = 127;
                gpio_type = MMP_GPIO;
        } else if (cpu_is_mmp2()) {
                count = 191;
                gpio_type = MMP2_GPIO;
        }
#endif /* CONFIG_ARCH_MMP */

        return count;
}
static int __devinit pxa_gpio_probe(struct platform_device *pdev)
{
        struct pxa_gpio_chip *c;
        struct resource *res;
        struct clk *clk;
        struct pxa_gpio_platform_data *info;
        int gpio, irq, ret;
        int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;

        pxa_last_gpio = pxa_gpio_nums();
        if (!pxa_last_gpio)
                return -EINVAL;

        irq0 = platform_get_irq_byname(pdev, "gpio0");
        irq1 = platform_get_irq_byname(pdev, "gpio1");
        irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
        if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
                || (irq_mux <= 0))
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
        gpio_reg_base = ioremap(res->start, resource_size(res));
        if (!gpio_reg_base)
                return -EINVAL;

        if (irq0 > 0)
                gpio_offset = 2;

        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
                        PTR_ERR(clk));
                iounmap(gpio_reg_base);
                return PTR_ERR(clk);
        }
        ret = clk_prepare(clk);
        if (ret) {
                clk_put(clk);
                iounmap(gpio_reg_base);
                return ret;
        }
        ret = clk_enable(clk);
        if (ret) {
                clk_unprepare(clk);
                clk_put(clk);
                iounmap(gpio_reg_base);
                return ret;
        }

        /* Initialize GPIO chips */
        info = dev_get_platdata(&pdev->dev);
        pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);

        /* clear all GPIO edge detects */
        for_each_gpio_chip(gpio, c) {
                writel_relaxed(0, c->regbase + GFER_OFFSET);
                writel_relaxed(0, c->regbase + GRER_OFFSET);
                writel_relaxed(~0, c->regbase + GEDR_OFFSET);
                /* unmask GPIO edge detect for AP side */
                if (gpio_is_mmp_type(gpio_type))
                        writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
        }

#ifdef CONFIG_ARCH_PXA
        irq = gpio_to_irq(0);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);

        irq = gpio_to_irq(1);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
#endif

        for (irq = gpio_to_irq(gpio_offset);
                irq <= gpio_to_irq(pxa_last_gpio); irq++) {
                irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                         handle_edge_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }

        irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
        return 0;
}
static struct platform_driver pxa_gpio_driver = {
        .probe		= pxa_gpio_probe,
        .driver		= {
                .name	= "pxa-gpio",
        },
};

static int __init pxa_gpio_init(void)
{
        return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_init);
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
                c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
                c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
                c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

                /* Clear GPIO transition detect bits */
                writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
        }
        return 0;
}
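/*
 * On resume the pin levels are restored through GPSR/GPCR before GPDR is
 * written back, so outputs come up at their saved level as soon as the
 * direction register is restored.
 */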
static void pxa_gpio_resume(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                /* restore level with set/clear */
                writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
                writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

                writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
                writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
                writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
        }
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif
struct syscore_ops pxa_gpio_syscore_ops = {
        .suspend	= pxa_gpio_suspend,
        .resume		= pxa_gpio_resume,
};
static int __init pxa_gpio_sysinit(void)
{
        register_syscore_ops(&pxa_gpio_syscore_ops);
        return 0;
}
postcore_initcall(pxa_gpio_sysinit);