// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 sysconfig;
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;
	struct device *dev;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	unsigned int needs_resume:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};
#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))
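/*
 * Illustration of the helpers above: with mod_usage = 0x09 (lines 0 and 3
 * requested as GPIOs), LINE_USED(bank->mod_usage, 3) is non-zero while
 * LINE_USED(bank->mod_usage, 1) is 0, and BANK_USED(bank) is true as soon
 * as any line is in use as a GPIO or as an IRQ.
 */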
static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	return gpiochip_get_data(chip);
}
static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}
static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}
/* set data out value using dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}
/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}
static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems unable
		 * to detect events and generate interrupts, at least on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}
/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}
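/*
 * Worked example of the conversion above: a requested debounce of 1000 us
 * gives DIV_ROUND_UP(1000, 31) - 1 = 32, and the hardware then applies
 * (32 + 1) * 31 = 1023 us, i.e. the request is rounded up to the next
 * 31 us step.
 */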
/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}
/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the "Wake-Up Generation" GPIO section of the TRM for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}
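/*
 * Illustrative values: with non_wakeup_gpios = 0x0000ffff, a gpio_mask of
 * BIT(20) falls outside the non-wakeup range and the function returns true,
 * while BIT(3) lies inside it and the function returns false.
 */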
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing IRQs during PER RET/OFF mode.
		 * Applies for omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}
static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}
static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}
static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}
static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}
static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}
static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq handler and
		 * do not need to be masked; as a result the handle_edge_irq()
		 * logic is excessive here and may cause interrupts to be
		 * lost. So just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}
static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}
static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}
static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}
/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the
			 * IRQ to respond to the IRQ for the opposite
			 * direction. This will be indicated in the bank
			 * toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_domain_irq(bank->chip.irq.domain, bit);

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}
static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}
static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}
static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	gpiochip_disable_irq(&bank->chip, offset);
}
static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	gpiochip_enable_irq(&bank->chip, offset);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}
static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	seq_puts(p, dev_name(bank->dev));
}
static const struct irq_chip omap_gpio_irq_chip = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_set_wake = omap_gpio_wake_enable,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static const struct irq_chip omap_gpio_irq_chip_nowake = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}
/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}
static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}
static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}
static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}
static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}
static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;

	*bits = val;

	return 0;
}
static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}
static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;
	int ret = -ENOTSUPP;

	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = gpiochip_generic_config(chip, offset, config);
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		debounce = pinconf_to_config_argument(config);
		ret = omap_gpio_debounce(chip, offset, debounce);
		break;
	default:
		break;
	}

	return ret;
}
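/*
 * Consumer-side sketch (not part of this driver): a client holding a
 * gpio_desc can reach the debounce path above through the generic GPIO API,
 * e.g. gpiod_set_debounce(desc, 1000), which the gpiolib core translates
 * into a PIN_CONFIG_INPUT_DEBOUNCE config and passes to
 * omap_gpio_set_config().
 */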
static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}
static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/
static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}
static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
	}
	bank->chip.base = -1;
	bank->chip.ngpio = bank->width;

	irq = &bank->chip.irq;
	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
	else
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret)
		return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");

	irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}
static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.sysconfig	= readl_relaxed(base + regs->sysconfig);
	p->context.ctrl		= readl_relaxed(base + regs->ctrl);
	p->context.oe		= readl_relaxed(base + regs->direction);
	p->context.wake_en	= readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0	= readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1	= readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect	= readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1	= readl_relaxed(base + regs->irqenable);
	p->context.irqenable2	= readl_relaxed(base + regs->irqenable2);
	p->context.dataout	= readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
	writel_relaxed(bank->context.oe, base + regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}
static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	/* Save sysconfig; its runtime value can differ from the init value */
	if (bank->loses_context)
		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}
static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count)
				omap_gpio_restore_context(bank);
			else
				return;
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}
static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret = NOTIFY_OK;
	u32 isr, mask;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (bank->is_suspended)
		goto out_unlock;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		mask = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
		if (isr) {
			ret = NOTIFY_BAD;
			break;
		}
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_gpio_unidle(bank);
		break;
	}

out_unlock:
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return ret;
}
static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.sysconfig =		OMAP24XX_GPIO_SYSCONFIG,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.sysconfig =		OMAP4_GPIO_SYSCONFIG,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 =	OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 =	OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};
static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
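/*
 * Device-tree usage sketch (illustrative only; the node name, address and
 * interrupt below are made up, see the ti,omap4-gpio binding for the
 * authoritative property list):
 *
 *	gpio0: gpio@4a310000 {
 *		compatible = "ti,omap4-gpio";
 *		reg = <0x4a310000 0x200>;
 *		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 */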
static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	int ret;

	pdata = device_get_match_data(dev);

	pdata = pdata ?: dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	bank->dev = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq < 0)
		return bank->irq;

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base)) {
		return PTR_ERR(bank->base);
	}

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}
static void omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);
}
static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
static int __maybe_unused omap_gpio_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (bank->is_suspended)
		return 0;

	bank->needs_resume = 1;

	return omap_gpio_runtime_suspend(dev);
}

static int __maybe_unused omap_gpio_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (!bank->needs_resume)
		return 0;

	bank->needs_resume = 0;

	return omap_gpio_runtime_resume(dev);
}
static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.remove		= omap_gpio_remove,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};
/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");