// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"
/* Trigger-mode encodings used by the SENS registers */
#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
/* Debounce-set register layout: 8 bits per EINT, within each byte: */
#define MTK_EINT_DBNC_SET_DBNC_BITS	4		/* debounce index field shift */
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)	/* reset debounce counter */
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)	/* debounce enable */
30 static const struct mtk_eint_regs mtk_generic_eint_regs
= {
51 static void __iomem
*mtk_eint_get_offset(struct mtk_eint
*eint
,
52 unsigned int eint_num
,
55 unsigned int eint_base
= 0;
58 if (eint_num
>= eint
->hw
->ap_num
)
59 eint_base
= eint
->hw
->ap_num
;
61 reg
= eint
->base
+ offset
+ ((eint_num
- eint_base
) / 32) * 4;
66 static unsigned int mtk_eint_can_en_debounce(struct mtk_eint
*eint
,
67 unsigned int eint_num
)
70 unsigned int bit
= BIT(eint_num
% 32);
71 void __iomem
*reg
= mtk_eint_get_offset(eint
, eint_num
,
75 sens
= MTK_EINT_LEVEL_SENSITIVE
;
77 sens
= MTK_EINT_EDGE_SENSITIVE
;
79 if (eint_num
< eint
->hw
->db_cnt
&& sens
!= MTK_EINT_EDGE_SENSITIVE
)
85 static int mtk_eint_flip_edge(struct mtk_eint
*eint
, int hwirq
)
87 int start_level
, curr_level
;
88 unsigned int reg_offset
;
89 u32 mask
= BIT(hwirq
& 0x1f);
90 u32 port
= (hwirq
>> 5) & eint
->hw
->port_mask
;
91 void __iomem
*reg
= eint
->base
+ (port
<< 2);
93 curr_level
= eint
->gpio_xlate
->get_gpio_state(eint
->pctl
, hwirq
);
96 start_level
= curr_level
;
98 reg_offset
= eint
->regs
->pol_clr
;
100 reg_offset
= eint
->regs
->pol_set
;
101 writel(mask
, reg
+ reg_offset
);
103 curr_level
= eint
->gpio_xlate
->get_gpio_state(eint
->pctl
,
105 } while (start_level
!= curr_level
);
110 static void mtk_eint_mask(struct irq_data
*d
)
112 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
113 u32 mask
= BIT(d
->hwirq
& 0x1f);
114 void __iomem
*reg
= mtk_eint_get_offset(eint
, d
->hwirq
,
115 eint
->regs
->mask_set
);
117 eint
->cur_mask
[d
->hwirq
>> 5] &= ~mask
;
122 static void mtk_eint_unmask(struct irq_data
*d
)
124 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
125 u32 mask
= BIT(d
->hwirq
& 0x1f);
126 void __iomem
*reg
= mtk_eint_get_offset(eint
, d
->hwirq
,
127 eint
->regs
->mask_clr
);
129 eint
->cur_mask
[d
->hwirq
>> 5] |= mask
;
133 if (eint
->dual_edge
[d
->hwirq
])
134 mtk_eint_flip_edge(eint
, d
->hwirq
);
137 static unsigned int mtk_eint_get_mask(struct mtk_eint
*eint
,
138 unsigned int eint_num
)
140 unsigned int bit
= BIT(eint_num
% 32);
141 void __iomem
*reg
= mtk_eint_get_offset(eint
, eint_num
,
144 return !!(readl(reg
) & bit
);
147 static void mtk_eint_ack(struct irq_data
*d
)
149 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
150 u32 mask
= BIT(d
->hwirq
& 0x1f);
151 void __iomem
*reg
= mtk_eint_get_offset(eint
, d
->hwirq
,
157 static int mtk_eint_set_type(struct irq_data
*d
, unsigned int type
)
159 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
160 u32 mask
= BIT(d
->hwirq
& 0x1f);
163 if (((type
& IRQ_TYPE_EDGE_BOTH
) && (type
& IRQ_TYPE_LEVEL_MASK
)) ||
164 ((type
& IRQ_TYPE_LEVEL_MASK
) == IRQ_TYPE_LEVEL_MASK
)) {
166 "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
167 d
->irq
, d
->hwirq
, type
);
171 if ((type
& IRQ_TYPE_EDGE_BOTH
) == IRQ_TYPE_EDGE_BOTH
)
172 eint
->dual_edge
[d
->hwirq
] = 1;
174 eint
->dual_edge
[d
->hwirq
] = 0;
176 if (type
& (IRQ_TYPE_LEVEL_LOW
| IRQ_TYPE_EDGE_FALLING
)) {
177 reg
= mtk_eint_get_offset(eint
, d
->hwirq
, eint
->regs
->pol_clr
);
180 reg
= mtk_eint_get_offset(eint
, d
->hwirq
, eint
->regs
->pol_set
);
184 if (type
& (IRQ_TYPE_EDGE_RISING
| IRQ_TYPE_EDGE_FALLING
)) {
185 reg
= mtk_eint_get_offset(eint
, d
->hwirq
, eint
->regs
->sens_clr
);
188 reg
= mtk_eint_get_offset(eint
, d
->hwirq
, eint
->regs
->sens_set
);
192 if (eint
->dual_edge
[d
->hwirq
])
193 mtk_eint_flip_edge(eint
, d
->hwirq
);
198 static int mtk_eint_irq_set_wake(struct irq_data
*d
, unsigned int on
)
200 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
201 int shift
= d
->hwirq
& 0x1f;
202 int reg
= d
->hwirq
>> 5;
205 eint
->wake_mask
[reg
] |= BIT(shift
);
207 eint
->wake_mask
[reg
] &= ~BIT(shift
);
212 static void mtk_eint_chip_write_mask(const struct mtk_eint
*eint
,
213 void __iomem
*base
, u32
*buf
)
218 for (port
= 0; port
< eint
->hw
->ports
; port
++) {
219 reg
= base
+ (port
<< 2);
220 writel_relaxed(~buf
[port
], reg
+ eint
->regs
->mask_set
);
221 writel_relaxed(buf
[port
], reg
+ eint
->regs
->mask_clr
);
225 static int mtk_eint_irq_request_resources(struct irq_data
*d
)
227 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
228 struct gpio_chip
*gpio_c
;
232 err
= eint
->gpio_xlate
->get_gpio_n(eint
->pctl
, d
->hwirq
,
235 dev_err(eint
->dev
, "Can not find pin\n");
239 err
= gpiochip_lock_as_irq(gpio_c
, gpio_n
);
241 dev_err(eint
->dev
, "unable to lock HW IRQ %lu for IRQ\n",
246 err
= eint
->gpio_xlate
->set_gpio_as_eint(eint
->pctl
, d
->hwirq
);
248 dev_err(eint
->dev
, "Can not eint mode\n");
255 static void mtk_eint_irq_release_resources(struct irq_data
*d
)
257 struct mtk_eint
*eint
= irq_data_get_irq_chip_data(d
);
258 struct gpio_chip
*gpio_c
;
261 eint
->gpio_xlate
->get_gpio_n(eint
->pctl
, d
->hwirq
, &gpio_n
,
264 gpiochip_unlock_as_irq(gpio_c
, gpio_n
);
267 static struct irq_chip mtk_eint_irq_chip
= {
269 .irq_disable
= mtk_eint_mask
,
270 .irq_mask
= mtk_eint_mask
,
271 .irq_unmask
= mtk_eint_unmask
,
272 .irq_ack
= mtk_eint_ack
,
273 .irq_set_type
= mtk_eint_set_type
,
274 .irq_set_wake
= mtk_eint_irq_set_wake
,
275 .irq_request_resources
= mtk_eint_irq_request_resources
,
276 .irq_release_resources
= mtk_eint_irq_release_resources
,
279 static unsigned int mtk_eint_hw_init(struct mtk_eint
*eint
)
281 void __iomem
*reg
= eint
->base
+ eint
->regs
->dom_en
;
284 for (i
= 0; i
< eint
->hw
->ap_num
; i
+= 32) {
285 writel(0xffffffff, reg
);
293 mtk_eint_debounce_process(struct mtk_eint
*eint
, int index
)
295 unsigned int rst
, ctrl_offset
;
296 unsigned int bit
, dbnc
;
298 ctrl_offset
= (index
/ 4) * 4 + eint
->regs
->dbnc_ctrl
;
299 dbnc
= readl(eint
->base
+ ctrl_offset
);
300 bit
= MTK_EINT_DBNC_SET_EN
<< ((index
% 4) * 8);
301 if ((bit
& dbnc
) > 0) {
302 ctrl_offset
= (index
/ 4) * 4 + eint
->regs
->dbnc_set
;
303 rst
= MTK_EINT_DBNC_RST_BIT
<< ((index
% 4) * 8);
304 writel(rst
, eint
->base
+ ctrl_offset
);
308 static void mtk_eint_irq_handler(struct irq_desc
*desc
)
310 struct irq_chip
*chip
= irq_desc_get_chip(desc
);
311 struct mtk_eint
*eint
= irq_desc_get_handler_data(desc
);
312 unsigned int status
, eint_num
;
313 int offset
, mask_offset
, index
, virq
;
314 void __iomem
*reg
= mtk_eint_get_offset(eint
, 0, eint
->regs
->stat
);
315 int dual_edge
, start_level
, curr_level
;
317 chained_irq_enter(chip
, desc
);
318 for (eint_num
= 0; eint_num
< eint
->hw
->ap_num
; eint_num
+= 32,
322 offset
= __ffs(status
);
323 mask_offset
= eint_num
>> 5;
324 index
= eint_num
+ offset
;
325 virq
= irq_find_mapping(eint
->domain
, index
);
326 status
&= ~BIT(offset
);
329 * If we get an interrupt on pin that was only required
330 * for wake (but no real interrupt requested), mask the
331 * interrupt (as would mtk_eint_resume do anyway later
332 * in the resume sequence).
334 if (eint
->wake_mask
[mask_offset
] & BIT(offset
) &&
335 !(eint
->cur_mask
[mask_offset
] & BIT(offset
))) {
336 writel_relaxed(BIT(offset
), reg
-
338 eint
->regs
->mask_set
);
341 dual_edge
= eint
->dual_edge
[index
];
344 * Clear soft-irq in case we raised it last
347 writel(BIT(offset
), reg
- eint
->regs
->stat
+
348 eint
->regs
->soft_clr
);
351 eint
->gpio_xlate
->get_gpio_state(eint
->pctl
,
355 generic_handle_irq(virq
);
358 curr_level
= mtk_eint_flip_edge(eint
, index
);
361 * If level changed, we might lost one edge
362 * interrupt, raised it through soft-irq.
364 if (start_level
!= curr_level
)
365 writel(BIT(offset
), reg
-
367 eint
->regs
->soft_set
);
370 if (index
< eint
->hw
->db_cnt
)
371 mtk_eint_debounce_process(eint
, index
);
374 chained_irq_exit(chip
, desc
);
377 int mtk_eint_do_suspend(struct mtk_eint
*eint
)
379 mtk_eint_chip_write_mask(eint
, eint
->base
, eint
->wake_mask
);
383 EXPORT_SYMBOL_GPL(mtk_eint_do_suspend
);
385 int mtk_eint_do_resume(struct mtk_eint
*eint
)
387 mtk_eint_chip_write_mask(eint
, eint
->base
, eint
->cur_mask
);
391 EXPORT_SYMBOL_GPL(mtk_eint_do_resume
);
393 int mtk_eint_set_debounce(struct mtk_eint
*eint
, unsigned long eint_num
,
394 unsigned int debounce
)
396 int virq
, eint_offset
;
397 unsigned int set_offset
, bit
, clr_bit
, clr_offset
, rst
, i
, unmask
,
399 static const unsigned int debounce_time
[] = {500, 1000, 16000, 32000,
400 64000, 128000, 256000};
403 virq
= irq_find_mapping(eint
->domain
, eint_num
);
404 eint_offset
= (eint_num
% 4) * 8;
405 d
= irq_get_irq_data(virq
);
407 set_offset
= (eint_num
/ 4) * 4 + eint
->regs
->dbnc_set
;
408 clr_offset
= (eint_num
/ 4) * 4 + eint
->regs
->dbnc_clr
;
410 if (!mtk_eint_can_en_debounce(eint
, eint_num
))
413 dbnc
= ARRAY_SIZE(debounce_time
);
414 for (i
= 0; i
< ARRAY_SIZE(debounce_time
); i
++) {
415 if (debounce
<= debounce_time
[i
]) {
421 if (!mtk_eint_get_mask(eint
, eint_num
)) {
428 clr_bit
= 0xff << eint_offset
;
429 writel(clr_bit
, eint
->base
+ clr_offset
);
431 bit
= ((dbnc
<< MTK_EINT_DBNC_SET_DBNC_BITS
) | MTK_EINT_DBNC_SET_EN
) <<
433 rst
= MTK_EINT_DBNC_RST_BIT
<< eint_offset
;
434 writel(rst
| bit
, eint
->base
+ set_offset
);
437 * Delay a while (more than 2T) to wait for hw debounce counter reset
446 EXPORT_SYMBOL_GPL(mtk_eint_set_debounce
);
448 int mtk_eint_find_irq(struct mtk_eint
*eint
, unsigned long eint_n
)
452 irq
= irq_find_mapping(eint
->domain
, eint_n
);
458 EXPORT_SYMBOL_GPL(mtk_eint_find_irq
);
460 int mtk_eint_do_init(struct mtk_eint
*eint
)
464 /* If clients don't assign a specific regs, let's use generic one */
466 eint
->regs
= &mtk_generic_eint_regs
;
468 eint
->wake_mask
= devm_kcalloc(eint
->dev
, eint
->hw
->ports
,
469 sizeof(*eint
->wake_mask
), GFP_KERNEL
);
470 if (!eint
->wake_mask
)
473 eint
->cur_mask
= devm_kcalloc(eint
->dev
, eint
->hw
->ports
,
474 sizeof(*eint
->cur_mask
), GFP_KERNEL
);
478 eint
->dual_edge
= devm_kcalloc(eint
->dev
, eint
->hw
->ap_num
,
479 sizeof(int), GFP_KERNEL
);
480 if (!eint
->dual_edge
)
483 eint
->domain
= irq_domain_add_linear(eint
->dev
->of_node
,
485 &irq_domain_simple_ops
, NULL
);
489 mtk_eint_hw_init(eint
);
490 for (i
= 0; i
< eint
->hw
->ap_num
; i
++) {
491 int virq
= irq_create_mapping(eint
->domain
, i
);
493 irq_set_chip_and_handler(virq
, &mtk_eint_irq_chip
,
495 irq_set_chip_data(virq
, eint
);
498 irq_set_chained_handler_and_data(eint
->irq
, mtk_eint_irq_handler
,
503 EXPORT_SYMBOL_GPL(mtk_eint_do_init
);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek EINT Driver");