// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"

#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
#define MTK_EINT_DBNC_SET_DBNC_BITS	4
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)

/*
 * Default register layout used when a SoC driver does not pass its own
 * mtk_eint_regs; the offsets below follow the generic MediaTek EINT block.
 */
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};

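/*
 * Translate an EINT number plus a per-bank register offset into the MMIO
 * address of the 32-bit word that holds that EINT's bit (32 EINTs per word).
 */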
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int eint_base = 0;
	void __iomem *reg;

	if (eint_num >= eint->hw->ap_num)
		eint_base = eint->hw->ap_num;

	reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;

	return reg;
}

static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
		return 1;
	else
		return 0;
}

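/*
 * The hardware has no both-edge trigger mode, so dual-edge interrupts are
 * emulated: after each edge the polarity is flipped to match the current
 * GPIO level, retrying until the level read back no longer changes.
 */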
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}

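/*
 * cur_mask[] mirrors the hardware mask state so that the resume path can
 * restore exactly the set of EINTs that were unmasked at suspend time.
 */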
static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	eint->cur_mask[d->hwirq >> 5] &= ~mask;

	writel(mask, reg);
}

static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	eint->cur_mask[d->hwirq >> 5] |= mask;

	writel(mask, reg);

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);
}

static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}

static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}

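/*
 * Program polarity and sensitivity for the requested trigger type.  Both-edge
 * triggers are flagged in dual_edge[] and emulated by polarity flipping,
 * since the hardware only supports a single edge or level per EINT.
 */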
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->dual_edge[d->hwirq] = 1;
	else
		eint->dual_edge[d->hwirq] = 0;

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);

	return 0;
}

static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	int shift = d->hwirq & 0x1f;
	int reg = d->hwirq >> 5;

	if (on)
		eint->wake_mask[reg] |= BIT(shift);
	else
		eint->wake_mask[reg] &= ~BIT(shift);

	return 0;
}

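/*
 * Apply a software mask snapshot to the hardware: bits set in @buf are
 * unmasked, all other bits are masked.  Used on suspend (wake_mask) and on
 * resume (cur_mask).
 */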
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + (port << 2);
		writel_relaxed(~buf[port], reg + eint->regs->mask_set);
		writel_relaxed(buf[port], reg + eint->regs->mask_clr);
	}
}

static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Can not find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Can not set eint mode\n");
		return err;
	}

	return 0;
}

static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}

static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};

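/*
 * Enable all EINTs in the domain-enable registers, one 32-bit register per
 * group of 32 interrupts.
 */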
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *reg = eint->base + eint->regs->dom_en;
	unsigned int i;

	for (i = 0; i < eint->hw->ap_num; i += 32) {
		writel(0xffffffff, reg);
		reg += 4;
	}

	return 0;
}

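/*
 * Debounce control is packed four EINTs per 32-bit register, one byte each.
 * If debouncing is enabled for this EINT, reset its hardware debounce
 * counter after the interrupt has been handled.
 */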
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;

	ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
		writel(rst, eint->base + ctrl_offset);
	}
}

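/*
 * Chained handler for the parent interrupt: walk the status registers 32
 * EINTs at a time and dispatch each pending bit to its mapped virq.
 */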
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, mask_offset, index, virq;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			mask_offset = eint_num >> 5;
			index = eint_num + offset;
			virq = irq_find_mapping(eint->domain, index);
			status &= ~BIT(offset);

			/*
			 * If we get an interrupt on a pin that was only
			 * required for wake (but no real interrupt
			 * requested), mask the interrupt (as mtk_eint_resume
			 * would do anyway later in the resume sequence).
			 */
			if (eint->wake_mask[mask_offset] & BIT(offset) &&
			    !(eint->cur_mask[mask_offset] & BIT(offset))) {
				writel_relaxed(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->mask_set);
			}

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear the soft-irq in case we raised it
				 * last time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_irq(virq);

			if (dual_edge) {
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If the level changed, we might have lost
				 * one edge interrupt; raise it through the
				 * soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}

int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}

int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}

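/*
 * Set the debounce time for an EINT.  The requested time is rounded up to
 * the nearest supported step in debounce_time[]; the EINT is temporarily
 * masked while the debounce counter is reprogrammed and reset.
 */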
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
						     64000, 128000, 256000};
	struct irq_data *d;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = ARRAY_SIZE(debounce_time);
	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
		if (debounce <= debounce_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for the hw debounce counter
	 * reset to take effect.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}

int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}

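/*
 * One-time setup: allocate the mask bookkeeping arrays, create a linear irq
 * domain covering all EINTs, map every EINT to a virq with this irqchip and
 * hook the chained handler onto the parent interrupt.
 */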
int mtk_eint_do_init(struct mtk_eint *eint)
{
	int i;

	/* If the client doesn't assign specific regs, use the generic ones */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				       sizeof(*eint->wake_mask), GFP_KERNEL);
	if (!eint->wake_mask)
		return -ENOMEM;

	eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				      sizeof(*eint->cur_mask), GFP_KERNEL);
	if (!eint->cur_mask)
		return -ENOMEM;

	eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
				       sizeof(int), GFP_KERNEL);
	if (!eint->dual_edge)
		return -ENOMEM;

	eint->domain = irq_domain_add_linear(eint->dev->of_node,
					     eint->hw->ap_num,
					     &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		return -ENOMEM;

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		int virq = irq_create_mapping(eint->domain, i);

		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;
}