// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *         Sean Wang <sean.wang@mediatek.com>
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"
#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
#define MTK_EINT_DBNC_SET_DBNC_BITS	4
#define MTK_EINT_DBNC_MAX		16
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)
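/*
 * Default register layout of the EINT block.  mtk_eint_do_init() falls back
 * to this table when the SoC-specific driver does not provide its own
 * eint->regs.
 */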
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};
const unsigned int debounce_time_mt2701[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt2701);

const unsigned int debounce_time_mt6765[] = {
	125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6765);

const unsigned int debounce_time_mt6795[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6795);
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int eint_base = 0;
	void __iomem *reg;

	if (eint_num >= eint->hw->ap_num)
		eint_base = eint->hw->ap_num;

	reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;

	return reg;
}
static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
		return 1;
	else
		return 0;
}
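/*
 * IRQ_TYPE_EDGE_BOTH is emulated in software: keep flipping the line's
 * polarity to the opposite of the current GPIO level until the two agree,
 * so the next transition in either direction raises an interrupt again.
 */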
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}
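/*
 * Besides touching the hardware mask registers, mask/unmask keep cur_mask[]
 * up to date; it is the software view of enabled lines that
 * mtk_eint_do_resume() restores and that the chained handler checks for
 * wake-only pins.
 */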
static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	eint->cur_mask[d->hwirq >> 5] &= ~mask;

	writel(mask, reg);
}
static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	eint->cur_mask[d->hwirq >> 5] |= mask;

	writel(mask, reg);

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);
}
static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}
static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	bool masked;
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->dual_edge[d->hwirq] = 1;
	else
		eint->dual_edge[d->hwirq] = 0;

	if (!mtk_eint_get_mask(eint, d->hwirq)) {
		mtk_eint_mask(d);
		masked = false;
	} else {
		masked = true;
	}

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	mtk_eint_ack(d);
	if (!masked)
		mtk_eint_unmask(d);

	return 0;
}
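/*
 * wake_mask[] is only bookkeeping here; mtk_eint_do_suspend() writes it to
 * the hardware mask registers so that only wake-enabled lines can fire
 * while suspended.
 */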
static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	int shift = d->hwirq & 0x1f;
	int reg = d->hwirq >> 5;

	if (on)
		eint->wake_mask[reg] |= BIT(shift);
	else
		eint->wake_mask[reg] &= ~BIT(shift);

	return 0;
}
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + (port << 2);
		writel_relaxed(~buf[port], reg + eint->regs->mask_set);
		writel_relaxed(buf[port], reg + eint->regs->mask_clr);
	}
}
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Can not find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Can not set EINT mode\n");
		return err;
	}

	return 0;
}
static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}
static struct irq_chip mtk_eint_irq_chip = {
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};
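/*
 * Reset the block to a sane state: enable the interrupt domain and mask
 * every EINT line, one 32-bit register per 32 lines.
 */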
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *dom_en = eint->base + eint->regs->dom_en;
	void __iomem *mask_set = eint->base + eint->regs->mask_set;
	unsigned int i;

	for (i = 0; i < eint->hw->ap_num; i += 32) {
		writel(0xffffffff, dom_en);
		writel(0xffffffff, mask_set);
		dom_en += 4;
		mask_set += 4;
	}

	return 0;
}
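/*
 * Debounce control is packed four EINTs per 32-bit register, one byte each.
 * If debounce is enabled for @index, reset its counter so a fresh debounce
 * period starts after this interrupt.
 */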
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;

	ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
		writel(rst, eint->base + ctrl_offset);
	}
}
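/*
 * Chained handler for the parent interrupt: walk the status registers 32
 * lines at a time and demultiplex every pending bit to its mapped virq.
 * Lines armed only for wake are re-masked here, and dual-edge lines flip
 * polarity and, if an edge may have been missed, replay it through the
 * soft-irq registers.
 */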
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, mask_offset, index;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			mask_offset = eint_num >> 5;
			index = eint_num + offset;
			status &= ~BIT(offset);

			/*
			 * If we get an interrupt on a pin that was only
			 * required for wake (but no real interrupt was
			 * requested), mask the interrupt (as mtk_eint_resume
			 * would do anyway later in the resume sequence).
			 */
			if (eint->wake_mask[mask_offset] & BIT(offset) &&
			    !(eint->cur_mask[mask_offset] & BIT(offset))) {
				writel_relaxed(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->mask_set);
			}

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear the soft-irq in case we raised it
				 * last time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_domain_irq(eint->domain, index);

			if (dual_edge) {
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If the level changed, we might have lost
				 * one edge interrupt; raise it through the
				 * soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);

int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_resume);
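/*
 * Round the requested debounce time (in microseconds) up to the nearest
 * value supported by the SoC's db_time table, then reprogram and reset the
 * hardware debounce counter, masking the line while doing so if it was
 * unmasked.
 */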
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	struct irq_data *d;

	if (!eint->hw->db_time)
		return -EOPNOTSUPP;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = eint->num_db_time;
	for (i = 0; i < eint->num_db_time; i++) {
		if (debounce <= eint->hw->db_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for the hw debounce counter
	 * reset to take effect.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
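/*
 * mtk_eint_do_init() - set up the EINT irqchip for a SoC pinctrl driver.
 *
 * The caller is expected to have filled in eint->dev, eint->base, eint->irq,
 * eint->hw, eint->pctl, eint->gpio_xlate and, optionally, eint->regs before
 * calling this.  An illustrative sketch only (the per-SoC data names below
 * are hypothetical; see struct mtk_eint in mtk-eint.h for the real fields):
 *
 *	eint->dev = &pdev->dev;
 *	eint->base = devm_platform_ioremap_resource(pdev, 0);
 *	eint->irq = platform_get_irq(pdev, 0);
 *	eint->hw = &soc_eint_hw;
 *	eint->gpio_xlate = &soc_eint_xlate;
 *	err = mtk_eint_do_init(eint);
 */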
int mtk_eint_do_init(struct mtk_eint *eint)
{
	unsigned int i;

	/* If clients don't assign a specific set of regs, use the generic one */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				       sizeof(*eint->wake_mask), GFP_KERNEL);
	if (!eint->wake_mask)
		return -ENOMEM;

	eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				      sizeof(*eint->cur_mask), GFP_KERNEL);
	if (!eint->cur_mask)
		return -ENOMEM;

	eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
				       sizeof(int), GFP_KERNEL);
	if (!eint->dual_edge)
		return -ENOMEM;

	eint->domain = irq_domain_add_linear(eint->dev->of_node,
					     eint->hw->ap_num,
					     &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		return -ENOMEM;

	if (eint->hw->db_time) {
		for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
			if (eint->hw->db_time[i] == 0)
				break;
		eint->num_db_time = i;
	}

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		int virq = irq_create_mapping(eint->domain, i);

		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek EINT Driver");