// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 MediaTek Inc.
 * Author: Youlin.Pei <youlin.pei@mediatek.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

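/*
 * The CIRQ shadows the external (SPI) interrupt lines in front of the
 * parent GIC. During normal operation it stays disabled and everything
 * flows straight to the GIC; across system suspend it is switched into
 * edge-only recording mode so that wakeup edges arriving while the
 * system sleeps are latched and can be replayed ("flushed") to the GIC
 * on resume.
 */
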
enum mtk_cirq_regoffs_index {
	CIRQ_STA,
	CIRQ_ACK,
	CIRQ_MASK_SET,
	CIRQ_MASK_CLR,
	CIRQ_SENS_SET,
	CIRQ_SENS_CLR,
	CIRQ_POL_SET,
	CIRQ_POL_CLR,
	CIRQ_CONTROL
};

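/*
 * Per-SoC register offset tables, indexed by the enum above. The
 * matching entry in mtk_cirq_of_match selects which layout applies;
 * mtk_cirq_reg() adds the chosen offset to the mapped register base.
 */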
static const u32 mtk_cirq_regoffs_v1[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x40,
	[CIRQ_MASK_SET]	= 0xc0,
	[CIRQ_MASK_CLR]	= 0x100,
	[CIRQ_SENS_SET]	= 0x180,
	[CIRQ_SENS_CLR]	= 0x1c0,
	[CIRQ_POL_SET]	= 0x240,
	[CIRQ_POL_CLR]	= 0x280,
	[CIRQ_CONTROL]	= 0x300,
};

static const u32 mtk_cirq_regoffs_v2[] = {
	[CIRQ_STA]	= 0x0,
	[CIRQ_ACK]	= 0x80,
	[CIRQ_MASK_SET]	= 0x180,
	[CIRQ_MASK_CLR]	= 0x200,
	[CIRQ_SENS_SET]	= 0x300,
	[CIRQ_SENS_CLR]	= 0x380,
	[CIRQ_POL_SET]	= 0x480,
	[CIRQ_POL_CLR]	= 0x500,
	[CIRQ_CONTROL]	= 0x600,
};

#define CIRQ_EN		0x1
#define CIRQ_EDGE	0x2
#define CIRQ_FLUSH	0x4

struct mtk_cirq_chip_data {
	void __iomem *base;
	unsigned int ext_irq_start;
	unsigned int ext_irq_end;
	const u32 *offsets;
	struct irq_domain *domain;
};

static struct mtk_cirq_chip_data *cirq_data;

static void __iomem *mtk_cirq_reg(struct mtk_cirq_chip_data *chip_data,
				  enum mtk_cirq_regoffs_index idx)
{
	return chip_data->base + chip_data->offsets[idx];
}

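/*
 * The per-interrupt registers (mask/sense/polarity/ack) are banked as
 * arrays of 32-bit words with one bit per interrupt: interrupt N lives
 * in word N / 32, bit N % 32.
 */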
static void __iomem *mtk_cirq_irq_reg(struct mtk_cirq_chip_data *chip_data,
				      enum mtk_cirq_regoffs_index idx,
				      unsigned int cirq_num)
{
	return mtk_cirq_reg(chip_data, idx) + (cirq_num / 32) * 4;
}

static void mtk_cirq_write_mask(struct irq_data *data, enum mtk_cirq_regoffs_index idx)
{
	struct mtk_cirq_chip_data *chip_data = data->chip_data;
	unsigned int cirq_num = data->hwirq;
	u32 mask = 1 << (cirq_num % 32);

	writel_relaxed(mask, mtk_cirq_irq_reg(chip_data, idx, cirq_num));
}

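/*
 * The CIRQ domain is stacked on top of the GIC domain: mask/unmask and
 * trigger-type changes update the CIRQ shadow registers first and are
 * then forwarded to the parent irqchip.
 */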
static void mtk_cirq_mask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
	irq_chip_mask_parent(data);
}

static void mtk_cirq_unmask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
	irq_chip_unmask_parent(data);
}

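/*
 * Trigger types map onto two set/clear register pairs: SENS selects
 * level (set) vs. edge (clear) sensitivity, POL selects active-high /
 * rising (set) vs. active-low / falling (clear) polarity.
 */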
static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
{
	int ret;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_EDGE_RISING:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		mtk_cirq_write_mask(data, CIRQ_POL_CLR);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		mtk_cirq_write_mask(data, CIRQ_POL_SET);
		mtk_cirq_write_mask(data, CIRQ_SENS_SET);
		break;
	default:
		break;
	}

	data = data->parent_data;
	ret = data->chip->irq_set_type(data, type);
	return ret;
}

static struct irq_chip mtk_cirq_chip = {
	.name			= "MT_CIRQ",
	.irq_mask		= mtk_cirq_mask,
	.irq_unmask		= mtk_cirq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= mtk_cirq_set_type,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

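/*
 * The firmware cells follow the GIC three-cell format <type number
 * flags>: only SPIs (type 0) inside the "mediatek,ext-irq-range"
 * window are accepted, and the CIRQ hwirq is the offset of the SPI
 * number from the start of that window.
 */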
static int mtk_cirq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		/* cirq supported irq number check */
		if (fwspec->param[1] < cirq_data->ext_irq_start ||
		    fwspec->param[1] > cirq_data->ext_irq_end)
			return -EINVAL;

		*hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;
	struct irq_fwspec parent_fwspec = *fwspec;

	ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	if (WARN_ON(nr_irqs != 1))
		return -EINVAL;

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &mtk_cirq_chip,
				      domain->host_data);

	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops cirq_domain_ops = {
	.translate	= mtk_cirq_domain_translate,
	.alloc		= mtk_cirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

#ifdef CONFIG_PM_SLEEP
static int mtk_cirq_suspend(void)
{
	void __iomem *reg;
	u32 value, mask;
	unsigned int irq, hwirq_num;
	bool pending, masked;
	int i, pendret, maskret;

	/*
	 * When external interrupts happen, the CIRQ records their status
	 * even if the CIRQ is not enabled. When the flush command is
	 * executed, the CIRQ resends signals according to the recorded
	 * status, so if stale status is not cleared the CIRQ will resend
	 * spurious signals.
	 *
	 * arch_suspend_disable_irqs() is called before the CIRQ suspend
	 * callback. If all the status bits were simply cleared, external
	 * interrupts which happened between arch_suspend_disable_irqs()
	 * and the CIRQ suspend callback would be lost. The following steps
	 * avoid that:
	 *
	 * - Iterate over all the interrupts the CIRQ supports;
	 * - For each interrupt, inspect its pending and masked status at
	 *   GIC level;
	 * - If it is pending and unmasked, it arrived between
	 *   arch_suspend_disable_irqs() and the CIRQ suspend callback:
	 *   don't ACK it. Otherwise, ACK it.
	 */
	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	for (i = 0; i < hwirq_num; i++) {
		irq = irq_find_mapping(cirq_data->domain, i);
		if (irq) {
			pendret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_PENDING,
							&pending);

			maskret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_MASKED,
							&masked);

			if (pendret == 0 && maskret == 0 &&
			    (pending && !masked))
				continue;
		}

		reg = mtk_cirq_irq_reg(cirq_data, CIRQ_ACK, i);
		mask = 1 << (i % 32);
		writel_relaxed(mask, reg);
	}

	/* set edge_only mode, record edge-triggered interrupts */
	/* enable cirq */
	reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
	value = readl_relaxed(reg);
	value |= (CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, reg);

	return 0;
}

static void mtk_cirq_resume(void)
{
	void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
	u32 value;

	/* flush recorded interrupts, will send signals to parent controller */
	value = readl_relaxed(reg);
	writel_relaxed(value | CIRQ_FLUSH, reg);

	/* disable cirq */
	value = readl_relaxed(reg);
	value &= ~(CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, reg);
}

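/*
 * syscore suspend/resume callbacks run on one CPU with interrupts
 * disabled, after device suspend and before device resume, so the
 * pending/masked snapshot above is taken as close to the actual sleep
 * transition as possible.
 */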
static struct syscore_ops mtk_cirq_syscore_ops = {
	.suspend	= mtk_cirq_suspend,
	.resume		= mtk_cirq_resume,
};

static void mtk_cirq_syscore_init(void)
{
	register_syscore_ops(&mtk_cirq_syscore_ops);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
#endif

static const struct of_device_id mtk_cirq_of_match[] = {
	{ .compatible = "mediatek,mt2701-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8135-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8173-cirq", .data = &mtk_cirq_regoffs_v1 },
	{ .compatible = "mediatek,mt8192-cirq", .data = &mtk_cirq_regoffs_v2 },
	{ /* sentinel */ }
};

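/*
 * Illustrative device tree node; the unit address, reg window and
 * ext-irq-range values below are examples only, not taken from a real
 * SoC:
 *
 *	cirq: interrupt-controller@10204000 {
 *		compatible = "mediatek,mt2701-cirq", "mediatek,mtk-cirq";
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		interrupt-parent = <&gic>;
 *		reg = <0x10204000 0x1000>;
 *		mediatek,ext-irq-range = <32 200>;
 *	};
 */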
static int __init mtk_cirq_of_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	const struct of_device_id *match;
	unsigned int irq_num;
	int ret;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("mtk_cirq: interrupt-parent not found\n");
		return -EINVAL;
	}

	cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
	if (!cirq_data)
		return -ENOMEM;

	cirq_data->base = of_iomap(node, 0);
	if (!cirq_data->base) {
		pr_err("mtk_cirq: unable to map cirq register\n");
		ret = -ENXIO;
		goto out_free;
	}

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
					 &cirq_data->ext_irq_start);
	if (ret)
		goto out_unmap;

	ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
					 &cirq_data->ext_irq_end);
	if (ret)
		goto out_unmap;

	match = of_match_node(mtk_cirq_of_match, node);
	if (!match) {
		ret = -ENODEV;
		goto out_unmap;
	}
	cirq_data->offsets = match->data;

	irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	domain = irq_domain_add_hierarchy(domain_parent, 0,
					  irq_num, node,
					  &cirq_domain_ops, cirq_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	cirq_data->domain = domain;

	mtk_cirq_syscore_init();

	return 0;

out_unmap:
	iounmap(cirq_data->base);
out_free:
	kfree(cirq_data);
	return ret;
}

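/*
 * Registered via IRQCHIP_DECLARE(), so the init runs at early irqchip
 * probe time for the generic "mediatek,mtk-cirq" compatible; the
 * SoC-specific compatible matched above only selects the register
 * layout.
 */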
IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);