// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L IRQC Driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation.
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#define IRQC_IRQ_START 1
#define IRQC_IRQ_COUNT 8
#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT)
#define IRQC_TINT_COUNT 32
#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT)
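
/*
 * Linux hwirq layout within this domain (derived from the counts above):
 * hwirq 1..8  -> external pins IRQ0..IRQ7
 * hwirq 9..40 -> GPIO interrupts TINT0..TINT31
 * hwirq 0 is unused.
 */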
#define ISCR 0x10
#define IITSR 0x14
#define TSCR 0x20
#define TITSR(n) (0x24 + (n) * 4)
#define TITSR0_MAX_INT 16
#define TITSEL_WIDTH 0x2
#define TSSR(n) (0x30 + ((n) * 4))
#define TIEN BIT(7)
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3
#define IMSK 0x10010
#define TMSK 0x10020

#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)
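
/*
 * Each TSSR register packs four 8-bit TSSEL fields, one per TINT, with the
 * TIEN enable bit living in the same byte. TINT n is therefore configured
 * in byte (n % 4) of TSSR(n / 4); e.g. TINT13 lives in byte 1 of TSSR(3).
 */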
#define TITSR_TITSEL_EDGE_RISING 0
#define TITSR_TITSEL_EDGE_FALLING 1
#define TITSR_TITSEL_LEVEL_HIGH 2
#define TITSR_TITSEL_LEVEL_LOW 3
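
/*
 * Each TINT gets a TITSEL_WIDTH (2-bit) sense field: TITSR(0) covers
 * TINT0..TINT15 and TITSR(1) covers TINT16..TINT31, which is why
 * rzg2l_tint_set_edge() subtracts TITSR0_MAX_INT and moves to the second
 * register for the upper half.
 */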
#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2))
#define IITSR_IITSEL_LEVEL_LOW 0
#define IITSR_IITSEL_EDGE_FALLING 1
#define IITSR_IITSEL_EDGE_RISING 2
#define IITSR_IITSEL_EDGE_BOTH 3
#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
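
/*
 * IITSR holds one 2-bit IITSEL sense field per external IRQ pin, at bits
 * [2n+1:2n] for IRQn; e.g. IRQ3's sense select occupies bits [7:6].
 */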
#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
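
/*
 * For TINTs the pinctrl child encodes both values in one cell (see
 * rzg2l_irqc_alloc()): bits [15:0] carry the hwirq and bits [31:16] the
 * GPIOINT/TINT source. For example, GPIOINT5 routed to hwirq 9 arrives as
 * (5 << 16) | 9, so TINT_EXTRACT_GPIOINT() yields 5 and
 * TINT_EXTRACT_HWIRQ() yields 9.
 */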
/**
 * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
 * @iitsr: IITSR register
 * @titsr: TITSR registers
 */
struct rzg2l_irqc_reg_cache {
	u32	iitsr;
	u32	titsr[2];
};
/**
 * struct rzg2l_irqc_priv - IRQ controller private data structure
 * @base: Controller's base address
 * @irqchip: Pointer to struct irq_chip
 * @fwspec: IRQ firmware specific data
 * @lock: Lock to serialize access to hardware registers
 * @cache: Registers cache for suspend/resume
 */
static struct rzg2l_irqc_priv {
	void __iomem			*base;
	const struct irq_chip		*irqchip;
	struct irq_fwspec		fwspec[IRQC_NUM_IRQ];
	raw_spinlock_t			lock;
	struct rzg2l_irqc_reg_cache	cache;
} *rzg2l_irqc_data;
static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}
static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	unsigned int hw_irq = hwirq - IRQC_IRQ_START;
	u32 bit = BIT(hw_irq);
	u32 iitsr, iscr;

	iscr = readl_relaxed(priv->base + ISCR);
	iitsr = readl_relaxed(priv->base + IITSR);

	/*
	 * ISCR can only be cleared if the type is falling-edge, rising-edge or
	 * falling/rising-edge.
	 */
	if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
		writel_relaxed(iscr & ~bit, priv->base + ISCR);
		/*
		 * Enforce that the posted write is flushed to prevent that the
		 * just handled interrupt is raised again.
		 */
		readl_relaxed(priv->base + ISCR);
	}
}
static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);
	u32 reg;

	reg = readl_relaxed(priv->base + TSCR);
	if (reg & bit) {
		writel_relaxed(reg & ~bit, priv->base + TSCR);
		/*
		 * Enforce that the posted write is flushed to prevent that the
		 * just handled interrupt is raised again.
		 */
		readl_relaxed(priv->base + TSCR);
	}
}
static void rzg2l_irqc_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		rzg2l_clear_irq_int(priv, hw_irq);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		rzg2l_clear_tint_int(priv, hw_irq);
	raw_spin_unlock(&priv->lock);
	irq_chip_eoi_parent(d);
}
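
/*
 * The RZ/Five (R9A07G043F) variant adds IMSK/TMSK mask registers on top of
 * the common IRQC block, so its mask/unmask/enable/disable callbacks below
 * manage those bits in addition to the parent irqchip state.
 */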
static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					   unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
}

static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					     unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
}

static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					    unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
}

static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					      unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
}
static void rzfive_irqc_mask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_mask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_mask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_mask_parent(d);
}
static void rzfive_irqc_unmask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_unmask_parent(d);
}
static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
		u32 offset = hwirq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_tint_interrupt(priv, hwirq);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	} else {
		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_irq_interrupt(priv, hwirq);
		raw_spin_unlock(&priv->lock);
	}
}
static void rzfive_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzfive_tint_irq_endisable(d, false);
}
static void rzfive_irqc_irq_enable(struct irq_data *d)
{
	rzfive_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}
static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
}
static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzg2l_tint_irq_endisable(d, false);
}
static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
	rzg2l_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 iitseln = hwirq - IRQC_IRQ_START;
	bool clear_irq_int = false;
	u32 sense, tmp;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = IITSR_IITSEL_LEVEL_LOW;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = IITSR_IITSEL_EDGE_FALLING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = IITSR_IITSEL_EDGE_RISING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		sense = IITSR_IITSEL_EDGE_BOTH;
		clear_irq_int = true;
		break;

	default:
		return -EINVAL;
	}

	raw_spin_lock(&priv->lock);
	tmp = readl_relaxed(priv->base + IITSR);
	tmp &= ~IITSR_IITSEL_MASK(iitseln);
	tmp |= IITSR_IITSEL(iitseln, sense);
	if (clear_irq_int)
		rzg2l_clear_irq_int(priv, hwirq);
	writel_relaxed(tmp, priv->base + IITSR);
	raw_spin_unlock(&priv->lock);

	return 0;
}
static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
						  u32 reg, u32 tssr_offset, u8 tssr_index)
{
	u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
	u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));

	/* Clear the relevant byte in reg */
	reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
	/* Set TINT and leave TIEN clear */
	reg |= tint << TSSEL_SHIFT(tssr_offset);
	writel_relaxed(reg, priv->base + TSSR(tssr_index));

	return reg | tien;
}
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 titseln = hwirq - IRQC_TINT_START;
	u32 tssr_offset = TSSR_OFFSET(titseln);
	u8 tssr_index = TSSR_INDEX(titseln);
	u8 index, sense;
	u32 reg, tssr;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		sense = TITSR_TITSEL_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = TITSR_TITSEL_EDGE_FALLING;
		break;

	default:
		return -EINVAL;
	}

	index = 0;
	if (titseln >= TITSR0_MAX_INT) {
		titseln -= TITSR0_MAX_INT;
		index = 1;
	}

	raw_spin_lock(&priv->lock);
	tssr = readl_relaxed(priv->base + TSSR(tssr_index));
	tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
	reg = readl_relaxed(priv->base + TITSR(index));
	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
	reg |= sense << (titseln * TITSEL_WIDTH);
	writel_relaxed(reg, priv->base + TITSR(index));
	rzg2l_clear_tint_int(priv, hwirq);
	writel_relaxed(tssr, priv->base + TSSR(tssr_index));
	raw_spin_unlock(&priv->lock);

	return 0;
}
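
/*
 * The requested sense is programmed only in the IRQC (IITSR/TITSR); the
 * parent irqchip input is always configured as IRQ_TYPE_LEVEL_HIGH.
 */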
static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int hw_irq = irqd_to_hwirq(d);
	int ret = -EINVAL;

	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		ret = rzg2l_irq_set_type(d, type);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		ret = rzg2l_tint_set_edge(d, type);
	if (ret)
		return ret;

	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
static int rzg2l_irqc_irq_suspend(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	cache->iitsr = readl_relaxed(base + IITSR);
	for (u8 i = 0; i < 2; i++)
		cache->titsr[i] = readl_relaxed(base + TITSR(i));

	return 0;
}
static void rzg2l_irqc_irq_resume(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	/*
	 * Restore only interrupt type. TSSRx will be restored at the
	 * request of pin controller to avoid spurious interrupts due
	 * to invalid PIN states.
	 */
	for (u8 i = 0; i < 2; i++)
		writel_relaxed(cache->titsr[i], base + TITSR(i));
	writel_relaxed(cache->iitsr, base + IITSR);
}
static struct syscore_ops rzg2l_irqc_syscore_ops = {
	.suspend	= rzg2l_irqc_irq_suspend,
	.resume		= rzg2l_irqc_irq_resume,
};
static const struct irq_chip rzg2l_irqc_chip = {
	.name			= "rzg2l-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_disable		= rzg2l_irqc_irq_disable,
	.irq_enable		= rzg2l_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};
static const struct irq_chip rzfive_irqc_chip = {
	.name			= "rzfive-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= rzfive_irqc_mask,
	.irq_unmask		= rzfive_irqc_unmask,
	.irq_disable		= rzfive_irqc_irq_disable,
	.irq_enable		= rzfive_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};
static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct rzg2l_irqc_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	/*
	 * For TINT interrupts ie where pinctrl driver is child of irqc domain
	 * the hwirq and TINT are encoded in fwspec->param[0].
	 * hwirq for TINT range from 9-40, hwirq is embedded 0-15 bits and TINT
	 * from 16-31 bits. TINT from the pinctrl driver needs to be programmed
	 * in IRQC registers to enable a given gpio pin as interrupt.
	 */
	if (hwirq > IRQC_IRQ_COUNT) {
		tint = TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = TINT_EXTRACT_HWIRQ(hwirq);

		if (hwirq < IRQC_TINT_START)
			return -EINVAL;
	}

	if (hwirq > (IRQC_NUM_IRQ - 1))
		return -EINVAL;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}
static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
	.alloc = rzg2l_irqc_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = irq_domain_translate_twocell,
};
static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
				       struct device_node *np)
{
	struct of_phandle_args map;
	unsigned int i;
	int ret;

	for (i = 0; i < IRQC_NUM_IRQ; i++) {
		ret = of_irq_parse_one(np, i, &map);
		if (ret)
			return ret;
		of_phandle_args_to_fwspec(np, map.args, map.args_count,
					  &priv->fwspec[i]);
	}

	return 0;
}
static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
				  const struct irq_chip *irq_chip)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
	struct irq_domain *irq_domain, *parent_domain;
	struct reset_control *resetn;
	int ret;

	if (!pdev)
		return -ENODEV;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(&pdev->dev, "cannot find parent domain\n");
		return -ENODEV;
	}

	rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
	if (!rzg2l_irqc_data)
		return -ENOMEM;

	rzg2l_irqc_data->irqchip = irq_chip;

	rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
	if (IS_ERR(rzg2l_irqc_data->base))
		return PTR_ERR(rzg2l_irqc_data->base);

	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
	if (ret) {
		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
		return ret;
	}

	resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(resetn))
		return PTR_ERR(resetn);

	ret = reset_control_deassert(resetn);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
		goto pm_disable;
	}

	raw_spin_lock_init(&rzg2l_irqc_data->lock);

	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
					      node, &rzg2l_irqc_domain_ops,
					      rzg2l_irqc_data);
	if (!irq_domain) {
		dev_err(&pdev->dev, "failed to add irq domain\n");
		ret = -ENOMEM;
		goto pm_put;
	}

	register_syscore_ops(&rzg2l_irqc_syscore_ops);

	/*
	 * Prevent the cleanup function from invoking put_device by assigning
	 * NULL to dev.
	 *
	 * make coccicheck will complain about missing put_device calls, but
	 * those are false positives, as dev will be automatically "put" via
	 * __free_put_device on the failing path.
	 * On the successful path we don't actually want to "put" dev.
	 */
	dev = NULL;

	return 0;

pm_put:
	pm_runtime_put(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(resetn);
	return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
				  struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
}
static int __init rzfive_irqc_init(struct device_node *node,
				   struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");