// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"
struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        void *status_reg_buf;
        unsigned int *main_status_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;
        unsigned int *type_buf;
        unsigned int *type_buf_def;

        unsigned int irq_reg_stride;
        unsigned int type_reg_stride;

        bool clear_status:1;
};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     unsigned int irq)
{
        return &data->chip->irqs[irq];
}
static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
                                  unsigned int reg, unsigned int mask,
                                  unsigned int val)
{
        if (d->chip->mask_writeonly)
                return regmap_write_bits(d->map, reg, mask, val);
        else
                return regmap_update_bits(d->map, reg, mask, val);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, ret;
        u32 reg;
        u32 unmask_offset;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = d->chip->status_base +
                                (i * map->reg_stride * d->irq_reg_stride);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (!d->chip->mask_base)
                        continue;

                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->chip->mask_invert) {
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                } else if (d->chip->unmask_base) {
                        /* set mask with mask_base register */
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret < 0)
                                dev_err(d->map->dev,
                                        "Failed to sync unmasks in %x\n",
                                        reg);
                        unmask_offset = d->chip->unmask_base -
                                        d->chip->mask_base;
                        /* clear mask with unmask_base register */
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf_def[i],
                                        d->mask_buf[i]);
                } else {
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                }
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);

                reg = d->chip->wake_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                d->mask_buf_def[i],
                                                ~d->wake_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                d->mask_buf_def[i],
                                                d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;
                /*
                 * Ack all the masked interrupts unconditionally; a masked
                 * interrupt that is never acked will be ignored by the irq
                 * handler and may cause an interrupt storm.
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
                        /* some chips ack by writing 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                           d->mask_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                           ~d->mask_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = d->chip->type_base +
                                (i * map->reg_stride * d->type_reg_stride);
                        if (d->chip->type_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], ~d->type_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}
static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        unsigned int mask, type;

        type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

        /*
         * The type_in_mask flag means that the underlying hardware uses
         * separate mask bits for rising and falling edge interrupts, but
         * we want to make them into a single virtual interrupt with
         * configurable edge.
         *
         * If the interrupt we're enabling defines the falling or rising
         * masks then instead of using the regular mask bits for this
         * interrupt, use the value previously written to the type buffer
         * at the corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && type)
                mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
        else
                mask = irq_data->mask;

        if (d->chip->clear_on_unmask)
                d->clear_status = true;

        d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        int reg;
        const struct regmap_irq_type *t = &irq_data->type;

        if ((t->types_supported & type) != type)
                return 0;

        reg = t->type_reg_offset / map->reg_stride;

        if (t->type_reg_mask)
                d->type_buf[reg] &= ~t->type_reg_mask;
        else
                d->type_buf[reg] &= ~(t->type_falling_val |
                                      t->type_rising_val |
                                      t->type_level_low_val |
                                      t->type_level_high_val);
        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                d->type_buf[reg] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                d->type_buf[reg] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                d->type_buf[reg] |= (t->type_falling_val |
                                     t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                d->type_buf[reg] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                d->type_buf[reg] |= t->type_level_low_val;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}
static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                    unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        struct regmap_irq_sub_irq_map *subreg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                /* Assume linear mapping */
                ret = regmap_read(map, chip->status_base +
                                  (b * map->reg_stride * data->irq_reg_stride),
                                  &data->status_buf[b]);
        } else {
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];

                        ret = regmap_read(map, chip->status_base + offset,
                                          &data->status_buf[offset]);
                        if (ret)
                                break;
                }
        }
        return ret;
}
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * Read only registers with active IRQs if the chip has a 'main status
         * register'. Otherwise read in the statuses, using a single bulk read
         * if possible in order to reduce the I/O overheads.
         */

        if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);

                /*
                 * We could support bulk reads for the main status registers,
                 * but devices with really many main status registers are not
                 * expected, so only single reads are supported for the sake
                 * of simplicity; add bulk reads only if needed.
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        ret = regmap_read(map, chip->main_status +
                                          (i * map->reg_stride
                                           * data->irq_reg_stride),
                                          &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }
                }
        } else if (!map->use_single_read && map->reg_stride == 1 &&
                   data->irq_reg_stride == 1) {

                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }
        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        ret = regmap_read(map, chip->status_base +
                                          (i * map->reg_stride
                                           * data->irq_reg_stride),
                                          &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * data->irq_reg_stride);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                   ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                   data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                           data->status_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                           ~data->status_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}
static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};
/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                               struct regmap *map, int irq,
                               int irq_flags, int irq_base,
                               const struct regmap_irq_chip *chip,
                               struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        int num_type_reg;
        u32 reg;
        u32 unmask_offset;

        if (chip->num_regs <= 0)
                return -EINVAL;

        if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
                return -EINVAL;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        if (chip->num_main_regs) {
                d->main_status_buf = kcalloc(chip->num_main_regs,
                                             sizeof(unsigned int),
                                             GFP_KERNEL);

                if (!d->main_status_buf)
                        goto err_alloc;
        }

        d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
        if (num_type_reg) {
                d->type_buf_def = kcalloc(num_type_reg,
                                          sizeof(unsigned int), GFP_KERNEL);
                if (!d->type_buf_def)
                        goto err_alloc;

                d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
                                      GFP_KERNEL);
                if (!d->type_buf)
                        goto err_alloc;
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (chip->type_reg_stride)
                d->type_reg_stride = chip->type_reg_stride;
        else
                d->type_reg_stride = 1;

        if (!map->use_single_read && map->reg_stride == 1 &&
            d->irq_reg_stride == 1) {
                d->status_reg_buf = kmalloc_array(chip->num_regs,
                                                  map->format.val_bytes,
                                                  GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];
                if (!chip->mask_base)
                        continue;

                reg = chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (chip->mask_invert)
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf[i], ~d->mask_buf[i]);
                else if (d->chip->unmask_base) {
                        unmask_offset = d->chip->unmask_base -
                                        d->chip->mask_base;
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf[i],
                                        d->mask_buf[i]);
                } else
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf[i], d->mask_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                reg, ret);
                        goto err_alloc;
                }

                if (!chip->init_ack_masked)
                        continue;

                /* Ack masked but set interrupts */
                reg = chip->status_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                ret = regmap_read(map, reg, &d->status_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto err_alloc;
                }

                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                        ~(d->status_buf[i] & d->mask_buf[i]));
                        else
                                ret = regmap_write(map, reg,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                (d->status_buf[i] &
                                                 d->mask_buf[i]));
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                ~(d->status_buf[i] &
                                                  d->mask_buf[i]));
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = chip->wake_base +
                                (i * map->reg_stride * d->irq_reg_stride);

                        if (chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                d->mask_buf_def[i],
                                                0);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                d->mask_buf_def[i],
                                                d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (chip->num_type_reg && !chip->type_in_mask) {
                for (i = 0; i < chip->num_type_reg; ++i) {
                        reg = chip->type_base +
                                (i * map->reg_stride * d->type_reg_stride);

                        ret = regmap_read(map, reg, &d->type_buf_def[i]);

                        if (d->chip->type_invert)
                                d->type_buf_def[i] = ~d->type_buf_def[i];

                        if (ret) {
                                dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (irq_base)
                d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
                                                     irq_base, 0,
                                                     &regmap_domain_ops, d);
        else
                d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
                                                     &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
                                   irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
                        irq, chip->name, ret);
                goto err_domain;
        }

        *data = d;

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
                                          irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
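
/*
 * Example usage (illustrative sketch only, not part of the regmap core): a
 * driver for a hypothetical "foo" device with two interrupts sharing one
 * status/mask/ack register group might register its interrupt controller as
 * below. The register addresses, the IRQF_ flag, foo_irqs, foo_irq_chip and
 * the foo_dev structure are assumptions invented for this example.
 */
#if 0
static const struct regmap_irq foo_irqs[] = {
        REGMAP_IRQ_REG(0, 0, BIT(0)),   /* hwirq 0: register 0, bit 0 */
        REGMAP_IRQ_REG(1, 0, BIT(1)),   /* hwirq 1: register 0, bit 1 */
};

static const struct regmap_irq_chip foo_irq_chip = {
        .name = "foo",
        .status_base = 0x10,            /* assumed status register */
        .mask_base = 0x11,              /* assumed mask register */
        .ack_base = 0x12,               /* assumed ack register */
        .num_regs = 1,
        .irqs = foo_irqs,
        .num_irqs = ARRAY_SIZE(foo_irqs),
};

static int foo_add_irq_chip(struct foo_dev *foo)
{
        /* foo->map is the device regmap, foo->irq the primary IRQ line */
        return regmap_add_irq_chip(foo->map, foo->irq, IRQF_TRIGGER_LOW,
                                   0, &foo_irq_chip, &foo->irq_data);
}
#endif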
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device.
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        unsigned int virq;
        int hwirq;

        if (!d)
                return;

        free_irq(irq, d);

        /* Dispose of all virtual irqs from the irq domain before removing it */
        for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
                /* Ignore hwirq if there are holes in the IRQ list */
                if (!d->chip->irqs[hwirq].mask)
                        continue;

                /*
                 * Find the virtual irq of hwirq on the chip and, if it is
                 * there, dispose of it.
                 */
                virq = irq_find_mapping(d->domain, hwirq);
                if (virq)
                        irq_dispose_mapping(virq);
        }

        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
        struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

        regmap_del_irq_chip(d->irq, d);
}
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }
        return *r == data;
}
/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device pointer to which the irq_chip belongs.
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
                                    struct fwnode_handle *fwnode,
                                    struct regmap *map, int irq,
                                    int irq_flags, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data **ptr, *d;
        int ret;

        ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
                                         chip, &d);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = d;
        devres_add(dev, ptr);
        *data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
                             int irq_flags, int irq_base,
                             const struct regmap_irq_chip *chip,
                             struct regmap_irq_chip_data **data)
{
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
                                               irq, irq_flags, irq_base, chip,
                                               data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
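
/*
 * Example usage (illustrative sketch only): with the devm_ variant a probe()
 * path needs no explicit teardown; the IRQ chip is removed automatically when
 * the device is unbound. foo_irq_chip and the foo_dev structure are the same
 * hypothetical names used in the example above.
 */
#if 0
static int foo_add_irq_chip_managed(struct device *dev, struct foo_dev *foo)
{
        /* Managed registration: no matching regmap_del_irq_chip() is needed */
        return devm_regmap_add_irq_chip(dev, foo->map, foo->irq,
                                        IRQF_ONESHOT, 0, &foo_irq_chip,
                                        &foo->irq_data);
}
#endif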
/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
{
        int rc;

        WARN_ON(irq != data->irq);
        rc = devres_release(dev, devm_regmap_irq_chip_release,
                            devm_regmap_irq_chip_match, data);

        if (rc != 0)
                WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
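
/*
 * Example usage (illustrative sketch only): a child driver, for instance an
 * MFD cell, can translate a chip-relative interrupt index into a Linux IRQ
 * number and request it in the usual way. FOO_IRQ_ALARM, foo_alarm_handler
 * and the foo_dev structure are hypothetical names for this example.
 */
#if 0
static int foo_request_alarm(struct device *dev, struct foo_dev *foo)
{
        int virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_ALARM);

        if (virq < 0)
                return virq;

        /* The regmap irq_chip is nested, so a threaded handler is required */
        return devm_request_threaded_irq(dev, virq, NULL, foo_alarm_handler,
                                         IRQF_ONESHOT, "foo-alarm", foo);
}
#endif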
/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
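
/*
 * Example usage (illustrative sketch only): subsystems that work in terms of
 * irq_domain can create mappings directly from the domain; because a NULL
 * domain is accepted by regmap_irq_get_domain() callers only need a single
 * check. foo->irq_data is the same hypothetical field as in the examples
 * above.
 */
#if 0
static int foo_hwirq_to_virq(struct foo_dev *foo, irq_hw_number_t hwirq)
{
        struct irq_domain *domain = regmap_irq_get_domain(foo->irq_data);

        if (!domain)
                return -ENODEV;

        return irq_create_mapping(domain, hwirq);
}
#endif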