// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        void *status_reg_buf;
        unsigned int *main_status_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;
        unsigned int *type_buf;
        unsigned int *type_buf_def;
        unsigned int **config_buf;

        unsigned int irq_reg_stride;

        unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
                                    unsigned int base, int index);

        unsigned int clear_status:1;
};

static const struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                                  int irq)
{
        return &data->chip->irqs[irq];
}

static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
{
        struct regmap *map = data->map;

        /*
         * While possible that a user-defined ->get_irq_reg() callback might
         * be linear enough to support bulk reads, most of the time it won't.
         * Therefore only allow them if the default callback is being used.
         */
        return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
               data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
               !map->use_single_read;
}

static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, j, ret;
        u32 reg;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = d->get_irq_reg(d, d->chip->status_base, i);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (d->chip->handle_mask_sync)
                        d->chip->handle_mask_sync(i, d->mask_buf_def[i],
                                                  d->mask_buf[i],
                                                  d->chip->irq_drv_data);

                if (d->chip->mask_base && !d->chip->handle_mask_sync) {
                        reg = d->get_irq_reg(d, d->chip->mask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                        if (ret)
                                dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
                }

                if (d->chip->unmask_base && !d->chip->handle_mask_sync) {
                        reg = d->get_irq_reg(d, d->chip->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret)
                                dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                        reg);
                }

                reg = d->get_irq_reg(d, d->chip->wake_base, i);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;

                /*
                 * Ack all the masked interrupts unconditionally,
                 * OR if there is masked interrupt which hasn't been Acked,
                 * it'll be ignored in irq handler, then may introduce irq storm
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);

                        /* some chips ack by write 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < d->chip->num_config_bases; i++) {
                for (j = 0; j < d->chip->num_config_regs; j++) {
                        reg = d->get_irq_reg(d, d->chip->config_base[i], j);
                        ret = regmap_write(map, reg, d->config_buf[i][j]);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to write config %x: %d\n",
                                        reg, ret);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        unsigned int reg = irq_data->reg_offset / map->reg_stride;
        unsigned int mask;

        /*
         * The type_in_mask flag means that the underlying hardware uses
         * separate mask bits for each interrupt trigger type, but we want
         * to have a single logical interrupt with a configurable type.
         *
         * If the interrupt we're enabling defines any supported types
         * then instead of using the regular mask bits for this interrupt,
         * use the value previously written to the type buffer at the
         * corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && irq_data->type.types_supported)
                mask = d->type_buf[reg] & irq_data->mask;
        else
                mask = irq_data->mask;

        if (d->chip->clear_on_unmask)
                d->clear_status = true;

        d->mask_buf[reg] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        int reg, ret;
        const struct regmap_irq_type *t = &irq_data->type;

        if ((t->types_supported & type) != type)
                return 0;

        reg = t->type_reg_offset / map->reg_stride;

        if (d->chip->type_in_mask) {
                ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
                                                        irq_data, reg, d->chip->irq_drv_data);
                if (ret)
                        return ret;
        }

        if (d->chip->set_type_config) {
                ret = d->chip->set_type_config(d->config_buf, type, irq_data,
                                               reg, d->chip->irq_drv_data);
                if (ret)
                        return ret;
        }

        return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};

static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                    unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        const struct regmap_irq_sub_irq_map *subreg;
        struct regmap *map = data->map;
        unsigned int reg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                reg = data->get_irq_reg(data, chip->status_base, b);
                ret = regmap_read(map, reg, &data->status_buf[b]);
        } else {
                /*
                 * Note we can't use ->get_irq_reg() here because the offsets
                 * in 'subreg' are *not* interchangeable with indices.
                 */
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];
                        unsigned int index = offset / map->reg_stride;

                        ret = regmap_read(map, chip->status_base + offset,
                                          &data->status_buf[index]);
                        if (ret)
                                break;
                }
        }
        return ret;
}

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }

        /*
         * Read only registers with active IRQs if the chip has 'main status
         * register'. Else read in the statuses, using a single bulk read if
         * possible in order to reduce the I/O overheads.
         */

        if (chip->no_status) {
                /* no status register so default to all active */
                memset32(data->status_buf, GENMASK(31, 0), chip->num_regs);
        } else if (chip->num_main_regs) {
                unsigned int max_main_bits;

                max_main_bits = (chip->num_main_status_bits) ?
                                 chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset32(data->status_buf, 0, chip->num_regs);

                /* We could support bulk read for main status registers
                 * but I don't expect to see devices with really many main
                 * status registers so let's only support single reads for the
                 * sake of simplicity. and add bulk reads only if needed
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        reg = data->get_irq_reg(data, chip->main_status, i);
                        ret = regmap_read(map, reg, &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }

                }
        } else if (regmap_irq_can_bulk_read_status(data)) {

                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }

        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        unsigned int reg = data->get_irq_reg(data,
                                        data->chip->status_base, i);
                        ret = regmap_read(map, reg, &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        if (chip->status_invert)
                for (i = 0; i < data->chip->num_regs; i++)
                        data->status_buf[i] = ~data->status_buf[i];

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = data->get_irq_reg(data, data->chip->ack_base, i);

                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static struct lock_class_key regmap_irq_lock_class;
static struct lock_class_key regmap_irq_request_class;

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

/**
 * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
 * @data: Data for the &struct regmap_irq_chip
 * @base: Base register
 * @index: Register index
 *
 * Returns the register address corresponding to the given @base and @index
 * by the formula ``base + index * regmap_stride * irq_reg_stride``.
 */
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
                                           unsigned int base, int index)
{
        struct regmap *map = data->map;

        return base + index * map->reg_stride * data->irq_reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);

/**
 * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
 * @buf: Buffer containing configuration register values, this is a 2D array of
 *       `num_config_bases` rows, each of `num_config_regs` elements.
 * @type: The requested IRQ type.
 * @irq_data: The IRQ being configured.
 * @idx: Index of the irq's config registers within each array `buf[i]`
 * @irq_drv_data: Driver specific IRQ data
 *
 * This is a &struct regmap_irq_chip->set_type_config callback suitable for
 * chips with one config register. Register values are updated according to
 * the &struct regmap_irq_type data associated with an IRQ.
 */
int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
                                      const struct regmap_irq *irq_data,
                                      int idx, void *irq_drv_data)
{
        const struct regmap_irq_type *t = &irq_data->type;

        if (t->type_reg_mask)
                buf[0][idx] &= ~t->type_reg_mask;
        else
                buf[0][idx] &= ~(t->type_falling_val |
                                 t->type_rising_val |
                                 t->type_level_low_val |
                                 t->type_level_high_val);

        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                buf[0][idx] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                buf[0][idx] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                buf[0][idx] |= (t->type_falling_val |
                                t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                buf[0][idx] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                buf[0][idx] |= t->type_level_low_val;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);

static int regmap_irq_create_domain(struct fwnode_handle *fwnode, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data *d)
{
        struct irq_domain_info info = {
                .fwnode = fwnode,
                .size = chip->num_irqs,
                .hwirq_max = chip->num_irqs,
                .virq_base = irq_base,
                .ops = &regmap_domain_ops,
                .host_data = d,
                .name_suffix = chip->domain_suffix,
        };

        d->domain = irq_domain_instantiate(&info);
        if (IS_ERR(d->domain)) {
                dev_err(d->map->dev, "Failed to create IRQ domain\n");
                return PTR_ERR(d->domain);
        }

        return 0;
}

/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                               struct regmap *map, int irq,
                               int irq_flags, int irq_base,
                               const struct regmap_irq_chip *chip,
                               struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        u32 reg;

        if (chip->num_regs <= 0)
                return -EINVAL;

        if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
                return -EINVAL;

        if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted)
                return -EINVAL;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        if (chip->num_main_regs) {
                d->main_status_buf = kcalloc(chip->num_main_regs,
                                             sizeof(*d->main_status_buf),
                                             GFP_KERNEL);

                if (!d->main_status_buf)
                        goto err_alloc;
        }

        d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        if (chip->type_in_mask) {
                d->type_buf_def = kcalloc(chip->num_regs,
                                          sizeof(*d->type_buf_def), GFP_KERNEL);
                if (!d->type_buf_def)
                        goto err_alloc;

                d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL);
                if (!d->type_buf)
                        goto err_alloc;
        }

        if (chip->num_config_bases && chip->num_config_regs) {
                /*
                 * Create config_buf[num_config_bases][num_config_regs]
                 */
                d->config_buf = kcalloc(chip->num_config_bases,
                                        sizeof(*d->config_buf), GFP_KERNEL);
                if (!d->config_buf)
                        goto err_alloc;

                for (i = 0; i < chip->num_config_bases; i++) {
                        d->config_buf[i] = kcalloc(chip->num_config_regs,
                                                   sizeof(**d->config_buf),
                                                   GFP_KERNEL);
                        if (!d->config_buf[i])
                                goto err_alloc;
                }
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (chip->get_irq_reg)
                d->get_irq_reg = chip->get_irq_reg;
        else
                d->get_irq_reg = regmap_irq_get_irq_reg_linear;

        if (regmap_irq_can_bulk_read_status(d)) {
                d->status_reg_buf = kmalloc_array(chip->num_regs,
                                                  map->format.val_bytes,
                                                  GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];

                if (chip->handle_mask_sync) {
                        ret = chip->handle_mask_sync(i, d->mask_buf_def[i],
                                                     d->mask_buf[i],
                                                     chip->irq_drv_data);
                        if (ret)
                                goto err_alloc;
                }

                if (chip->mask_base && !chip->handle_mask_sync) {
                        reg = d->get_irq_reg(d, chip->mask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], d->mask_buf[i]);
                        if (ret) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }

                if (chip->unmask_base && !chip->handle_mask_sync) {
                        reg = d->get_irq_reg(d, chip->unmask_base, i);
                        ret = regmap_update_bits(d->map, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }

                if (!chip->init_ack_masked)
                        continue;

                /* Ack masked but set interrupts */
                if (d->chip->no_status) {
                        /* no status register so default to all active */
                        d->status_buf[i] = GENMASK(31, 0);
                } else {
                        reg = d->get_irq_reg(d, d->chip->status_base, i);
                        ret = regmap_read(map, reg, &d->status_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                        ret);
                                goto err_alloc;
                        }
                }

                if (chip->status_invert)
                        d->status_buf[i] = ~d->status_buf[i];

                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = d->get_irq_reg(d, d->chip->ack_base, i);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                        ~(d->status_buf[i] & d->mask_buf[i]));
                        else
                                ret = regmap_write(map, reg,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg, UINT_MAX);
                                else if (!ret)
                                        ret = regmap_write(map, reg, 0);
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = d->get_irq_reg(d, d->chip->wake_base, i);

                        if (chip->wake_invert)
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         0);
                        else
                                ret = regmap_update_bits(d->map, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
        if (ret)
                goto err_alloc;

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
                                   irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
                        irq, chip->name, ret);
                goto err_domain;
        }

        *data = d;

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        if (d->config_buf) {
                for (i = 0; i < chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
                                          irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        unsigned int virq;
        int i, hwirq;

        if (!d)
                return;

        free_irq(irq, d);

        /* Dispose all virtual irq from irq domain before removing it */
        for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
                /* Ignore hwirq if holes in the IRQ list */
                if (!d->chip->irqs[hwirq].mask)
                        continue;

                /*
                 * Find the virtual irq of hwirq on chip and if it is
                 * there then dispose it
                 */
                virq = irq_find_mapping(d->domain, hwirq);
                if (virq)
                        irq_dispose_mapping(virq);
        }

        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        if (d->config_buf) {
                for (i = 0; i < d->chip->num_config_bases; i++)
                        kfree(d->config_buf[i]);
                kfree(d->config_buf);
        }
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
        struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

        regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
                                    struct fwnode_handle *fwnode,
                                    struct regmap *map, int irq,
                                    int irq_flags, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data **ptr, *d;
        int ret;

        ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
                                         chip, &d);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = d;
        devres_add(dev, ptr);
        *data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
                             int irq_flags, int irq_base,
                             const struct regmap_irq_chip *chip,
                             struct regmap_irq_chip_data **data)
{
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
                                               irq, irq_flags, irq_base, chip,
                                               data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
{
        int rc;

        WARN_ON(irq != data->irq);
        rc = devres_release(dev, devm_regmap_irq_chip_release,
                            devm_regmap_irq_chip_match, data);

        if (rc != 0)
                WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);