/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"
struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;

	unsigned int irq_reg_stride;
};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally; otherwise
		 * a masked interrupt which hasn't been acked will be ignored
		 * by the interrupt handler and may cause an irq storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}
static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			return IRQ_NONE;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			return IRQ_NONE;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				return IRQ_NONE;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				return IRQ_NONE;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts.
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	irq_domain_remove(d->domain);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq:  index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
/**
 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 *
 * @data: regmap_irq controller to operate on.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);