// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}
EXPORT_SYMBOL_GPL(irq_gc_noop);

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);

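/*
 * Illustrative sketch (not part of the upstream file): how the helpers
 * above typically map onto a driver's irq_chip callbacks for the two
 * register models. The chip-type indices and register offsets are
 * hypothetical.
 */
static void __maybe_unused example_wire_mask_helpers(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = gc->chip_types;

	/* ct[0]: chip with separate enable/disable registers */
	ct[0].regs.enable	= 0x10;
	ct[0].regs.disable	= 0x14;
	ct[0].chip.irq_mask	= irq_gc_mask_disable_reg;
	ct[0].chip.irq_unmask	= irq_gc_unmask_enable_reg;

	/* ct[1]: chip with a single mask register, a set bit masks the line */
	ct[1].regs.mask		= 0x18;
	ct[1].chip.irq_mask	= irq_gc_mask_set_bit;
	ct[1].chip.irq_unmask	= irq_gc_mask_clr_bit;
}
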
/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = d->mask;

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);

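/*
 * Illustrative sketch (not part of the upstream file): advertising wake
 * capability for all 32 interrupts of a generic chip so that
 * irq_gc_set_wake() can track the wake-active bits in gc->wake_active.
 * A real driver would apply gc->wake_active to the hardware in its own
 * suspend path.
 */
static void __maybe_unused example_enable_wake_support(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = gc->chip_types;

	gc->wake_enabled	= IRQ_MSK(32);
	ct->chip.irq_set_wake	= irq_gc_set_wake;
}
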
static u32 irq_readl_be(void __iomem *addr)
{
	return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_type *ct = gc->chip_types;
	int i;

	raw_spin_lock_init(&gc->lock);
	gc->num_ct = num_ct;
	gc->irq_base = irq_base;
	gc->reg_base = reg_base;
	for (i = 0; i < num_ct; i++)
		ct[i].chip.name = name;
	gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
	if (gc) {
		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
				      handler);
	}
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);

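/*
 * Illustrative sketch (not part of the upstream file): the non-domain
 * flow. A driver allocates one generic chip covering 32 Linux interrupts
 * starting at @irq_base, wires the callbacks to the helpers above and
 * registers everything with irq_setup_generic_chip(). The chip name and
 * register offsets are hypothetical.
 */
static void __maybe_unused example_legacy_gc_setup(void __iomem *reg_base,
						   unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("example", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->regs.mask		= 0x04;
	ct->regs.ack		= 0x08;
	ct->chip.irq_mask	= irq_gc_mask_set_bit;
	ct->chip.irq_unmask	= irq_gc_mask_clr_bit;
	ct->chip.irq_ack	= irq_gc_ack_set_bit;

	/* Initialize all 32 interrupts, seeding the mask cache from HW */
	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}
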
static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc, mskreg);
	}
}

/**
 * irq_domain_alloc_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @info: Generic chip information
 *
 * Return: 0 on success, negative error code on failure
 */
int irq_domain_alloc_generic_chips(struct irq_domain *d,
				   const struct irq_domain_chip_generic_info *info)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	int numchips, i;
	size_t dgc_sz;
	size_t gc_sz;
	size_t sz;
	void *tmp;
	int ret;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, info->irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	gc_sz = struct_size(gc, chip_types, info->num_ct);
	dgc_sz = struct_size(dgc, gc, numchips);
	sz = dgc_sz + numchips * gc_sz;

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = info->irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = info->irq_flags_to_set;
	dgc->irq_flags_to_clear = info->irq_flags_to_clear;
	dgc->gc_flags = info->gc_flags;
	dgc->exit = info->exit;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += dgc_sz;
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, info->name, info->num_ct,
				      i * dgc->irqs_per_chip, NULL,
				      info->handler);

		gc->domain = d;
		if (dgc->gc_flags & IRQ_GC_BE_IO) {
			gc->reg_readl = &irq_readl_be;
			gc->reg_writel = &irq_writel_be;
		}

		if (info->init) {
			ret = info->init(gc);
			if (ret)
				goto err;
		}

		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += gc_sz;
	}
	return 0;

err:
	while (i--) {
		if (dgc->exit)
			dgc->exit(dgc->gc[i]);
		irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
	}
	d->gc = NULL;
	kfree(dgc);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_generic_chips);

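/*
 * Illustrative sketch (not part of the upstream file): the
 * information-structure based allocation for an existing irq domain.
 * The @init callback configures each allocated chip; the name, register
 * offsets and flags chosen here are hypothetical.
 */
static int __maybe_unused example_dgc_init_one(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = gc->chip_types;

	/* A real driver would also set gc->reg_base here */
	ct->regs.enable		= 0x00;
	ct->regs.disable	= 0x04;
	ct->chip.irq_mask	= irq_gc_mask_disable_reg;
	ct->chip.irq_unmask	= irq_gc_unmask_enable_reg;
	return 0;
}

static int __maybe_unused example_dgc_alloc(struct irq_domain *d)
{
	struct irq_domain_chip_generic_info info = {
		.name			= "example",
		.handler		= handle_level_irq,
		.irqs_per_chip		= 32,
		.num_ct			= 1,
		.irq_flags_to_clear	= IRQ_NOREQUEST,
		.gc_flags		= IRQ_GC_INIT_MASK_CACHE,
		.init			= example_dgc_init_one,
	};

	return irq_domain_alloc_generic_chips(d, &info);
}
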
/**
 * irq_domain_remove_generic_chips - Remove generic chips from an irq domain
 * @d: irq domain for which generic chips are to be removed
 */
void irq_domain_remove_generic_chips(struct irq_domain *d)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int i;

	if (!dgc)
		return;

	for (i = 0; i < dgc->num_chips; i++) {
		if (dgc->exit)
			dgc->exit(dgc->gc[i]);
		irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
	}
	d->gc = NULL;
	kfree(dgc);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_generic_chips);

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with this
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic_info info = {
		.irqs_per_chip		= irqs_per_chip,
		.num_ct			= num_ct,
		.name			= name,
		.handler		= handler,
		.irq_flags_to_clear	= clr,
		.irq_flags_to_set	= set,
		.gc_flags		= gcflags,
	};

	return irq_domain_alloc_generic_chips(d, &info);
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);

static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	int idx;

	if (!dgc)
		return ERR_PTR(-ENODEV);
	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return ERR_PTR(-EINVAL);
	return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

	return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

/*
 * Separate lockdep classes for interrupt chip which can nest irq_desc
 * lock and request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	gc = __irq_get_domain_generic_chip(d, hw_irq);
	if (IS_ERR(gc))
		return PTR_ERR(gc);

	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class,
				      &irq_nested_request_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}

void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int hw_irq = data->hwirq;
	struct irq_chip_generic *gc;
	int irq_idx;

	gc = irq_get_domain_generic_chip(d, hw_irq);
	if (!gc)
		return;

	irq_idx = hw_irq % dgc->irqs_per_chip;

	clear_bit(irq_idx, &gc->installed);
	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
			    NULL);
}

const struct irq_domain_ops irq_generic_chip_ops = {
	.map	= irq_map_generic_chip,
	.unmap	= irq_unmap_generic_chip,
	.xlate	= irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

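/*
 * Illustrative sketch (not part of the upstream file): the irq domain
 * based flow. The domain is created with irq_generic_chip_ops so that
 * irq_map_generic_chip()/irq_unmap_generic_chip() handle the mappings,
 * then the generic chips are allocated and configured. The fwnode,
 * interrupt count, chip name and register offsets are hypothetical.
 */
static int __maybe_unused example_domain_gc_setup(struct fwnode_handle *fwnode,
						  void __iomem *reg_base)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_linear(fwnode, 32, &irq_generic_chip_ops,
					  NULL);
	if (!domain)
		return -ENOMEM;

	ret = __irq_alloc_domain_generic_chips(domain, 32, 1, "example",
					       handle_level_irq, 0, 0,
					       IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = reg_base;
	gc->chip_types[0].regs.mask	  = 0x04;
	gc->chip_types[0].chip.irq_mask	  = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;

	return 0;
}
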
/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class,
					      &irq_nested_request_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);

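/*
 * Illustrative sketch (not part of the upstream file): a driver
 * irq_set_type() callback for a chip with two irq_chip_type entries, e.g.
 * ct[0].type = IRQ_TYPE_LEVEL_MASK with handle_level_irq and
 * ct[1].type = IRQ_TYPE_EDGE_BOTH with handle_edge_irq. After programming
 * a hypothetical edge/level mode register, irq_setup_alt_chip() switches
 * the interrupt to the matching chip type and flow handler.
 */
static int __maybe_unused example_irq_set_type(struct irq_data *d,
					       unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mode;

	irq_gc_lock(gc);
	mode = irq_reg_readl(gc, 0x0c);		/* hypothetical mode register */
	if (type & IRQ_TYPE_EDGE_BOTH)
		mode |= d->mask;
	else
		mode &= ~d->mask;
	irq_reg_writel(gc, mode, 0x0c);
	irq_gc_unlock(gc);

	return irq_setup_alt_chip(d, type);
}
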
/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i, virq;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (i = 0; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/*
		 * Interrupt domain based chips store the base hardware
		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
		 * contains the base Linux interrupt number.
		 */
		if (gc->domain) {
			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
			if (!virq)
				continue;
		} else {
			virq = gc->irq_base + i;
		}

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(virq, NULL);
		irq_set_chip(virq, &no_irq_chip);
		irq_set_chip_data(virq, NULL);
		irq_modify_status(virq, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has been actually
	 * installed. Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_suspend(data);
		}

		if (gc->suspend)
			gc->suspend(gc);
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (gc->resume)
			gc->resume(gc);

		if (ct->chip.irq_resume) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_resume(data);
		}
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_pm_shutdown(data);
		}
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);