/* kernel/irq/irqdomain.c — translation domains mapping hardware irq numbers to linux irq numbers */
1 #define pr_fmt(fmt) "irq: " fmt
3 #include <linux/debugfs.h>
4 #include <linux/hardirq.h>
5 #include <linux/interrupt.h>
6 #include <linux/irq.h>
7 #include <linux/irqdesc.h>
8 #include <linux/irqdomain.h>
9 #include <linux/module.h>
10 #include <linux/mutex.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/topology.h>
14 #include <linux/seq_file.h>
15 #include <linux/slab.h>
16 #include <linux/smp.h>
17 #include <linux/fs.h>
/* All registered irq_domains; additions/removals serialized by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes updates to the radix-tree reverse maps of IRQ_DOMAIN_MAP_TREE domains
 * (lookups are RCU-protected, see irq_find_mapping()). */
static DEFINE_MUTEX(revmap_trees_mutex);

/* Domain used when callers pass a NULL domain; set via irq_set_default_host(). */
static struct irq_domain *irq_default_domain;
25 /**
26 * irq_domain_alloc() - Allocate a new irq_domain data structure
27 * @of_node: optional device-tree node of the interrupt controller
28 * @revmap_type: type of reverse mapping to use
29 * @ops: map/unmap domain callbacks
30 * @host_data: Controller private data pointer
32 * Allocates and initialize and irq_domain structure. Caller is expected to
33 * register allocated irq_domain with irq_domain_register(). Returns pointer
34 * to IRQ domain, or NULL on failure.
36 static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
37 unsigned int revmap_type,
38 const struct irq_domain_ops *ops,
39 void *host_data)
41 struct irq_domain *domain;
43 domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
44 of_node_to_nid(of_node));
45 if (WARN_ON(!domain))
46 return NULL;
48 /* Fill structure */
49 domain->revmap_type = revmap_type;
50 domain->ops = ops;
51 domain->host_data = host_data;
52 domain->of_node = of_node_get(of_node);
54 return domain;
/* Release a domain allocated by irq_domain_alloc(); drops the of_node reference. */
static void irq_domain_free(struct irq_domain *domain)
{
	of_node_put(domain->of_node);
	kfree(domain);
}
/* Publish a fully initialized domain on the global irq_domain_list. */
static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
	pr_debug("Allocated domain of type %d @0x%p\n",
		 domain->revmap_type, domain);
}
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/* Per-type teardown of the reverse-map storage */
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		/*
		 * Legacy domains don't manage their own irq_desc
		 * allocations, we expect the caller to handle irq_desc
		 * freeing on their own.
		 */
		break;
	case IRQ_DOMAIN_MAP_TREE:
		/*
		 * radix_tree_delete() takes care of destroying the root
		 * node when all entries are removed. Shout if there are
		 * any mappings left.
		 */
		WARN_ON(domain->revmap_data.tree.height);
		break;
	case IRQ_DOMAIN_MAP_LINEAR:
		kfree(domain->revmap_data.linear.revmap);
		domain->revmap_data.linear.size = 0;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		break;
	}

	/* Unlink while still holding irq_domain_mutex */
	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain of type %d @0x%p\n",
		 domain->revmap_type, domain);

	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
125 static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
126 irq_hw_number_t hwirq)
128 irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
129 int size = domain->revmap_data.legacy.size;
131 if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
132 return 0;
133 return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
137 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
138 * @of_node: pointer to interrupt controller's device tree node.
139 * @size: total number of irqs in mapping
140 * @first_irq: first number of irq block assigned to the domain,
141 * pass zero to assign irqs on-the-fly. This will result in a
142 * linear IRQ domain so it is important to use irq_create_mapping()
143 * for each used IRQ, especially when SPARSE_IRQ is enabled.
144 * @ops: map/unmap domain callbacks
145 * @host_data: Controller private data pointer
147 * Allocates a legacy irq_domain if irq_base is positive or a linear
148 * domain otherwise. For the legacy domain, IRQ descriptors will also
149 * be allocated.
151 * This is intended to implement the expected behaviour for most
152 * interrupt controllers which is that a linear mapping should
153 * normally be used unless the system requires a legacy mapping in
154 * order to support supplying interrupt numbers during non-DT
155 * registration of devices.
157 struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
158 unsigned int size,
159 unsigned int first_irq,
160 const struct irq_domain_ops *ops,
161 void *host_data)
163 if (first_irq > 0) {
164 int irq_base;
166 if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
168 * Set the descriptor allocator to search for a
169 * 1-to-1 mapping, such as irq_alloc_desc_at().
170 * Use of_node_to_nid() which is defined to
171 * numa_node_id() on platforms that have no custom
172 * implementation.
174 irq_base = irq_alloc_descs(first_irq, first_irq, size,
175 of_node_to_nid(of_node));
176 if (irq_base < 0) {
177 pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
178 first_irq);
179 irq_base = first_irq;
181 } else
182 irq_base = first_irq;
184 return irq_domain_add_legacy(of_node, size, irq_base, 0,
185 ops, host_data);
188 /* A linear domain is the default */
189 return irq_domain_add_linear(of_node, size, ops, host_data);
191 EXPORT_SYMBOL_GPL(irq_domain_add_simple);
/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *	be '0', but a positive integer can be used if the effective
 *	hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int i;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
	if (!domain)
		return NULL;

	domain->revmap_data.legacy.first_irq = first_irq;
	domain->revmap_data.legacy.first_hwirq = first_hwirq;
	domain->revmap_data.legacy.size = size;

	mutex_lock(&irq_domain_mutex);
	/* Verify that all the irqs are available */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		struct irq_data *irq_data = irq_get_irq_data(irq);

		/* Descriptors must pre-exist and be unclaimed by any domain */
		if (WARN_ON(!irq_data || irq_data->domain)) {
			mutex_unlock(&irq_domain_mutex);
			irq_domain_free(domain);
			return NULL;
		}
	}

	/* Claim all of the irqs before registering a legacy domain */
	for (i = 0; i < size; i++) {
		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
		irq_data->hwirq = first_hwirq + i;
		irq_data->domain = domain;
	}
	mutex_unlock(&irq_domain_mutex);

	/* With the range claimed, call map() for each irq outside the lock */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		int hwirq = first_hwirq + i;

		/* IRQ0 gets ignored */
		if (!irq)
			continue;

		/* Legacy flags are left to default at this point,
		 * one can then use irq_create_mapping() to
		 * explicitly change them
		 */
		if (ops->map)
			ops->map(domain, irq, hwirq);

		/* Clear norequest flags */
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: Number of interrupts in the domain.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Returns the new domain, or NULL if either allocation fails.
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int *revmap;

	/* hwirq -> virq table; zeroed so unmapped entries read as virq 0 */
	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
			      of_node_to_nid(of_node));
	if (WARN_ON(!revmap))
		return NULL;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
	if (!domain) {
		kfree(revmap);
		return NULL;
	}
	domain->revmap_data.linear.size = size;
	domain->revmap_data.linear.revmap = revmap;
	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
303 struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
304 unsigned int max_irq,
305 const struct irq_domain_ops *ops,
306 void *host_data)
308 struct irq_domain *domain = irq_domain_alloc(of_node,
309 IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
310 if (domain) {
311 domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
312 irq_domain_add(domain);
314 return domain;
316 EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
319 * irq_domain_add_tree()
320 * @of_node: pointer to interrupt controller's device tree node.
321 * @ops: map/unmap domain callbacks
323 * Note: The radix tree will be allocated later during boot automatically
324 * (the reverse mapping will use the slow path until that happens).
326 struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
327 const struct irq_domain_ops *ops,
328 void *host_data)
330 struct irq_domain *domain = irq_domain_alloc(of_node,
331 IRQ_DOMAIN_MAP_TREE, ops, host_data);
332 if (domain) {
333 INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
334 irq_domain_add(domain);
336 return domain;
338 EXPORT_SYMBOL_GPL(irq_domain_add_tree);
341 * irq_find_host() - Locates a domain for a given device node
342 * @node: device-tree node of the interrupt controller
344 struct irq_domain *irq_find_host(struct device_node *node)
346 struct irq_domain *h, *found = NULL;
347 int rc;
349 /* We might want to match the legacy controller last since
350 * it might potentially be set to match all interrupts in
351 * the absence of a device node. This isn't a problem so far
352 * yet though...
354 mutex_lock(&irq_domain_mutex);
355 list_for_each_entry(h, &irq_domain_list, link) {
356 if (h->ops->match)
357 rc = h->ops->match(h, node);
358 else
359 rc = (h->of_node != NULL) && (h->of_node == node);
361 if (rc) {
362 found = h;
363 break;
366 mutex_unlock(&irq_domain_mutex);
367 return found;
369 EXPORT_SYMBOL_GPL(irq_find_host);
/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	/* NOTE(review): plain store, no locking — presumably callers
	 * serialize against concurrent mapping creation; confirm. */
	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
/*
 * Tear down the domain association for @count irqs starting at @irq_base:
 * quiesce each irq, call the domain's unmap() op, then clear the irq_data
 * fields and the reverse-map entry. The exact ordering below matters.
 */
static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
		irq_hw_number_t hwirq;

		/* Skip irqs that were never associated with this domain */
		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

		hwirq = irq_data->hwirq;
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

		/* Clear reverse map */
		switch(domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = 0;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_data.tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}
	}
}
/**
 * irq_domain_associate_many() - Associate a range of irqs with a domain
 * @domain: domain the irqs belong to
 * @irq_base: first linux irq number in the range
 * @hwirq_base: first hardware irq number in the range
 * @count: number of consecutive irqs to associate
 *
 * Validates that every descriptor in the range exists and is unclaimed,
 * then associates each one (calling the domain's map() op and updating
 * the reverse map). On a map() failure, already-associated entries are
 * unwound. Returns 0 on success or -EINVAL on failure.
 */
int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
{
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
	int i, ret;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	/* First pass: fail early if any descriptor is missing or taken */
	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	}

	/* Second pass: perform the actual association */
	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
				/*
				 * If map() returns -EPERM, this interrupt is protected
				 * by the firmware or some other service and shall not
				 * be mapped.
				 *
				 * Since on some platforms we blindly try to map everything
				 * we end up with a log full of backtraces.
				 *
				 * So instead, we silently fail on -EPERM, it is the
				 * responsibility of the PIC driver to display a relevant
				 * message if needed.
				 */
				if (ret != -EPERM) {
					pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
					       virq, hwirq, ret);
					WARN_ON(1);
				}
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
				goto err_unmap;
			}
		}

		/* Record the forward mapping in the reverse map */
		switch (domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = virq;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}

		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}

	return 0;

 err_unmap:
	/* i irqs were fully associated before the failure; undo them */
	irq_domain_disassociate_many(domain, irq_base, i);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	/* Direct mapping only makes sense for NOMAP domains */
	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
		return 0;

	/* Start searching at 1: virq 0 is the invalid irq */
	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_data.nomap.max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_data.nomap.max_irq);
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("create_direct obtained virq %d\n", virq);

	/* hwirq == virq by construction for direct mappings */
	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
551 * irq_create_mapping() - Map a hardware interrupt into linux irq space
552 * @domain: domain owning this hardware interrupt or NULL for default domain
553 * @hwirq: hardware irq number in that domain space
555 * Only one mapping per hardware interrupt is permitted. Returns a linux
556 * irq number.
557 * If the sense/trigger is to be specified, set_irq_type() should be called
558 * on the number returned from that call.
560 unsigned int irq_create_mapping(struct irq_domain *domain,
561 irq_hw_number_t hwirq)
563 unsigned int hint;
564 int virq;
566 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
568 /* Look for default domain if nececssary */
569 if (domain == NULL)
570 domain = irq_default_domain;
571 if (domain == NULL) {
572 pr_warning("irq_create_mapping called for"
573 " NULL domain, hwirq=%lx\n", hwirq);
574 WARN_ON(1);
575 return 0;
577 pr_debug("-> using domain @%p\n", domain);
579 /* Check if mapping already exists */
580 virq = irq_find_mapping(domain, hwirq);
581 if (virq) {
582 pr_debug("-> existing mapping on virq %d\n", virq);
583 return virq;
586 /* Get a virtual interrupt number */
587 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
588 return irq_domain_legacy_revmap(domain, hwirq);
590 /* Allocate a virtual interrupt number */
591 hint = hwirq % nr_irqs;
592 if (hint == 0)
593 hint++;
594 virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
595 if (virq <= 0)
596 virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
597 if (virq <= 0) {
598 pr_debug("-> virq allocation failed\n");
599 return 0;
602 if (irq_domain_associate(domain, virq, hwirq)) {
603 irq_free_desc(virq);
604 return 0;
607 pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
608 hwirq, of_node_full_name(domain->of_node), virq);
610 return virq;
612 EXPORT_SYMBOL_GPL(irq_create_mapping);
615 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
616 * @domain: domain owning the interrupt range
617 * @irq_base: beginning of linux IRQ range
618 * @hwirq_base: beginning of hardware IRQ range
619 * @count: Number of interrupts to map
621 * This routine is used for allocating and mapping a range of hardware
622 * irqs to linux irqs where the linux irq numbers are at pre-defined
623 * locations. For use by controllers that already have static mappings
624 * to insert in to the domain.
626 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
627 * domain insertion.
629 * 0 is returned upon success, while any failure to establish a static
630 * mapping is treated as an error.
632 int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
633 irq_hw_number_t hwirq_base, int count)
635 int ret;
637 ret = irq_alloc_descs(irq_base, irq_base, count,
638 of_node_to_nid(domain->of_node));
639 if (unlikely(ret < 0))
640 return ret;
642 ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
643 if (unlikely(ret < 0)) {
644 irq_free_descs(irq_base, count);
645 return ret;
648 return 0;
650 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
652 unsigned int irq_create_of_mapping(struct device_node *controller,
653 const u32 *intspec, unsigned int intsize)
655 struct irq_domain *domain;
656 irq_hw_number_t hwirq;
657 unsigned int type = IRQ_TYPE_NONE;
658 unsigned int virq;
660 domain = controller ? irq_find_host(controller) : irq_default_domain;
661 if (!domain) {
662 #ifdef CONFIG_MIPS
664 * Workaround to avoid breaking interrupt controller drivers
665 * that don't yet register an irq_domain. This is temporary
666 * code. ~~~gcl, Feb 24, 2012
668 * Scheduled for removal in Linux v3.6. That should be enough
669 * time.
671 if (intsize > 0)
672 return intspec[0];
673 #endif
674 pr_warning("no irq domain found for %s !\n",
675 of_node_full_name(controller));
676 return 0;
679 /* If domain has no translation, then we assume interrupt line */
680 if (domain->ops->xlate == NULL)
681 hwirq = intspec[0];
682 else {
683 if (domain->ops->xlate(domain, controller, intspec, intsize,
684 &hwirq, &type))
685 return 0;
688 /* Create mapping */
689 virq = irq_create_mapping(domain, hwirq);
690 if (!virq)
691 return virq;
693 /* Set type if specified and different than the current one */
694 if (type != IRQ_TYPE_NONE &&
695 type != irq_get_trigger_type(virq))
696 irq_set_irq_type(virq, type);
697 return virq;
699 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
702 * irq_dispose_mapping() - Unmap an interrupt
703 * @virq: linux irq number of the interrupt to unmap
705 void irq_dispose_mapping(unsigned int virq)
707 struct irq_data *irq_data = irq_get_irq_data(virq);
708 struct irq_domain *domain;
710 if (!virq || !irq_data)
711 return;
713 domain = irq_data->domain;
714 if (WARN_ON(domain == NULL))
715 return;
717 /* Never unmap legacy interrupts */
718 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
719 return;
721 irq_domain_disassociate_many(domain, virq, 1);
722 irq_free_desc(virq);
724 EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq mapped to @hwirq, or 0 when no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* Dispatch on the domain's reverse-map strategy */
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		return irq_domain_legacy_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_LINEAR:
		return irq_linear_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_TREE:
		/* Lookups are RCU-safe; updates hold revmap_trees_mutex */
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
		rcu_read_unlock();
		if (data)
			return data->irq;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		/* NOMAP domains use virq == hwirq; just validate it */
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
766 * irq_linear_revmap() - Find a linux irq from a hw irq number.
767 * @domain: domain owning this hardware interrupt
768 * @hwirq: hardware irq number in that domain space
770 * This is a fast path that can be called directly by irq controller code to
771 * save a handful of instructions.
773 unsigned int irq_linear_revmap(struct irq_domain *domain,
774 irq_hw_number_t hwirq)
776 BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
778 /* Check revmap bounds; complain if exceeded */
779 if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
780 return 0;
782 return domain->revmap_data.linear.revmap[hwirq];
784 EXPORT_SYMBOL_GPL(irq_linear_revmap);
786 #ifdef CONFIG_IRQ_DOMAIN_DEBUG
/*
 * seq_file show callback for the irq_domain_mapping debugfs file: one row
 * per active irq with hwirq, chip name, chip data and owning domain name.
 */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	/* Column headers; chip-data column width tracks pointer width */
	seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "domain name");

	/* irq 0 is the invalid irq; start at 1 */
	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		/* Hold the descriptor lock while sampling its fields */
		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			if (desc->irq_data.domain)
				p = of_node_full_name(desc->irq_data.domain->of_node);
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}
/* debugfs open: route reads through virq_debug_show() via seq_file. */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}
/* File operations for the read-only irq_domain_mapping debugfs file. */
static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create /sys/kernel/debug/irq_domain_mapping at boot. */
static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
857 #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
860 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
862 * Device Tree IRQ specifier translation function which works with one cell
863 * bindings where the cell value maps directly to the hwirq number.
865 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
866 const u32 *intspec, unsigned int intsize,
867 unsigned long *out_hwirq, unsigned int *out_type)
869 if (WARN_ON(intsize < 1))
870 return -EINVAL;
871 *out_hwirq = intspec[0];
872 *out_type = IRQ_TYPE_NONE;
873 return 0;
875 EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
878 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
880 * Device Tree IRQ specifier translation function which works with two cell
881 * bindings where the cell values map directly to the hwirq number
882 * and linux irq flags.
884 int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
885 const u32 *intspec, unsigned int intsize,
886 irq_hw_number_t *out_hwirq, unsigned int *out_type)
888 if (WARN_ON(intsize < 2))
889 return -EINVAL;
890 *out_hwirq = intspec[0];
891 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
892 return 0;
894 EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
897 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
899 * Device Tree IRQ specifier translation function which works with either one
900 * or two cell bindings where the cell values map directly to the hwirq number
901 * and linux irq flags.
903 * Note: don't use this function unless your interrupt controller explicitly
904 * supports both one and two cell bindings. For the majority of controllers
905 * the _onecell() or _twocell() variants above should be used.
907 int irq_domain_xlate_onetwocell(struct irq_domain *d,
908 struct device_node *ctrlr,
909 const u32 *intspec, unsigned int intsize,
910 unsigned long *out_hwirq, unsigned int *out_type)
912 if (WARN_ON(intsize < 1))
913 return -EINVAL;
914 *out_hwirq = intspec[0];
915 *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
916 return 0;
918 EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/* Minimal domain ops: no map/unmap, flexible one- or two-cell DT translation. */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
925 #ifdef CONFIG_OF_IRQ
/*
 * Find the DT node matching @match at @phys_base and, if found, register
 * a 32-irq legacy domain starting at @irq_start using the simple ops.
 */
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;

	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
938 #endif