// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "OF: " fmt

#include <linux/device.h>
#include <linux/fwnode.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/logic_pio.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "of_private.h"

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS	4
#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r);
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
	pr_debug("%s", s);
	while (na--)
		pr_cont(" %08x", be32_to_cpu(*(addr++)));
	pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
/* Callbacks for bus specific translators */
struct of_bus {
	const char	*name;
	const char	*addresses;
	int		(*match)(struct device_node *parent);
	void		(*count_cells)(struct device_node *child,
				       int *addrc, int *sizec);
	u64		(*map)(__be32 *addr, const __be32 *range,
			       int na, int ns, int pna);
	int		(*translate)(__be32 *addr, u64 offset, int na);
	unsigned int	(*get_flags)(const __be32 *addr);
};
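/*
 * Illustrative sketch (hypothetical values, not from the original source):
 * each of_bus above parses "ranges" entries laid out as
 *
 *	<child-address (na cells)> <parent-address (pna cells)> <size (ns cells)>
 *
 * map() locates the entry covering a child address and returns the offset
 * within that entry, and translate() then rebases the address into the
 * parent bus space; get_flags() decodes the bus-specific flag bits of the
 * first address cell.
 */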
/*
 * Default translator (generic bus)
 */

static void of_bus_default_count_cells(struct device_node *dev,
				       int *addrc, int *sizec)
{
	if (addrc)
		*addrc = of_n_addr_cells(dev);
	if (sizec)
		*sizec = of_n_size_cells(dev);
}
static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
			      int na, int ns, int pna)
{
	u64 cp, s, da;

	cp = of_read_number(range, na);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr, na);

	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}
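/*
 * Worked example (hypothetical numbers): for a one-cell bus with
 * ranges = <0x1000 0x90000000 0x2000>, mapping child address 0x1040 gives
 * cp = 0x1000, s = 0x2000 and da = 0x1040, so the function returns the
 * in-range offset 0x40; the caller later rebases that offset onto the
 * parent address 0x90000000.
 */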
static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
{
	u64 a = of_read_number(addr, na);

	memset(addr, 0, na * 4);
	a += offset;
	if (na > 1)
		addr[na - 2] = cpu_to_be32(a >> 32);
	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);

	return 0;
}

static unsigned int of_bus_default_get_flags(const __be32 *addr)
{
	return IORESOURCE_MEM;
}
static unsigned int of_bus_pci_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (!IS_ENABLED(CONFIG_PCI))
		return 0;

	switch ((w >> 24) & 0x03) {
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02: /* 32 bits */
	case 0x03: /* 64 bits */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)
		flags |= IORESOURCE_PREFETCH;
	return flags;
}
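/*
 * Example (hypothetical cell value): a PCI address whose high cell is
 * 0x42000000 decodes as space code 0x02 (32-bit memory) with bit 30 set,
 * i.e. IORESOURCE_MEM | IORESOURCE_PREFETCH, while 0x01000000 decodes as
 * IORESOURCE_IO.
 */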
#ifdef CONFIG_PCI
/*
 * PCI bus specific translator
 */

static int of_bus_pci_match(struct device_node *np)
{
	/*
	 * "pciex" is PCI Express
	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
	 * "ht" is hypertransport
	 */
	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
	       of_node_is_type(np, "vci") || of_node_is_type(np, "ht");
}

static void of_bus_pci_count_cells(struct device_node *np,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 3;
	if (sizec)
		*sizec = 2;
}
static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;
	unsigned int af, rf;

	af = of_bus_pci_get_flags(addr);
	rf = of_bus_pci_get_flags(range);

	/* Check address type match */
	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
				 unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	if (strcmp(bus->name, "pci")) {
		of_node_put(parent);
		return NULL;
	}
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
		u32 val = be32_to_cpu(prop[0]);

		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(of_get_pci_address);
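/*
 * Example of the matching rule above: each "assigned-addresses" entry
 * encodes the config-space register number in the low byte of its first
 * cell, so bar_no 1 matches entries whose low byte equals
 * PCI_BASE_ADDRESS_0 + 4 = 0x14.
 */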
int of_pci_address_to_resource(struct device_node *dev, int bar,
			       struct resource *r)
{
	const __be32	*addrp;
	u64		size;
	unsigned int	flags;

	addrp = of_get_pci_address(dev, bar, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;
	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
/*
 * of_pci_range_to_resource - Create a resource from an of_pci_range
 * @range:	the PCI range that describes the resource
 * @np:		device node where the range belongs to
 * @res:	pointer to a valid resource that will be updated to
 *		reflect the values contained in the range.
 *
 * Returns -EINVAL if the range cannot be converted to a resource.
 *
 * Note that if the range is an IO range, the resource will be converted
 * using pci_address_to_pio() which can fail if it is called too early or
 * if the range cannot be matched to any host bridge IO space (our case here).
 * To guard against that we try to register the IO range first.
 * If that fails we know that pci_address_to_pio() will do too.
 */
int of_pci_range_to_resource(struct of_pci_range *range,
			     struct device_node *np, struct resource *res)
{
	int err;

	res->flags = range->flags;
	res->parent = res->child = res->sibling = NULL;
	res->name = np->full_name;

	if (res->flags & IORESOURCE_IO) {
		unsigned long port;

		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
					    range->size);
		if (err)
			goto invalid_range;
		port = pci_address_to_pio(range->cpu_addr);
		if (port == (unsigned long)-1) {
			err = -EINVAL;
			goto invalid_range;
		}
		res->start = port;
	} else {
		if ((sizeof(resource_size_t) < 8) &&
		    upper_32_bits(range->cpu_addr)) {
			err = -EINVAL;
			goto invalid_range;
		}

		res->start = range->cpu_addr;
	}
	res->end = res->start + range->size - 1;
	return 0;

invalid_range:
	res->start = (resource_size_t)OF_BAD_ADDR;
	res->end = (resource_size_t)OF_BAD_ADDR;
	return err;
}
EXPORT_SYMBOL(of_pci_range_to_resource);
#endif /* CONFIG_PCI */
/*
 * ISA bus specific translator
 */

static int of_bus_isa_match(struct device_node *np)
{
	return of_node_name_eq(np, "isa");
}

static void of_bus_isa_count_cells(struct device_node *child,
				   int *addrc, int *sizec)
{
	if (addrc)
		*addrc = 2;
	if (sizec)
		*sizec = 1;
}
static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
			  int pna)
{
	u64 cp, s, da;

	/* Check address type match */
	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
		return OF_BAD_ADDR;

	/* Read address values, skipping high cell */
	cp = of_read_number(range + 1, na - 1);
	s  = of_read_number(range + na + pna, ns);
	da = of_read_number(addr + 1, na - 1);

	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
		 (unsigned long long)cp, (unsigned long long)s,
		 (unsigned long long)da);

	if (da < cp || da >= (cp + s))
		return OF_BAD_ADDR;
	return da - cp;
}

static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
{
	return of_bus_default_translate(addr + 1, offset, na - 1);
}
static unsigned int of_bus_isa_get_flags(const __be32 *addr)
{
	unsigned int flags = 0;
	u32 w = be32_to_cpup(addr);

	if (w & 1)
		flags |= IORESOURCE_IO;
	else
		flags |= IORESOURCE_MEM;
	return flags;
}
/*
 * Array of bus specific translators
 */

static struct of_bus of_busses[] = {
#ifdef CONFIG_PCI
	/* PCI */
	{
		.name = "pci",
		.addresses = "assigned-addresses",
		.match = of_bus_pci_match,
		.count_cells = of_bus_pci_count_cells,
		.map = of_bus_pci_map,
		.translate = of_bus_pci_translate,
		.get_flags = of_bus_pci_get_flags,
	},
#endif /* CONFIG_PCI */
	/* ISA */
	{
		.name = "isa",
		.addresses = "reg",
		.match = of_bus_isa_match,
		.count_cells = of_bus_isa_count_cells,
		.map = of_bus_isa_map,
		.translate = of_bus_isa_translate,
		.get_flags = of_bus_isa_get_flags,
	},
	/* Default */
	{
		.name = "default",
		.addresses = "reg",
		.match = NULL,
		.count_cells = of_bus_default_count_cells,
		.map = of_bus_default_map,
		.translate = of_bus_default_translate,
		.get_flags = of_bus_default_get_flags,
	},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
		if (!of_busses[i].match || of_busses[i].match(np))
			return &of_busses[i];
	BUG();
	return NULL;
}
static int of_empty_ranges_quirk(struct device_node *np)
{
	if (IS_ENABLED(CONFIG_PPC)) {
		/* To save cycles, we cache the result for global "Mac" setting */
		static int quirk_state = -1;

		/* PA-SEMI sdc DT bug */
		if (of_device_is_compatible(np, "1682m-sdc"))
			return true;

		/* Make quirk cached */
		if (quirk_state < 0)
			quirk_state =
				of_machine_is_compatible("Power Macintosh") ||
				of_machine_is_compatible("MacRISC");
		return quirk_state;
	}
	return false;
}
static int of_translate_one(struct device_node *parent, struct of_bus *bus,
			    struct of_bus *pbus, __be32 *addr,
			    int na, int ns, int pna, const char *rprop)
{
	const __be32 *ranges;
	int rlen;
	int rone;
	u64 offset = OF_BAD_ADDR;

	/*
	 * Normally, an absence of a "ranges" property means we are
	 * crossing a non-translatable boundary, and thus the addresses
	 * below the current cannot be converted to CPU physical ones.
	 * Unfortunately, while this is very clear in the spec, it's not
	 * what Apple understood, and they do have things like /uni-n or
	 * /ht nodes with no "ranges" property and a lot of perfectly
	 * usable mapped devices below them. Thus we treat the absence of
	 * "ranges" as equivalent to an empty "ranges" property, which means
	 * a 1:1 translation at that level. It's up to the caller not to try
	 * to translate addresses that aren't supposed to be translated in
	 * the first place. --BenH.
	 *
	 * As far as we know, this damage only exists on Apple machines, so
	 * this code is only enabled on powerpc. --gcl
	 *
	 * This quirk also applies for 'dma-ranges' which frequently exist in
	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
	 */
	ranges = of_get_property(parent, rprop, &rlen);
	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
	    strcmp(rprop, "dma-ranges")) {
		pr_debug("no ranges; cannot translate\n");
		return 1;
	}
	if (ranges == NULL || rlen == 0) {
		offset = of_read_number(addr, na);
		memset(addr, 0, pna * 4);
		pr_debug("empty ranges; 1:1 translation\n");
		goto finish;
	}

	pr_debug("walking ranges...\n");

	/* Now walk through the ranges */
	rlen /= 4;
	rone = na + pna + ns;
	for (; rlen >= rone; rlen -= rone, ranges += rone) {
		offset = bus->map(addr, ranges, na, ns, pna);
		if (offset != OF_BAD_ADDR)
			break;
	}
	if (offset == OF_BAD_ADDR) {
		pr_debug("not found !\n");
		return 1;
	}
	memcpy(addr, ranges + na, 4 * pna);

finish:
	of_dump_addr("parent translation for:", addr, pna);
	pr_debug("with offset: %llx\n", (unsigned long long)offset);

	/* Translate it into parent bus space */
	return pbus->translate(addr, offset, pna);
}
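/*
 * Sketch of one step (hypothetical numbers, continuing the example above):
 * with na = ns = pna = 1 and a parent carrying
 * ranges = <0x1000 0x90000000 0x2000>, translating child address 0x1040
 * walks to that entry, gets offset 0x40 from map(), copies the parent base
 * 0x90000000 into addr, and translate() yields 0x90000040 in the parent
 * bus space.
 */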
/*
 * Translate an address from the device-tree into a CPU physical address.
 * This walks up the tree and applies the various bus mappings on the way.
 *
 * Note: We consider crossing any level with #size-cells == 0 to mean that
 * translation is impossible (that is, we are not dealing with a value
 * that can be mapped to a CPU physical address). This is not really
 * specified that way, but this is traditionally the way IBM at least does
 * things.
 *
 * Whenever the translation fails, the *host pointer will be set to the
 * device that had registered the logical PIO mapping, and the return code
 * is relative to that node.
 */
static u64 __of_translate_address(struct device_node *dev,
				  struct device_node *(*get_parent)(const struct device_node *),
				  const __be32 *in_addr, const char *rprop,
				  struct device_node **host)
{
	struct device_node *parent = NULL;
	struct of_bus *bus, *pbus;
	__be32 addr[OF_MAX_ADDR_CELLS];
	int na, ns, pna, pns;
	u64 result = OF_BAD_ADDR;

	pr_debug("** translation for device %pOF **\n", dev);

	/* Increase refcount at current level */
	of_node_get(dev);

	*host = NULL;

	/* Get parent & match bus type */
	parent = get_parent(dev);
	if (parent == NULL)
		goto bail;
	bus = of_match_bus(parent);

	/* Count address cells & copy address locally */
	bus->count_cells(dev, &na, &ns);
	if (!OF_CHECK_COUNTS(na, ns)) {
		pr_debug("Bad cell count for %pOF\n", dev);
		goto bail;
	}
	memcpy(addr, in_addr, na * 4);

	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
		 bus->name, na, ns, parent);
	of_dump_addr("translating address:", addr, na);

	/* Translate, walking up the tree one bus level at a time */
	for (;;) {
		struct logic_pio_hwaddr *iorange;

		/* Switch to parent bus */
		of_node_put(dev);
		dev = parent;
		parent = get_parent(dev);

		/* If root, we have finished */
		if (parent == NULL) {
			pr_debug("reached root node\n");
			result = of_read_number(addr, na);
			break;
		}

		/*
		 * For indirectIO device which has no ranges property, get
		 * the address from reg directly.
		 */
		iorange = find_io_range_by_fwnode(&dev->fwnode);
		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
			result = of_read_number(addr + 1, na - 1);
			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
				 dev, result);
			*host = of_node_get(dev);
			break;
		}

		/* Get new parent bus and counts */
		pbus = of_match_bus(parent);
		pbus->count_cells(dev, &pna, &pns);
		if (!OF_CHECK_COUNTS(pna, pns)) {
			pr_err("Bad cell count for %pOF\n", dev);
			break;
		}

		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
			 pbus->name, pna, pns, parent);

		/* Apply bus translation */
		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
			break;

		/* Complete the move up one level */
		na = pna;
		ns = pns;
		bus = pbus;

		of_dump_addr("one level translation:", addr, na);
	}
bail:
	of_node_put(parent);
	of_node_put(dev);

	return result;
}
u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, of_get_parent,
				     in_addr, "ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_address);
static struct device_node *__of_get_dma_parent(const struct device_node *np)
{
	struct of_phandle_args args;
	int ret, index;

	index = of_property_match_string(np, "interconnect-names", "dma-mem");
	if (index < 0)
		return of_get_parent(np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells",
					 index, &args);
	if (ret < 0)
		return of_get_parent(np);

	return of_node_get(args.np);
}

static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
	struct device_node *parent;

	parent = __of_get_dma_parent(np);
	of_node_put(np);

	return parent;
}
u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
{
	struct device_node *host;
	u64 ret;

	ret = __of_translate_address(dev, __of_get_dma_parent,
				     in_addr, "dma-ranges", &host);
	if (host) {
		of_node_put(host);
		return OF_BAD_ADDR;
	}

	return ret;
}
EXPORT_SYMBOL(of_translate_dma_address);
const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
			     unsigned int *flags)
{
	const __be32 *prop;
	unsigned int psize;
	struct device_node *parent;
	struct of_bus *bus;
	int onesize, i, na, ns;

	/* Get parent & match bus type */
	parent = of_get_parent(dev);
	if (parent == NULL)
		return NULL;
	bus = of_match_bus(parent);
	bus->count_cells(dev, &na, &ns);
	of_node_put(parent);
	if (!OF_CHECK_ADDR_COUNT(na))
		return NULL;

	/* Get "reg" or "assigned-addresses" property */
	prop = of_get_property(dev, bus->addresses, &psize);
	if (prop == NULL)
		return NULL;
	psize /= 4;

	onesize = na + ns;
	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
		if (i == index) {
			if (size)
				*size = of_read_number(prop + na, ns);
			if (flags)
				*flags = bus->get_flags(prop);
			return prop;
		}
	return NULL;
}
EXPORT_SYMBOL(of_get_address);
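/*
 * Usage sketch (hypothetical caller, not part of this file): a typical
 * consumer pairs of_get_address() with of_translate_address(), e.g.
 *
 *	u64 size;
 *	unsigned int flags;
 *	const __be32 *reg = of_get_address(np, 0, &size, &flags);
 *	u64 phys = reg ? of_translate_address(np, reg) : OF_BAD_ADDR;
 *
 * which resolves the first "reg" entry of np into a CPU physical address;
 * most drivers use of_address_to_resource() further down instead.
 */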
static int parser_init(struct of_pci_range_parser *parser,
		       struct device_node *node, const char *name)
{
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->na = of_bus_n_addr_cells(node);
	parser->ns = of_bus_n_size_cells(node);
	parser->dma = !strcmp(name, "dma-ranges");

	parser->range = of_get_property(node, name, &rlen);
	if (parser->range == NULL)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);

	return 0;
}

int of_pci_range_parser_init(struct of_pci_range_parser *parser,
			     struct device_node *node)
{
	return parser_init(parser, node, "ranges");
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);

int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				 struct device_node *node)
{
	return parser_init(parser, node, "dma-ranges");
}
EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
#define of_dma_range_parser_init of_pci_dma_range_parser_init
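/*
 * Usage sketch (hypothetical caller, not part of this file): the parser is
 * normally driven with for_each_of_range(), much as of_dma_get_range()
 * does below, e.g.
 *
 *	struct of_range_parser parser;
 *	struct of_range range;
 *
 *	if (!of_pci_range_parser_init(&parser, np))
 *		for_each_of_range(&parser, &range)
 *			pr_debug("bus %llx cpu %llx size %llx\n",
 *				 range.bus_addr, range.cpu_addr, range.size);
 */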
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
					     struct of_pci_range *range)
{
	int na = parser->na;
	int ns = parser->ns;
	int np = parser->pna + na + ns;

	if (!range)
		return NULL;

	if (!parser->range || parser->range + np > parser->end)
		return NULL;

	range->flags = of_bus_pci_get_flags(parser->range);

	range->pci_addr = of_read_number(parser->range, na);

	if (parser->dma)
		range->cpu_addr = of_translate_dma_address(parser->node,
							   parser->range + na);
	else
		range->cpu_addr = of_translate_address(parser->node,
						       parser->range + na);
	range->size = of_read_number(parser->range + parser->pna + na, ns);

	parser->range += np;

	/* Now consume following elements while they are contiguous */
	while (parser->range + np <= parser->end) {
		u32 flags;
		u64 pci_addr, cpu_addr, size;

		flags = of_bus_pci_get_flags(parser->range);
		pci_addr = of_read_number(parser->range, na);
		if (parser->dma)
			cpu_addr = of_translate_dma_address(parser->node,
							    parser->range + na);
		else
			cpu_addr = of_translate_address(parser->node,
							parser->range + na);
		size = of_read_number(parser->range + parser->pna + na, ns);

		if (flags != range->flags)
			break;
		if (pci_addr != range->pci_addr + range->size ||
		    cpu_addr != range->cpu_addr + range->size)
			break;

		range->size += size;
		parser->range += np;
	}

	return range;
}
EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
			       u64 size)
{
	u64 taddr;
	unsigned long port;
	struct device_node *host;

	taddr = __of_translate_address(dev, of_get_parent,
				       in_addr, "ranges", &host);
	if (host) {
		/* host-specific port access */
		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
		of_node_put(host);
	} else {
		/* memory-mapped I/O range */
		port = pci_address_to_pio(taddr);
	}

	if (port == (unsigned long)-1)
		return OF_BAD_ADDR;

	return port;
}
static int __of_address_to_resource(struct device_node *dev,
		const __be32 *addrp, u64 size, unsigned int flags,
		const char *name, struct resource *r)
{
	u64 taddr;

	if (flags & IORESOURCE_MEM)
		taddr = of_translate_address(dev, addrp);
	else if (flags & IORESOURCE_IO)
		taddr = of_translate_ioport(dev, addrp, size);
	else
		return -EINVAL;

	if (taddr == OF_BAD_ADDR)
		return -EINVAL;
	memset(r, 0, sizeof(struct resource));

	r->start = taddr;
	r->end = taddr + size - 1;
	r->flags = flags;
	r->name = name ? name : dev->full_name;

	return 0;
}
/**
 * of_address_to_resource - Translate device tree address and return as resource
 *
 * Note that if your address is a PIO address, the conversion will fail if
 * the physical address can't be internally converted to an IO token with
 * pci_address_to_pio(); that is because it's either called too early or it
 * can't be matched to any host bridge IO space.
 */
int of_address_to_resource(struct device_node *dev, int index,
			   struct resource *r)
{
	const __be32	*addrp;
	u64		size;
	unsigned int	flags;
	const char	*name = NULL;

	addrp = of_get_address(dev, index, &size, &flags);
	if (addrp == NULL)
		return -EINVAL;

	/* Get optional "reg-names" property to add a name to a resource */
	of_property_read_string_index(dev, "reg-names", index, &name);

	return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
/**
 * of_iomap - Maps the memory mapped IO for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 *
 * Returns a pointer to the mapped memory
 */
void __iomem *of_iomap(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return NULL;

	return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
/**
 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 *			   for a given device_node
 * @np:		the device whose io range will be mapped
 * @index:	index of the io range
 * @name:	name "override" for the memory region request or NULL
 *
 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure. Usage example:
 *
 *	base = of_io_request_and_map(node, 0, "foo");
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *of_io_request_and_map(struct device_node *np, int index,
				    const char *name)
{
	struct resource res;
	void __iomem *mem;

	if (of_address_to_resource(np, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!name)
		name = res.name;
	if (!request_mem_region(res.start, resource_size(&res), name))
		return IOMEM_ERR_PTR(-EBUSY);

	mem = ioremap(res.start, resource_size(&res));
	if (!mem) {
		release_mem_region(res.start, resource_size(&res));
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	return mem;
}
EXPORT_SYMBOL(of_io_request_and_map);
/**
 * of_dma_get_range - Get DMA range info
 * @np:		device node to get DMA range info
 * @dma_addr:	pointer to store initial DMA address of DMA range
 * @paddr:	pointer to store initial CPU address of DMA range
 * @size:	pointer to store size of DMA range
 *
 * Look in bottom up direction for the first "dma-ranges" property
 * and parse it.
 *	dma-ranges format:
 *		DMA addr (dma_addr)	: naddr cells
 *		CPU addr (phys_addr_t)	: pna cells
 *		size			: nsize cells
 *
 * It returns -ENODEV if the "dma-ranges" property was not found
 * for this device in the DT.
 */
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
	struct device_node *node = of_node_get(np);
	const __be32 *ranges = NULL;
	int len;
	int ret = 0;
	bool found_dma_ranges = false;
	struct of_range_parser parser;
	struct of_range range;
	u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;

	while (node) {
		ranges = of_get_property(node, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;

		/* Once we find 'dma-ranges', then a missing one is an error */
		if (found_dma_ranges && !ranges) {
			ret = -ENODEV;
			goto out;
		}
		found_dma_ranges = true;

		node = of_get_next_dma_parent(node);
	}

	if (!node || !ranges) {
		pr_debug("no dma-ranges found for node(%pOF)\n", np);
		ret = -ENODEV;
		goto out;
	}

	of_dma_range_parser_init(&parser, node);

	for_each_of_range(&parser, &range) {
		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
			 range.bus_addr, range.cpu_addr, range.size);

		if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
			pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
			/* Don't error out as we'd break some existing DTs */
			continue;
		}
		dma_offset = range.cpu_addr - range.bus_addr;

		/* Take lower and upper limits */
		if (range.bus_addr < dma_start)
			dma_start = range.bus_addr;
		if (range.bus_addr + range.size > dma_end)
			dma_end = range.bus_addr + range.size;
	}

	if (dma_start >= dma_end) {
		ret = -EINVAL;
		pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
			 node);
		goto out;
	}

	*dma_addr = dma_start;
	*size = dma_end - dma_start;
	*paddr = dma_start + dma_offset;

	pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
		 *dma_addr, *paddr, *size);

out:
	of_node_put(node);

	return ret;
}
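/*
 * Example (hypothetical property): a parent bus with one address and one
 * size cell carrying
 *
 *	dma-ranges = <0x00000000 0x40000000 0x20000000>;
 *
 * would make of_dma_get_range() report dma_addr = 0x0, paddr = 0x40000000
 * and size = 0x20000000, i.e. bus address 0x0 maps to CPU address
 * 0x40000000 for a 512 MiB DMA-able window.
 */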
/**
 * of_dma_is_coherent - Check if device is coherent
 * @np:	device node
 *
 * It returns true if the "dma-coherent" property was found
 * for this device in the DT, or if DMA is coherent by
 * default for OF devices on the current platform.
 */
bool of_dma_is_coherent(struct device_node *np)
{
	struct device_node *node = of_node_get(np);

	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
		return true;

	while (node) {
		if (of_property_read_bool(node, "dma-coherent")) {
			of_node_put(node);
			return true;
		}
		node = of_get_next_dma_parent(node);
	}
	of_node_put(node);
	return false;
}
EXPORT_SYMBOL_GPL(of_dma_is_coherent);