// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */
#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"
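
/*
 * Reserved regions are first collected into a fixed __initdata array; once
 * fdt_scan_reserved_mem() has counted the regions actually present,
 * alloc_reserved_mem_array() replaces it with a memblock allocation sized
 * to fit (see the comment on that function below).
 */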
static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}

/*
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}

static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);

/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
	}
	return 0;
}
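
/*
 * Illustrative sketch (hypothetical values): a statically placed region
 * handled by the 'reg' path above might look like the following DT
 * fragment, assuming two address and two size cells in the root node:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x0 0x78000000 0x0 0x800000>;
 *			no-map;
 *		};
 *	};
 */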

/*
 * __reserved_mem_check_root() - check that the #size-cells and #address-cells
 * provided in /reserved-memory match the values supported by the current
 * implementation, and that a ranges property has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;

	return 0;
}
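
/*
 * Note that the 'reg' parsers above decode addresses with
 * dt_root_addr_cells/dt_root_size_cells, which is why /reserved-memory
 * must use the same cell counts as the root node and provide an empty
 * 'ranges' (a 1:1 mapping onto the root address space).
 */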

static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}

		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}

static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}

	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}

	total_reserved_mem_cnt = count;

	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	             -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							       0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
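
/*
 * Illustrative sketch (hypothetical values): a dynamically placed region
 * handled by __reserved_mem_alloc_size() might look like:
 *
 *	multimedia_pool: multimedia-pool {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x0 0x4000000>;
 *		alignment = <0x0 0x400000>;
 *		alloc-ranges = <0x0 0x40000000 0x0 0x40000000>;
 *	};
 */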

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
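
/*
 * Handlers land in __reservedmem_of_table via RESERVEDMEM_OF_DECLARE() from
 * <linux/of_reserved_mem.h>. A minimal sketch, with hypothetical names:
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 */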

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching fail\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on reserved
 * memory region specified by 'memory-region' property in @np node to the @dev
 * device. When a driver needs to use more than one reserved memory region, it
 * should allocate child devices and initialize regions by name for each of
 * them.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
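
/*
 * Typical driver usage (a sketch, not mandated by this file), e.g. from a
 * probe() callback:
 *
 *	ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *						 pdev->dev.of_node, 0);
 *	if (ret && ret != -ENODEV)
 *		return ret;
 */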

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev: pointer to the device to configure
 * @np: pointer to the device node with 'memory-region' property
 * @name: name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
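
/*
 * Illustrative sketch (hypothetical consumer node): with
 *
 *	memory-region = <&multimedia_pool>, <&firmware_mem>;
 *	memory-region-names = "pool", "firmware";
 *
 * a call such as of_reserved_mem_device_init_by_name(dev, np, "firmware")
 * resolves the index via 'memory-region-names' and binds the second region.
 */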

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
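
/*
 * Drivers that bound a region with one of the *_device_init_* helpers above
 * are expected to call of_reserved_mem_device_release() from their teardown
 * path so the per-device bookkeeping allocated here is freed.
 */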

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
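
/*
 * Minimal lookup sketch (hypothetical caller) for resolving a phandle to its
 * reserved_mem entry:
 *
 *	struct device_node *target = of_parse_phandle(np, "memory-region", 0);
 *	struct reserved_mem *rmem = NULL;
 *
 *	if (target) {
 *		rmem = of_reserved_mem_lookup(target);
 *		of_node_put(target);
 *	}
 */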