// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}
/**
 * alloc_reserved_mem_array() - allocate memory for the reserved_mem
 * array using memblock
 *
 * This function is used to allocate memory for the reserved_mem
 * array according to the total number of reserved memory regions
 * defined in the DT.
 * After the new array is allocated, the information stored in
 * the initial static array is copied over to this new array and
 * the new array is used from this point on.
 */
static void __init alloc_reserved_mem_array(void)
{
	struct reserved_mem *new_array;
	size_t alloc_size, copy_size, memset_size;

	alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
	if (alloc_size == SIZE_MAX) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
	if (!new_array) {
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
		return;
	}

	copy_size = array_size(reserved_mem_count, sizeof(*new_array));
	if (copy_size == SIZE_MAX) {
		memblock_free(new_array, alloc_size);
		total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
		pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
		return;
	}

	memset_size = alloc_size - copy_size;

	memcpy(new_array, reserved_mem, copy_size);
	memset(new_array + reserved_mem_count, 0, memset_size);

	reserved_mem = new_array;
}
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == total_reserved_mem_cnt) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	/* Call the region specific initialization function */
	fdt_init_reserved_mem_node(rmem);

	reserved_mem_count++;
	return;
}
static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}

	return memblock_reserve(base, size);
}
/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
	}
	return 0;
}
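/*
 * Illustrative example (not part of the original file; node name, address
 * and size are hypothetical): a statically-placed region that the function
 * above would reserve. The 'reg' property is consumed one <address size>
 * pair at a time, so a single node may describe several discontiguous
 * ranges.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 */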
/*
 * __reserved_mem_check_root() - check that the #size-cells and
 * #address-cells values provided in /reserved-memory match the values
 * supported by the current implementation, and that a 'ranges' property
 * has been provided
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}
static void __init __rmem_check_for_overlap(void);

/**
 * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
 * reserved memory regions.
 *
 * This function is used to scan through the DT and store the
 * information for the reserved memory regions that are defined using
 * the "reg" property. The region node number, name, base address, and
 * size are all stored in the reserved_mem array by calling the
 * fdt_reserved_mem_save_node() function.
 */
void __init fdt_scan_reserved_mem_reg_nodes(void)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	const void *fdt = initial_boot_params;
	phys_addr_t base, size;
	const __be32 *prop;
	int node, child;
	int len;

	if (!fdt)
		return;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0) {
		pr_info("Reserved memory: No reserved-memory node in the DT\n");
		return;
	}

	/* Attempt dynamic allocation of a new reserved_mem array */
	alloc_reserved_mem_array();

	if (__reserved_mem_check_root(node)) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;

		prop = of_get_flat_dt_prop(child, "reg", &len);
		if (!prop)
			continue;
		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);
		if (len && len % t_len != 0) {
			pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
			       uname);
			continue;
		}
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size)
			fdt_reserved_mem_save_node(child, uname, base, size);
	}

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();
}
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);

/*
 * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	int dynamic_nodes_cnt = 0, count = 0;
	int dynamic_nodes[MAX_RESERVED_REGIONS];
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (!err)
			count++;
		/*
		 * Save the nodes for the dynamically-placed regions
		 * into an array which will be used for allocation right
		 * after all the statically-placed regions are reserved
		 * or marked as no-map. This is done to avoid dynamically
		 * allocating from one of the statically-placed regions.
		 */
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
			dynamic_nodes[dynamic_nodes_cnt] = child;
			dynamic_nodes_cnt++;
		}
	}

	for (int i = 0; i < dynamic_nodes_cnt; i++) {
		const char *uname;
		int err;

		child = dynamic_nodes[i];
		uname = fdt_get_name(fdt, child, NULL);
		err = __reserved_mem_alloc_size(child, uname);
		if (!err)
			count++;
	}
	total_reserved_mem_cnt = count;
	return 0;
}
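/*
 * Illustrative example (hypothetical nodes and addresses): with the mix
 * below, the 'reg'-based region is reserved by the first loop above, and
 * only afterwards is the 'size'-only region placed by the second loop, so
 * the dynamic allocation can never land inside a statically-placed region.
 *
 *	secure@9e000000 {
 *		reg = <0x9e000000 0x2000000>;
 *		no-map;
 *	};
 *
 *	linux,cma {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x4000000>;
 *	};
 */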
/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}
/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	/* Save region in the reserved_mem array */
	fdt_reserved_mem_save_node(node, uname, base, size);
	return 0;
}
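/*
 * Illustrative example (hypothetical node and values): a dynamically-placed
 * region using all three properties handled above. The kernel picks a base
 * inside the 'alloc-ranges' window, aligned to 'alignment' (raised to
 * CMA_MIN_ALIGNMENT_BYTES here because the node is a reusable
 * shared-dma-pool).
 *
 *	multimedia_pool: mm-pool {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x4000000>;
 *		alignment = <0x400000>;
 *		alloc-ranges = <0x40000000 0x40000000>;
 *	};
 */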
static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
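/*
 * Entries in __reservedmem_of_table are emitted with the
 * RESERVEDMEM_OF_DECLARE() macro from <linux/of_reserved_mem.h>. A minimal
 * sketch of such a setup hook (the 'vendor,my-pool' compatible and the
 * rmem_my_setup name are hypothetical):
 *
 *	static int __init rmem_my_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", rmem_my_setup);
 */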
static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}
static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}
/**
 * fdt_init_reserved_mem_node() - Initialize a reserved memory region
 * @rmem: reserved_mem struct of the memory region to be initialized.
 *
 * This function is used to call the region specific initialization
 * function for a reserved memory region.
 */
static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	int err = 0;
	bool nomap;

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	err = __reserved_mem_init_node(rmem);
	if (err != 0 && err != -ENOENT) {
		pr_info("node %s compatible matching fail\n", rmem->name);
		if (nomap)
			memblock_clear_nomap(rmem->base, rmem->size);
		else
			memblock_phys_free(rmem->base, rmem->size);
	} else {
		phys_addr_t end = rmem->base + rmem->size - 1;
		bool reusable =
			(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

		pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
			&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
			nomap ? "nomap" : "map",
			reusable ? "reusable" : "non-reusable",
			rmem->name ? rmem->name : "unknown");
	}
}
struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *	given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns the DMA-mapping operations for the reserved memory
 * region specified by the 'memory-region' property of the @np node to the
 * @dev device. When a driver needs to use more than one reserved memory
 * region, it should allocate child devices and initialize a region by name
 * for each child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = of_reserved_mem_lookup(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
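/*
 * A minimal usage sketch (the foo_probe driver is hypothetical): picking up
 * the first 'memory-region' phandle of a probing device's node, treating a
 * missing region (-ENODEV) as optional.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *							 pdev->dev.of_node, 0);
 *		if (ret && ret != -ENODEV)
 *			return ret;
 *		...
 *	}
 */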
/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *	to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
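/*
 * A minimal sketch of the device node this helper expects (node and region
 * names are hypothetical); the "rx" lookup below resolves to index 1 of
 * 'memory-region' via 'memory-region-names':
 *
 *	foo@10000000 {
 *		memory-region = <&tx_pool>, <&rx_pool>;
 *		memory-region-names = "tx", "rx";
 *	};
 *
 *	of_reserved_mem_device_init_by_name(dev, dev->of_node, "rx");
 *
 * If the name is absent, of_property_match_string() returns a negative
 * errno, the subsequent of_parse_phandle() call fails, and -ENODEV is
 * returned.
 */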
/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
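/*
 * A minimal usage sketch (hypothetical driver code): resolving a
 * 'memory-region' phandle to its reserved_mem entry to learn the region's
 * base and size without mapping it.
 *
 *	struct device_node *np;
 *	struct reserved_mem *rmem;
 *
 *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	if (np) {
 *		rmem = of_reserved_mem_lookup(np);
 *		of_node_put(np);
 *		if (rmem)
 *			dev_info(dev, "region %pa+%pa\n",
 *				 &rmem->base, &rmem->size);
 *	}
 */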