// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */
#define pr_fmt(fmt) "cma: " fmt

#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned int cma_area_count;
static DEFINE_MUTEX(cma_mutex);
phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}
unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}
const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}
static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
}
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);
void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes),
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/*
	 * CMA uses CMA_MIN_ALIGNMENT_BYTES as alignment requirement which
	 * needs pageblock_order to be initialized. Let's enforce it.
	 */
	if (!pageblock_order) {
		pr_err("pageblock_order not yet initialized. Called during early boot?\n");
		return -EINVAL;
	}

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += cma->count;

	return 0;
}
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area optional, use 0 for any
 * @size: Size of the reserved area (in bytes),
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}
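/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how early architecture code might carve out a CMA area with the helper
 * above. The 16 MiB size, the "example" name and example_area are
 * placeholders; the call must happen while memblock is still the active
 * allocator.
 */
static struct cma *example_area;

static void __init example_declare_cma(void)
{
	int ret;

	/*
	 * base = 0, limit = 0, alignment = 0: let memblock pick any suitably
	 * aligned range; fixed = false; order_per_bit = 0 (one bit per page).
	 */
	ret = cma_declare_contiguous_nid(0, SZ_16M, 0, 0, 0, false,
					 "example", &example_area,
					 NUMA_NO_NODE);
	if (ret)
		pr_warn("example: CMA declaration failed: %d\n", ret);
}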
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
static struct page *__cma_alloc(struct cma *cma, unsigned long count,
				unsigned int align, gfp_t gfp)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	const char *name = cma ? cma->name : NULL;

	trace_cma_alloc_start(name, count, align);

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		 (void *)cma, cma->name, count, align);

	if (!count)
		goto out;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !(gfp & __GFP_NOWARN)) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_cma_alloc_finish(name, pfn, page, count, align, ret);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of contiguous memory on specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	return __cma_alloc(cma, count, align, GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
}
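/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual allocate/release pairing for a caller that owns a struct cma.
 * The 16-page request, the order-2 alignment and my_cma are placeholders.
 */
static struct page *example_grab_buffer(struct cma *my_cma)
{
	/* 16 pages, aligned to 2^2 = 4 pages; warn if the allocation fails. */
	return cma_alloc(my_cma, 16, 2, false);
}

static void example_drop_buffer(struct cma *my_cma, struct page *page)
{
	/* The release must pass back exactly the count that was allocated. */
	if (page)
		cma_release(my_cma, page, 16);
}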
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
	struct page *page;

	if (WARN_ON(!order || !(gfp & __GFP_COMP)))
		return NULL;

	page = __cma_alloc(cma, 1 << order, order, gfp);

	return page ? page_folio(page) : NULL;
}
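/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * folio callers must request a compound allocation (__GFP_COMP) with a
 * non-zero order, matching the WARN_ON above; cma_free_folio() further down
 * is the matching release. The order-4 value and my_cma are placeholders.
 */
static struct folio *example_grab_folio(struct cma *my_cma)
{
	return cma_alloc_folio(my_cma, 4, GFP_KERNEL | __GFP_COMP);
}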
bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}
/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	cma_sysfs_account_release_pages(cma, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
bool cma_free_folio(struct cma *cma, const struct folio *folio)
{
	if (WARN_ON(!folio_test_large(folio)))
		return false;

	return cma_release(cma, &folio->page, folio_nr_pages(folio));
}
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}