/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
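
/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit = 0 every bitmap bit covers one page.  For a request with
 * align = 3 (an 8-page boundary), cma_bitmap_aligned_mask() returns
 * (1UL << 3) - 1 = 7.  If the area's base_pfn is 0x12345, then
 * cma_bitmap_aligned_offset() returns 0x12345 & 7 = 5, so the bitmap search
 * in cma_alloc() picks bit positions whose pfn (base_pfn + bit) lands on an
 * 8-page boundary.  cma_bitmap_pages_to_bits() rounds up to whole bits:
 * 5 pages with order_per_bit = 1 need ALIGN(5, 2) >> 1 = 3 bits.
 */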

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	do {
		unsigned j;

		base_pfn = pfn;
		if (!pfn_valid(base_pfn))
			goto err;

		zone = page_zone(pfn_to_page(base_pfn));
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			if (!pfn_valid(pfn))
				goto err;

			/*
			 * In init_cma_reserved_pageblock(), present_pages
			 * is adjusted with the assumption that all pages in
			 * the pageblock come from a single zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;
	struct zone *zone;
	pg_data_t *pgdat;

	if (!cma_area_count)
		return 0;

	for_each_online_pgdat(pgdat) {
		unsigned long start_pfn = UINT_MAX, end_pfn = 0;

		zone = &pgdat->node_zones[ZONE_MOVABLE];

		/*
		 * In this case, we cannot adjust the zone range
		 * since it is now the maximum node span and we don't
		 * know the original zone range.
		 */
		if (populated_zone(zone))
			continue;

		for (i = 0; i < cma_area_count; i++) {
			if (pfn_to_nid(cma_areas[i].base_pfn) !=
				pgdat->node_id)
				continue;

			start_pfn = min(start_pfn, cma_areas[i].base_pfn);
			end_pfn = max(end_pfn, cma_areas[i].base_pfn +
						cma_areas[i].count);
		}

		if (!end_pfn)
			continue;

		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	}

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	/*
	 * Reserved pages for ZONE_MOVABLE are now activated, which changes
	 * ZONE_MOVABLE's managed page counter and the other zones' present
	 * counters. We need to re-calculate the various zone information
	 * that depends on this initialization.
	 */
	build_all_zonelists(NULL);
	for_each_populated_zone(zone) {
		if (zone_idx(zone) == ZONE_MOVABLE) {
			zone_pcp_reset(zone);
			setup_zone_pageset(zone);
		} else
			zone_pcp_update(zone);

		set_zone_contiguous(zone);
	}

	/*
	 * We need to re-init the per-zone watermarks by calling
	 * init_per_zone_wmark_min(), but we don't call it here because it
	 * is registered on core_initcall and will therefore run after us.
	 */
	return 0;
}
pure_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes)
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d\n", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
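
/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * have already placed a region in memblock.reserved, for example the
 * "shared-dma-pool" reserved-memory handling, can turn it into a CMA area.
 * The names rmem_base, rmem_size and rmem_cma below are hypothetical:
 *
 *	struct cma *rmem_cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(rmem_base, rmem_size, 0, "rmem",
 *				    &rmem_cma);
 *	if (err)
 *		pr_warn("rmem CMA init failed: %d\n", err);
 *
 * The region must already satisfy the alignment checks above, otherwise
 * -EINVAL is returned.
 */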

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any)
 * @size: Size of the reserved area (in bytes)
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could otherwise be merged into
	 * adjacent unmovable-migratetype pages by the page allocator's buddy
	 * algorithm, in which case a contiguous allocation from the area
	 * would no longer be possible.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit, MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects, but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
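
/*
 * Usage sketch (illustrative, not part of the original file): architecture
 * or early-boot code typically calls this while memblock is still the
 * allocator.  A hypothetical 16 MiB reservation placed anywhere below
 * "dma_limit" (placeholder name) could look like:
 *
 *	struct cma *my_cma;
 *	int err;
 *
 *	err = cma_declare_contiguous(0, SZ_16M, dma_limit, 0, 0,
 *				     false, "mycma", &my_cma);
 *	if (err)
 *		pr_warn("CMA reservation failed: %d\n", err);
 *
 * Passing base = 0 together with fixed = false lets memblock choose the
 * placement, subject to the alignment sanitisation above.
 */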

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit;
	unsigned long start = 0;
	unsigned int nr_zero, nr_total = 0;

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
		if (next_zero_bit >= cma->count)
			break;
		next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
		nr_total += nr_zero;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during compaction
 *
 * This function allocates part of contiguous memory from a specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 gfp_mask);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
			__func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
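
/*
 * Usage sketch (illustrative, not part of the original file): a hypothetical
 * driver holding a struct cma pointer "my_cma" could request 64 pages on an
 * order-4 (16-page) boundary like this:
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(my_cma, 64, 4, GFP_KERNEL | __GFP_NOWARN);
 *	if (!pages)
 *		return -ENOMEM;
 *
 * __GFP_NOWARN only suppresses the failure message printed above; busy
 * ranges are still retried internally before cma_alloc() gives up.
 */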

/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}
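
/*
 * Continuing the sketch above (hypothetical names): the pages are handed
 * back with the same count that was allocated:
 *
 *	cma_release(my_cma, pages, 64);
 *
 * The false return value for pages outside @cma lets callers that mix
 * allocators fall back to another free path.
 */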

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
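
/*
 * Usage sketch (illustrative, not part of the original file): a hypothetical
 * callback that logs every registered area; returning non-zero from the
 * callback stops the iteration early.
 *
 *	static int my_cma_show(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, size %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 *
 *	cma_for_each_area(my_cma_show, NULL);
 */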