/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *      Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>

struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
        unsigned long   *bitmap;
        unsigned int    order_per_bit; /* Order of pages represented by one bit */
        struct mutex    lock;
};

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(struct cma *cma)
{
        return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(struct cma *cma)
{
        return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
{
        if (align_order <= cma->order_per_bit)
                return 0;
        return (1UL << (align_order - cma->order_per_bit)) - 1;
}

static unsigned long cma_bitmap_maxno(struct cma *cma)
{
        return cma->count >> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(struct cma *cma,
                                              unsigned long pages)
{
        return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

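/*
 * Worked example (illustrative, not from the original source): with
 * order_per_bit == 2 each bitmap bit covers 1 << 2 == 4 pages, so a request
 * for 5 pages maps to ALIGN(5, 4) >> 2 == 2 bits, and an allocation that
 * must be aligned to order 3 (8 pages) searches the bitmap with a mask of
 * (1 << (3 - 2)) - 1 == 1.
 */
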
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
{
        unsigned long bitmap_no, bitmap_count;

        bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        mutex_lock(&cma->lock);
        bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
        mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
        int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
        unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
        unsigned i = cma->count >> pageblock_order;
        struct zone *zone;

        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!cma->bitmap)
                return -ENOMEM;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;

                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        /*
                         * alloc_contig_range requires the pfn range
                         * specified to be in the same zone. Make this
                         * simple by forcing the entire CMA resv range
                         * to be in the same zone.
                         */
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);

        mutex_init(&cma->lock);
        return 0;

err:
        kfree(cma->bitmap);
        return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
        int i;

        for (i = 0; i < cma_area_count; i++) {
                int ret = cma_activate_area(&cma_areas[i]);

                if (ret)
                        return ret;
        }

        return 0;
}
core_initcall(cma_init_reserved_areas);

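/*
 * Note (editorial): cma_init_reserved_areas() runs as a core_initcall, so the
 * per-area bitmaps are kzalloc'ed only once the slab allocator is up, long
 * after the areas themselves were reserved from memblock during early boot.
 */
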
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                 int order_per_bit, struct cma **res_cma)
{
        struct cma *cma;
        phys_addr_t alignment;

        /* Sanity checks */
        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size || !memblock_is_region_reserved(base, size))
                return -EINVAL;

        /* ensure minimal alignment required by mm core */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);

        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                return -EINVAL;

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        cma = &cma_areas[cma_area_count];
        cma->base_pfn = PFN_DOWN(base);
        cma->count = size >> PAGE_SHIFT;
        cma->order_per_bit = order_per_bit;
        *res_cma = cma;
        cma_area_count++;

        return 0;
}

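/*
 * Usage sketch (illustrative only, not part of this file): early arch or
 * firmware glue code could hand an already memblock-reserved range to CMA
 * like this.  The example_* names are hypothetical.
 */
static int __init example_cma_from_reserved(phys_addr_t example_base,
                                            phys_addr_t example_size)
{
        struct cma *example_cma;

        /* cma_init_reserved_mem() expects the range to be memblock-reserved. */
        if (memblock_reserve(example_base, example_size))
                return -ENOMEM;

        /* order_per_bit == 0: one bitmap bit per page. */
        return cma_init_reserved_mem(example_base, example_size, 0,
                                     &example_cma);
}
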
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
                        phys_addr_t size, phys_addr_t limit,
                        phys_addr_t alignment, unsigned int order_per_bit,
                        bool fixed, struct cma **res_cma)
{
        phys_addr_t memblock_end = memblock_end_of_DRAM();
        phys_addr_t highmem_start = __pa(high_memory);
        int ret = 0;

        pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n",
                __func__, (unsigned long)size, (unsigned long)base,
                (unsigned long)limit, (unsigned long)alignment);

        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        /*
         * Sanitise input arguments.
         * Pages at both ends of the CMA area could be merged into an adjacent
         * unmovable migratetype pageblock by the page allocator's buddy
         * algorithm.  In that case you couldn't get a contiguous memory
         * region, which is not what we want.
         */
        alignment = max(alignment,
                (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        /* size should be aligned with order_per_bit */
        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                return -EINVAL;

        /*
         * Adjust limit to avoid crossing the low/high memory boundary for
         * automatically allocated regions.
         */
        if (((limit == 0 || limit > memblock_end) &&
             (memblock_end - size < highmem_start &&
              memblock_end > highmem_start)) ||
            (!fixed && limit > highmem_start && limit - size < highmem_start)) {
                limit = highmem_start;
        }

        if (fixed && base < highmem_start && base + size > highmem_start) {
                ret = -EINVAL;
                pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n",
                        (unsigned long)base, (unsigned long)highmem_start);
                goto err;
        }

        /* Reserve memory */
        if (fixed) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        ret = -EBUSY;
                        goto err;
                }
        } else {
                phys_addr_t addr = memblock_alloc_range(size, alignment, base,
                                                        limit);
                if (!addr) {
                        ret = -ENOMEM;
                        goto err;
                }
                base = addr;
        }

        ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
        if (ret)
                goto err;

        pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                (unsigned long)base);
        return 0;

err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return ret;
}

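/*
 * Usage sketch (illustrative only): typical arch setup code reserving a
 * 16 MiB CMA area wherever memblock can place it.  example_cma_area and
 * example_dma_contiguous_reserve() are hypothetical names.
 */
static struct cma *example_cma_area;

static void __init example_dma_contiguous_reserve(void)
{
        /* base = 0 and limit = 0 let memblock pick the placement. */
        if (cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
                                   &example_cma_area))
                pr_warn("example: unable to reserve 16 MiB CMA area\n");
}
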
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates part of contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, int count, unsigned int align)
{
        unsigned long mask, pfn, start = 0;
        unsigned long bitmap_maxno, bitmap_no, bitmap_count;
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = cma_bitmap_aligned_mask(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
        bitmap_count = cma_bitmap_pages_to_bits(cma, count);

        for (;;) {
                mutex_lock(&cma->lock);
                bitmap_no = bitmap_find_next_zero_area(cma->bitmap,
                                bitmap_maxno, start, bitmap_count, mask);
                if (bitmap_no >= bitmap_maxno) {
                        mutex_unlock(&cma->lock);
                        break;
                }
                bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
                /*
                 * It's safe to drop the lock here. We've marked this region for
                 * our exclusive use. If the migration fails we will take the
                 * lock again and unmark it.
                 */
                mutex_unlock(&cma->lock);

                pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
                mutex_lock(&cma_mutex);
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                mutex_unlock(&cma_mutex);
                if (ret == 0) {
                        page = pfn_to_page(pfn);
                        break;
                }

                cma_clear_bitmap(cma, pfn, count);
                if (ret != -EBUSY)
                        break;

                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }

        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}

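/*
 * Usage sketch (illustrative only, not part of this file): a driver-style
 * pair of helpers built on cma_alloc()/cma_release().  The names
 * example_cma_get_buffer()/example_cma_put_buffer() are hypothetical; the
 * cma pointer would come from a region set up via cma_declare_contiguous().
 */
static struct page *example_cma_get_buffer(struct cma *cma, size_t size)
{
        /*
         * Round the byte count up to whole pages; align = 0 requests no
         * alignment beyond a single page.
         */
        return cma_alloc(cma, PAGE_ALIGN(size) >> PAGE_SHIFT, 0);
}

static void example_cma_put_buffer(struct cma *cma, struct page *pages,
                                   size_t size)
{
        /* The page count must match what was passed to cma_alloc(). */
        if (!cma_release(cma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT))
                pr_warn("example: pages %p not from this CMA area\n", pages);
}
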
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, struct page *pages, int count)
{
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        free_contig_range(pfn, count);
        cma_clear_bitmap(cma, pfn, count);

        return true;
}