Linux 4.14.158
mm/bootmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/bootmem.h>

#include "internal.h"
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
        .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;
static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                pr_info("bootmem::%s " fmt,             \
                        __func__, ## args);             \
})
static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, BITS_PER_BYTE);

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
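/*
 * Worked example (illustrative, not in the original source): with 4 KiB
 * pages, representing 128 MiB of RAM means pages = 32768, so
 * bootmap_bytes() returns 32768 / 8 = 4096 bytes (one bit per page,
 * rounded up to a multiple of sizeof(long)), and bootmem_bootmap_pages()
 * returns PAGE_ALIGN(4096) >> PAGE_SHIFT = 1, i.e. the bitmap itself
 * occupies a single page.
 */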
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }

        list_add_tail(&bdata->list, &bdata_list);
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
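/*
 * Illustrative call sequence (a sketch, not taken from this file): a UMA
 * architecture's setup_arch() would typically drive the allocator roughly
 * like this, with all names and addresses hypothetical:
 *
 *      bitmap_bytes = init_bootmem(bitmap_pfn, max_pfn);
 *      free_bootmem(ram_start, ram_size);       /" mark usable RAM free "/
 *      reserve_bootmem(kernel_start, kernel_size, BOOTMEM_DEFAULT);
 *
 * init_bootmem_core() marks every page reserved, so RAM has to be freed
 * explicitly before the first allocation can succeed.
 */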
/*
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part_phys(physaddr, size);

        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
                totalram_pages++;
        }
}
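/*
 * Rounding example (illustrative): with 4 KiB pages, physaddr = 0x2400 and
 * size = 0x3000 give cursor = PFN_UP(0x2400) = 3 and
 * end = PFN_DOWN(0x5400) = 5, so only the fully covered pfns 3 and 4 are
 * released; the partially covered pages at either end are left alone.
 */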
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long *map, start, end, pages, cur, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        map = bdata->node_bootmem_map;
        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data, start, end);

        while (start < end) {
                unsigned long idx, vec;
                unsigned shift;

                idx = start - bdata->node_min_pfn;
                shift = idx & (BITS_PER_LONG - 1);
                /*
                 * vec holds at most BITS_PER_LONG map bits,
                 * bit 0 corresponds to start.
                 */
                vec = ~map[idx / BITS_PER_LONG];

                if (shift) {
                        vec >>= shift;
                        if (end - start >= BITS_PER_LONG)
                                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                        (BITS_PER_LONG - shift);
                }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
                 * it in one go.
                 */
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), start, order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        cur = start;

                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, cur, 0);
                                        count++;
                                }
                                vec >>= 1;
                                ++cur;
                        }
                }
        }

        cur = bdata->node_min_pfn;
        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, cur++, 0);
        bdata->node_bootmem_map = NULL;

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}
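/*
 * Note on the loop above (editorial, illustrative): with BITS_PER_LONG = 64,
 * an aligned word whose pages are all free (vec == ~0UL) is handed to the
 * buddy allocator as one order-6 block (ilog2(64) = 6); any other bit
 * pattern falls back to freeing the set bits one page at a time. After the
 * pages themselves, the bitmap's own pages are freed and the map pointer
 * is cleared, which retires this bdata for good.
 */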
static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
        struct pglist_data *pgdat;

        if (reset_managed_pages_done)
                return;

        for_each_online_pgdat(pgdat)
                reset_node_managed_pages(pgdat);

        reset_managed_pages_done = 1;
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;

        reset_all_zones_managed_pages();

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        totalram_pages += total_pages;

        return total_pages;
}
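/*
 * Usage note (editorial): architectures call this once from their
 * mem_init() to hand everything still free over to the buddy allocator:
 *
 *      free_all_bootmem();
 *
 * totalram_pages is updated here, so the return value is needed only for
 * logging. Past this point, allocations must use the page allocator or
 * slab instead of bootmem (see also free_bootmem_late() above).
 */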
static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (WARN_ON(bdata->node_bootmem_map == NULL))
                return;

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}
static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        if (WARN_ON(bdata->node_bootmem_map == NULL))
                return 0;

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}
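/*
 * Example of the BOOTMEM_EXCLUSIVE semantics above (illustrative):
 * reserving indices 10..13 when index 12 is already set first sets bits
 * 10 and 11, then hits the collision at 12. With BOOTMEM_EXCLUSIVE,
 * __free(bdata, 10, 12) rolls the two fresh bits back and -EBUSY is
 * returned; without it, the overlap is merely logged and the reservation
 * continues.
 */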
static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}
static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}
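/*
 * Example of a range spanning nodes (illustrative pfns): with node 0
 * covering [0, 100) and node 1 covering [100, 200), a call
 * mark_bootmem(50, 150, 1, flags) reserves [50, 100) on node 0, advances
 * pos to 100, and then reserves [100, 150) on node 1. Should the second
 * step fail, mark_bootmem(start, pos, 0, 0) frees the already-reserved
 * prefix before the error is propagated.
 */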
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part_phys(physaddr, size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}
/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part_phys(physaddr, size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem(start, end, 0, 0);
}
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                           int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}
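/*
 * Rounding contrast (illustrative, 4 KiB pages): for addr = 0x2400 and
 * size = 0x3000, free_bootmem() rounds inward (PFN_UP/PFN_DOWN) and frees
 * only pfns 3..4, while reserve_bootmem() rounds outward (PFN_DOWN/PFN_UP)
 * and reserves pfns 2..5. Either way a partially covered page ends up
 * reserved, matching the "partial pages" notes in the kernel-doc above.
 */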
static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}
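/*
 * Worked example (illustrative): for a node with node_min_pfn = 9 and an
 * alignment step of 4 pages, align_idx(bdata, 0, 4) = ALIGN(9, 4) - 9 =
 * 12 - 9 = 3. Index 3 therefore maps to pfn 12, which is 4-page aligned
 * even though the node itself starts at an odd pfn.
 */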
static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                    PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                              PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                      start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}
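/*
 * Note on the merge path above (editorial, illustrative numbers): if the
 * previous allocation ended mid-page, say at node-relative byte offset
 * last_end_off = 0x1100, and the new search lands on the very next pfn
 * (sidx = 2 with 4 KiB pages), start_off is placed at the aligned offset
 * just past 0x1100 instead of at PFN_PHYS(2). merge is then nonzero and
 * __reserve() skips the first page, which is already reserved, so small
 * allocations can share a page rather than each consuming a whole one.
 */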
static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        return NULL;
}
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                              unsigned long align,
                                              unsigned long goal,
                                              unsigned long limit)
{
        void *ptr;

restart:
        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                      unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                      unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem(size, align, goal, limit);
}
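/*
 * Illustrative usage (a sketch; the alloc_bootmem() convenience macros in
 * <linux/bootmem.h> expand to calls of this shape, with the goal set to
 * __pa(MAX_DMA_ADDRESS)):
 *
 *      table = __alloc_bootmem(nr_entries * sizeof(*table),
 *                              SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 *
 * The returned region is already zeroed (see the memset() in
 * alloc_bootmem_bdata()), and failure panics instead of returning NULL,
 * so callers need no error check.
 */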
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)
                limit = 0;

        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
        if (ptr)
                return ptr;

        pr_alert("bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                        unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update the goal according to MAX_DMA32_PFN */
        end_pfn = pgdat_end_pfn(pgdat);

        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
                ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                          new_goal, 0);
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);
}
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
                                          unsigned long align,
                                          unsigned long goal)
{
        return ___alloc_bootmem_nopanic(size, align, goal,
                                        ARCH_LOW_ADDRESS_LIMIT);
}
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
}