mm/vmalloc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
#include <linux/page_owner.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	struct page *page;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		if (unlikely(!pte_none(ptep_get(pte)))) {
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				dump_page(page, "remapping already mapped page");
			}
			BUG();
		}

#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry, size);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}
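
/*
 * Worked example (illustrative only, assuming x86-64 with 4 KiB base pages,
 * where PMD_SHIFT = 21 and PMD_SIZE = 2 MiB): a request covering exactly
 * [addr, addr + 2 MiB) with both addr and phys_addr 2 MiB aligned, a
 * max_page_shift >= 21 and arch support passes every check above and is
 * installed as a single huge PMD entry; anything smaller or misaligned
 * falls back to the PTE loop in vmap_pte_range().
 */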
static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	if (!err)
		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
					       ioremap_max_page_shift);
	return err;
}
int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	struct vm_struct *area;

	area = find_vm_area((void *)addr);
	if (!area || !(area->flags & VM_IOREMAP)) {
		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
		return -EINVAL;
	}
	if (addr != (unsigned long)area->addr ||
	    (void *)end != area->addr + get_vm_area_size(area)) {
		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
			  addr, end, (long)area->addr,
			  (long)area->addr + get_vm_area_size(area));
		return -ERANGE;
	}
	return vmap_page_range(addr, end, phys_addr, prot);
}
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}
/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range() after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	kmsan_vunmap_range_noflush(start, end);
	__vunmap_range_noflush(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(ptep_get(pte))))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}
/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}
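
/*
 * Worked example (illustrative, assuming 4 KiB base pages, PAGE_SHIFT = 12):
 * with page_shift = 21 (2 MiB mappings), each loop iteration above maps
 * 1UL << 21 bytes and advances i by 1U << (21 - 12) = 512, i.e. pages[0],
 * pages[512], pages[1024], ... must each start a physically contiguous,
 * suitably aligned run of 512 pages.
 */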
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
						 page_shift);

	if (ret)
		return ret;
	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
				unsigned long end)
{
	might_sleep();
	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
		return -EINVAL;
	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
		return -EINVAL;
	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
		return -EINVAL;
	if ((end - start) >> PAGE_SHIFT > totalram_pages())
		return -E2BIG;
	if (start < (unsigned long)area->addr ||
	    (void *)end > area->addr + get_vm_area_size(area))
		return -ERANGE;
	return 0;
}
/**
 * vm_area_map_pages - map pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 * @pages: pages to map (always PAGE_SIZE pages)
 */
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages)
{
	int err;

	err = check_sparse_vm_area(area, start, end);
	if (err)
		return err;

	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
}

/**
 * vm_area_unmap_pages - unmap pages inside given sparse vm_area
 * @area: vm_area
 * @start: start address inside vm_area
 * @end: end address inside vm_area
 */
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end)
{
	if (check_sparse_vm_area(area, start, end))
		return;

	vunmap_range(start, end);
}
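
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * reserved a sparse area can map and unmap page ranges inside it on demand.
 * The names below ("area", "pages", MY_OFF, MY_LEN) are hypothetical.
 *
 *	struct vm_struct *area = get_vm_area(SZ_1M, VM_SPARSE);
 *	unsigned long start = (unsigned long)area->addr + MY_OFF;
 *
 *	if (vm_area_map_pages(area, start, start + MY_LEN, pages))
 *		return -ENOMEM;
 *	...
 *	vm_area_unmap_pages(area, start, start + MY_LEN);
 */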
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put them in the vmalloc space.
	 */
#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr);
/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_kernel(pmd, addr);
	pte = ptep_get(ptep);
	if (pte_present(pte))
		page = pte_page(pte);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
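
/*
 * Usage sketch (illustrative only): translating a vmalloc()'d buffer back
 * to its backing pages, e.g. to build a scatterlist. "buf", "pages" and
 * "n_pages" below are hypothetical.
 *
 *	void *buf = vmalloc(n_pages * PAGE_SIZE);
 *	for (i = 0; i < n_pages; i++)
 *		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
 */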
/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(free_vmap_area_lock);
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node stores the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match for a given free-area size.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

/*
 * This structure defines a single, solid model where a list and
 * rb-tree are part of one entity protected by the lock. Nodes are
 * sorted in ascending order, thus for O(1) access to left/right
 * neighbors a list is used as well as for sequential traversal.
 */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/*
 * A fast size storage contains VAs up to 1M size. A pool consists
 * of ready-to-go VAs of a certain size, linked to each other. The
 * pool at index i in the pool-array holds VAs of (i + 1) pages.
 */
#define MAX_VA_SIZE_PAGES 256

struct vmap_pool {
	struct list_head head;
	unsigned long len;
};

/*
 * An effective vmap-node logic. Users make use of nodes instead
 * of a global heap. It allows balancing access and mitigating
 * contention.
 */
static struct vmap_node {
	/* Simple size segregated storage. */
	struct vmap_pool pool[MAX_VA_SIZE_PAGES];
	spinlock_t pool_lock;
	bool skip_populate;

	/* Bookkeeping data of this node. */
	struct rb_list busy;
	struct rb_list lazy;

	/*
	 * Ready-to-free areas.
	 */
	struct list_head purge_list;
	struct work_struct purge_work;
	unsigned long nr_purged;
} single;

/*
 * Initial setup consists of one single node, i.e. balancing
 * is fully disabled. Later on, after vmap is initialized, these
 * parameters are updated based on the system's capacity.
 */
static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;
static inline unsigned int
addr_to_node_id(unsigned long addr)
{
	return (addr / vmap_zone_size) % nr_vmap_nodes;
}

static inline struct vmap_node *
addr_to_node(unsigned long addr)
{
	return &vmap_nodes[addr_to_node_id(addr)];
}

static inline struct vmap_node *
id_to_node(unsigned int id)
{
	return &vmap_nodes[id % nr_vmap_nodes];
}
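
/*
 * Worked example (illustrative only): with vmap_zone_size = 16 MiB and
 * nr_vmap_nodes = 4, an address of 0x5000000 (80 MiB) maps to node
 * (80 MiB / 16 MiB) % 4 = 5 % 4 = 1, so neighbouring 16 MiB zones are
 * spread round-robin across the nodes.
 */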
/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
 * It is always greater than 0. A valid node_id which can
 * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id
 * is not valid, 0 is returned.
 */
static unsigned int
encode_vn_id(unsigned int node_id)
{
	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return (node_id + 1) << BITS_PER_BYTE;

	/* Warn and no node encoded. */
	WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id);
	return 0;
}

/*
 * Returns an encoded node-id, the valid range is within
 * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is
 * returned if the extracted data is wrong.
 */
static unsigned int
decode_vn_id(unsigned int val)
{
	unsigned int node_id = (val >> BITS_PER_BYTE) - 1;

	/* Can store U8_MAX [0:254] nodes. */
	if (node_id < nr_vmap_nodes)
		return node_id;

	/* If it was _not_ zero, warn. */
	WARN_ONCE(node_id != UINT_MAX,
		"Decode wrong node id (%d)\n", node_id);

	return nr_vmap_nodes;
}

static bool
is_vn_id_valid(unsigned int node_id)
{
	if (node_id < nr_vmap_nodes)
		return true;

	return false;
}
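
/*
 * Worked example (illustrative only): with BITS_PER_BYTE = 8,
 * encode_vn_id(3) stores (3 + 1) << 8 = 0x400 in va->flags, and
 * decode_vn_id(0x400) recovers (0x400 >> 8) - 1 = 3. A flags value whose
 * upper bits are zero decodes to (0 - 1) = UINT_MAX, which is >=
 * nr_vmap_nodes and is therefore reported as "no node" without warning.
 */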
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void reclaim_and_purge_vmap_areas(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *
__find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

/*
 * Returns the node where the first VA that satisfies addr < va_end resides.
 * On success, the node is locked. The caller is responsible for unlocking
 * it once the VA no longer needs to be accessed.
 *
 * Returns NULL if nothing is found.
 */
static struct vmap_node *
find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
{
	unsigned long va_start_lowest;
	struct vmap_node *vn;
	int i;

repeat:
	for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
		vn = &vmap_nodes[i];

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);

		if (*va)
			if (!va_start_lowest || (*va)->va_start < va_start_lowest)
				va_start_lowest = (*va)->va_start;
		spin_unlock(&vn->busy.lock);
	}

	/*
	 * Check if the found VA still exists, it might have gone away. In
	 * that case we repeat the search, because the VA has been removed
	 * concurrently and we need to proceed to the next one, which is a
	 * rare case.
	 */
	if (va_start_lowest) {
		vn = addr_to_node(va_start_lowest);

		spin_lock(&vn->busy.lock);
		*va = __find_vmap_area(va_start_lowest, &vn->busy.root);

		if (*va)
			return vn;

		spin_unlock(&vn->busy.lock);
		goto repeat;
	}

	return NULL;
}
/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of a conflicting overlap range
 * have to be declined and are actually considered as a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * named "link" here, where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the WARN() if there are side (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}
static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * towards the parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when a node is removed or the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from the bottom to the upper
 * levels, starting from the VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end, or
 * when a VA is newly inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After a VA has been inserted into the tree (free path);
 * - After a VA has been shrunk (allocation path);
 * - After a VA has been increased (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the way up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example, if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1, only
 * its subtree_max_size is updated, and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from the bottom towards the root until
	 * the calculated maximum available size of a checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If no coalescing is done, a new
 * free area is inserted. If the VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although this is
 * buggy behaviour, the system can stay alive and keep going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get the next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with the "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
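
/*
 * Worked example (illustrative, 64-bit): if nva_start_addr ends up as
 * 0xffffffffffff0000 and size is 0x20000, nva_start_addr + size wraps to
 * 0x10000, which is smaller than nva_start_addr, so the overflow check
 * above rejects the candidate instead of "fitting" a wrapped range.
 */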
/*
 * Find the first free block (lowest start address) in the tree
 * that satisfies the request given the passed parameters.
 * Please note, with an alignment bigger than PAGE_SIZE,
 * the search length is adjusted to account for the worst-case
 * alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * due to a "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * the parent's start address, adding "1" because we do not
					 * want to enter the same sub-tree after it has already been
					 * checked and no suitable free block was found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
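
/*
 * Worked example (illustrative only): for a free VA spanning [0x1000, 0x9000):
 * - nva_start_addr = 0x1000, size = 0x8000 -> FL_FIT_TYPE (full fit);
 * - nva_start_addr = 0x1000, size = 0x2000 -> LE_FIT_TYPE (left edge);
 * - nva_start_addr = 0x7000, size = 0x2000 -> RE_FIT_TYPE (right edge);
 * - nva_start_addr = 0x3000, size = 0x2000 -> NE_FIT_TYPE, which forces
 *   va_clip() below to split the VA into two remaining pieces.
 */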
static __always_inline int
va_clip(struct rb_root *root, struct list_head *head,
		struct vmap_area *va, unsigned long nva_start_addr,
		unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split the left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split the right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is that it most likely
			 * never ends up with NE_FIT_TYPE splitting. In the case of
			 * percpu allocations, offsets and sizes are aligned to a
			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though: for example, the
			 * first allocation (early boot-up), when we have "one"
			 * big free space that has to be split.
			 *
			 * We can also hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() for why. If so,
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purposes. That is rare and most of the time
			 * does not occur.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then the "retry" path is
			 * triggered to repeat one more time. See more details
			 * in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to the remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}
static unsigned long
va_alloc(struct vmap_area *va,
		struct rb_root *root, struct list_head *head,
		unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	int ret;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Update the free vmap_area. */
	ret = va_clip(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;

	return nva_start_addr;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise vend is returned, which indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks (their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where the requested size corresponds to exactly
	 *      the specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With an adjusted search length, the allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return vend;

	nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
	if (nva_start_addr == vend)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}
/*
 * Free a region of KVA allocated by alloc_vmap_area.
 */
static void free_vmap_area(struct vmap_area *va)
{
	struct vmap_node *vn = addr_to_node(va->va_start);

	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vn->busy.lock);
	unlink_va(va, &vn->busy.root);
	spin_unlock(&vn->busy.lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL, *tmp;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees
	 * that a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low-memory
	 * conditions and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	tmp = NULL;
	if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va))
		kmem_cache_free(vmap_area_cachep, va);
}
static struct vmap_pool *
size_to_va_pool(struct vmap_node *vn, unsigned long size)
{
	unsigned int idx = (size - 1) / PAGE_SIZE;

	if (idx < MAX_VA_SIZE_PAGES)
		return &vn->pool[idx];

	return NULL;
}
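
/*
 * Worked example (illustrative, 4 KiB pages): a 3-page VA (size = 0x3000)
 * gives idx = (0x3000 - 1) / 0x1000 = 2, i.e. pool[2] holds 3-page VAs;
 * in general pool[i] holds VAs of exactly (i + 1) pages, and sizes above
 * MAX_VA_SIZE_PAGES pages are not cached at all.
 */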
static bool
node_pool_add_va(struct vmap_node *n, struct vmap_area *va)
{
	struct vmap_pool *vp;

	vp = size_to_va_pool(n, va_size(va));
	if (!vp)
		return false;

	spin_lock(&n->pool_lock);
	list_add(&va->list, &vp->head);
	WRITE_ONCE(vp->len, vp->len + 1);
	spin_unlock(&n->pool_lock);

	return true;
}

static struct vmap_area *
node_pool_del_va(struct vmap_node *vn, unsigned long size,
		unsigned long align, unsigned long vstart,
		unsigned long vend)
{
	struct vmap_area *va = NULL;
	struct vmap_pool *vp;
	int err = 0;

	vp = size_to_va_pool(vn, size);
	if (!vp || list_empty(&vp->head))
		return NULL;

	spin_lock(&vn->pool_lock);
	if (!list_empty(&vp->head)) {
		va = list_first_entry(&vp->head, struct vmap_area, list);

		if (IS_ALIGNED(va->va_start, align)) {
			/*
			 * Do some sanity checks and emit a warning
			 * if one of the checks below detects an error.
			 */
			err |= (va_size(va) != size);
			err |= (va->va_start < vstart);
			err |= (va->va_end > vend);

			if (!WARN_ON_ONCE(err)) {
				list_del_init(&va->list);
				WRITE_ONCE(vp->len, vp->len - 1);
			} else {
				va = NULL;
			}
		} else {
			list_move_tail(&va->list, &vp->head);
			va = NULL;
		}
	}
	spin_unlock(&vn->pool_lock);

	return va;
}
static struct vmap_area *
node_alloc(unsigned long size, unsigned long align,
		unsigned long vstart, unsigned long vend,
		unsigned long *addr, unsigned int *vn_id)
{
	struct vmap_area *va;

	*vn_id = 0;
	*addr = vend;

	/*
	 * Fallback to the global heap if this is not a vmalloc-range
	 * request or there is only one node.
	 */
	if (vstart != VMALLOC_START || vend != VMALLOC_END ||
			nr_vmap_nodes == 1)
		return NULL;

	*vn_id = raw_smp_processor_id() % nr_vmap_nodes;
	va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend);
	*vn_id = encode_vn_id(*vn_id);

	if (va)
		*addr = va->va_start;

	return va;
}

static inline void setup_vmalloc_vm(struct vm_struct *vm,
		struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va_size(va);
	vm->caller = caller;
	va->vm = vm;
}
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend. If vm is passed in, the two will also be bound.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask,
				unsigned long va_flags, struct vm_struct *vm)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	unsigned int vn_id;
	int purged = 0;
	int ret;

	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
		return ERR_PTR(-EINVAL);

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	/*
	 * If a VA is obtained from the global heap (i.e. if it fails here)
	 * it is anyway marked with this "vn_id", so it is returned to this
	 * pool's node later. This gives the possibility to populate pools
	 * based on user demand.
	 *
	 * On success, a ready-to-go VA is returned.
	 */
	va = node_alloc(size, align, vstart, vend, &addr, &vn_id);
	if (!va) {
		gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
		if (unlikely(!va))
			return ERR_PTR(-ENOMEM);

		/*
		 * Only scan the relevant parts containing pointers to other
		 * objects to avoid false negatives.
		 */
		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
	}

retry:
	if (addr == vend) {
		preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
		addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
			size, align, vstart, vend);
		spin_unlock(&free_vmap_area_lock);
	}

	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;
	va->flags = (va_flags | vn_id);

	if (vm) {
		vm->addr = (void *)va->va_start;
		vm->size = va_size(va);
		va->vm = vm;
	}

	vn = addr_to_node(va->va_start);

	spin_lock(&vn->busy.lock);
	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
	spin_unlock(&vn->busy.lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		reclaim_and_purge_vmap_areas();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n",
				size, vstart, vend);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}
int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
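
/*
 * Worked example (illustrative, 4 KiB pages): 32 MiB is 8192 pages, so a
 * 16-CPU system gives log = fls(16) = 5 and a threshold of 5 * 8192 =
 * 40960 lazily-freed pages (160 MiB) before a purge is triggered.
 */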
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
static cpumask_t purge_nodes;

static void
reclaim_list_global(struct list_head *head)
{
	struct vmap_area *va, *n;

	if (list_empty(head))
		return;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n, head, list)
		merge_or_add_vmap_area_augment(va,
			&free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}
2131 static void
2132 decay_va_pool_node(struct vmap_node *vn, bool full_decay)
2134 LIST_HEAD(decay_list);
2135 struct rb_root decay_root = RB_ROOT;
2136 struct vmap_area *va, *nva;
2137 unsigned long n_decay;
2138 int i;
2140 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
2141 LIST_HEAD(tmp_list);
2143 if (list_empty(&vn->pool[i].head))
2144 continue;
2146 /* Detach the pool, so no-one can access it. */
2147 spin_lock(&vn->pool_lock);
2148 list_replace_init(&vn->pool[i].head, &tmp_list);
2149 spin_unlock(&vn->pool_lock);
2151 if (full_decay)
2152 WRITE_ONCE(vn->pool[i].len, 0);
2154 /* Decay the pool by ~25% of its remaining objects. */
2155 n_decay = vn->pool[i].len >> 2;
2157 list_for_each_entry_safe(va, nva, &tmp_list, list) {
2158 list_del_init(&va->list);
2159 merge_or_add_vmap_area(va, &decay_root, &decay_list);
2161 if (!full_decay) {
2162 WRITE_ONCE(vn->pool[i].len, vn->pool[i].len - 1);
2164 if (!--n_decay)
2165 break;
2170 * Reattach the pool if it has only been partly decayed.
2171 * Please note, no other context can populate the pool at
2172 * this point, therefore a simple list replace operation
2173 * is sufficient here.
2175 if (!full_decay && !list_empty(&tmp_list)) {
2176 spin_lock(&vn->pool_lock);
2177 list_replace_init(&tmp_list, &vn->pool[i].head);
2178 spin_unlock(&vn->pool_lock);
2182 reclaim_list_global(&decay_list);
2185 static void
2186 kasan_release_vmalloc_node(struct vmap_node *vn)
2188 struct vmap_area *va;
2189 unsigned long start, end;
2191 start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
2192 end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;
2194 list_for_each_entry(va, &vn->purge_list, list) {
2195 if (is_vmalloc_or_module_addr((void *) va->va_start))
2196 kasan_release_vmalloc(va->va_start, va->va_end,
2197 va->va_start, va->va_end,
2198 KASAN_VMALLOC_PAGE_RANGE);
2201 kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
2204 static void purge_vmap_node(struct work_struct *work)
2206 struct vmap_node *vn = container_of(work,
2207 struct vmap_node, purge_work);
2208 unsigned long nr_purged_pages = 0;
2209 struct vmap_area *va, *n_va;
2210 LIST_HEAD(local_list);
2212 if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
2213 kasan_release_vmalloc_node(vn);
2215 vn->nr_purged = 0;
2217 list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
2218 unsigned long nr = va_size(va) >> PAGE_SHIFT;
2219 unsigned int vn_id = decode_vn_id(va->flags);
2221 list_del_init(&va->list);
2223 nr_purged_pages += nr;
2224 vn->nr_purged++;
2226 if (is_vn_id_valid(vn_id) && !vn->skip_populate)
2227 if (node_pool_add_va(vn, va))
2228 continue;
2230 /* Go back to global. */
2231 list_add(&va->list, &local_list);
2234 atomic_long_sub(nr_purged_pages, &vmap_lazy_nr);
2236 reclaim_list_global(&local_list);
2240 * Purges all lazily-freed vmap areas.
2242 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
2243 bool full_pool_decay)
2245 unsigned long nr_purged_areas = 0;
2246 unsigned int nr_purge_helpers;
2247 unsigned int nr_purge_nodes;
2248 struct vmap_node *vn;
2249 int i;
2251 lockdep_assert_held(&vmap_purge_lock);
2254 * Use cpumask to mark which node has to be processed.
2256 purge_nodes = CPU_MASK_NONE;
2258 for (i = 0; i < nr_vmap_nodes; i++) {
2259 vn = &vmap_nodes[i];
2261 INIT_LIST_HEAD(&vn->purge_list);
2262 vn->skip_populate = full_pool_decay;
2263 decay_va_pool_node(vn, full_pool_decay);
2265 if (RB_EMPTY_ROOT(&vn->lazy.root))
2266 continue;
2268 spin_lock(&vn->lazy.lock);
2269 WRITE_ONCE(vn->lazy.root.rb_node, NULL);
2270 list_replace_init(&vn->lazy.head, &vn->purge_list);
2271 spin_unlock(&vn->lazy.lock);
2273 start = min(start, list_first_entry(&vn->purge_list,
2274 struct vmap_area, list)->va_start);
2276 end = max(end, list_last_entry(&vn->purge_list,
2277 struct vmap_area, list)->va_end);
2279 cpumask_set_cpu(i, &purge_nodes);
2282 nr_purge_nodes = cpumask_weight(&purge_nodes);
2283 if (nr_purge_nodes > 0) {
2284 flush_tlb_kernel_range(start, end);
2286 /* One extra helper per full lazy_max_pages() set, minus one. */
2287 nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages();
2288 nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1;
2290 for_each_cpu(i, &purge_nodes) {
2291 vn = &vmap_nodes[i];
2293 if (nr_purge_helpers > 0) {
2294 INIT_WORK(&vn->purge_work, purge_vmap_node);
2296 if (cpumask_test_cpu(i, cpu_online_mask))
2297 schedule_work_on(i, &vn->purge_work);
2298 else
2299 schedule_work(&vn->purge_work);
2301 nr_purge_helpers--;
2302 } else {
2303 vn->purge_work.func = NULL;
2304 purge_vmap_node(&vn->purge_work);
2305 nr_purged_areas += vn->nr_purged;
2309 for_each_cpu(i, &purge_nodes) {
2310 vn = &vmap_nodes[i];
2312 if (vn->purge_work.func) {
2313 flush_work(&vn->purge_work);
2314 nr_purged_areas += vn->nr_purged;
2319 trace_purge_vmap_area_lazy(start, end, nr_purged_areas);
2320 return nr_purged_areas > 0;
2324 * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list.
2326 static void reclaim_and_purge_vmap_areas(void)
2329 mutex_lock(&vmap_purge_lock);
2330 purge_fragmented_blocks_allcpus();
2331 __purge_vmap_area_lazy(ULONG_MAX, 0, true);
2332 mutex_unlock(&vmap_purge_lock);
2335 static void drain_vmap_area_work(struct work_struct *work)
2337 mutex_lock(&vmap_purge_lock);
2338 __purge_vmap_area_lazy(ULONG_MAX, 0, false);
2339 mutex_unlock(&vmap_purge_lock);
2343 * Free a vmap area; the caller must ensure that the area has been
2344 * unmapped and unlinked, and that flush_cache_vunmap() has been
2345 * called for the relevant range beforehand.
2347 static void free_vmap_area_noflush(struct vmap_area *va)
2349 unsigned long nr_lazy_max = lazy_max_pages();
2350 unsigned long va_start = va->va_start;
2351 unsigned int vn_id = decode_vn_id(va->flags);
2352 struct vmap_node *vn;
2353 unsigned long nr_lazy;
2355 if (WARN_ON_ONCE(!list_empty(&va->list)))
2356 return;
2358 nr_lazy = atomic_long_add_return(va_size(va) >> PAGE_SHIFT,
2359 &vmap_lazy_nr);
2362 * If it was requested by a certain node, we would like to
2363 * return it to that node, i.e. its pool, for later reuse.
2365 vn = is_vn_id_valid(vn_id) ?
2366 id_to_node(vn_id) : addr_to_node(va->va_start);
2368 spin_lock(&vn->lazy.lock);
2369 insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
2370 spin_unlock(&vn->lazy.lock);
2372 trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
2374 /* After this point, we may free va at any time */
2375 if (unlikely(nr_lazy > nr_lazy_max))
2376 schedule_work(&drain_vmap_work);
2380 * Free and unmap a vmap area
2382 static void free_unmap_vmap_area(struct vmap_area *va)
2384 flush_cache_vunmap(va->va_start, va->va_end);
2385 vunmap_range_noflush(va->va_start, va->va_end);
2386 if (debug_pagealloc_enabled_static())
2387 flush_tlb_kernel_range(va->va_start, va->va_end);
2389 free_vmap_area_noflush(va);
2392 struct vmap_area *find_vmap_area(unsigned long addr)
2394 struct vmap_node *vn;
2395 struct vmap_area *va;
2396 int i, j;
2398 if (unlikely(!vmap_initialized))
2399 return NULL;
2402 * addr_to_node_id(addr) converts an address to the index of the
2403 * node where a VA is located. If the VA spans several zones and the
2404 * passed addr is not the same as va->va_start, which is not common,
2405 * we may need to scan extra nodes. See an example:
2407 * <----va---->
2408 * -|-----|-----|-----|-----|-
2409 * 1 2 0 1
2411 * The VA resides in node 1 whereas it spans nodes 1, 2 and 0. If the
2412 * passed addr falls within node 2 or 0, we must do extra work.
2414 i = j = addr_to_node_id(addr);
2415 do {
2416 vn = &vmap_nodes[i];
2418 spin_lock(&vn->busy.lock);
2419 va = __find_vmap_area(addr, &vn->busy.root);
2420 spin_unlock(&vn->busy.lock);
2422 if (va)
2423 return va;
2424 } while ((i = (i + 1) % nr_vmap_nodes) != j);
2426 return NULL;
2429 static struct vmap_area *find_unlink_vmap_area(unsigned long addr)
2431 struct vmap_node *vn;
2432 struct vmap_area *va;
2433 int i, j;
2436 * Check the comment in the find_vmap_area() about the loop.
2438 i = j = addr_to_node_id(addr);
2439 do {
2440 vn = &vmap_nodes[i];
2442 spin_lock(&vn->busy.lock);
2443 va = __find_vmap_area(addr, &vn->busy.root);
2444 if (va)
2445 unlink_va(va, &vn->busy.root);
2446 spin_unlock(&vn->busy.lock);
2448 if (va)
2449 return va;
2450 } while ((i = (i + 1) % nr_vmap_nodes) != j);
2452 return NULL;
2455 /*** Per cpu kva allocator ***/
2458 * vmap space is limited especially on 32 bit architectures. Ensure there is
2459 * room for at least 16 percpu vmap blocks per CPU.
2462 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
2463 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Since we
2464 * don't, guess instead (we just need a rough idea).
2466 #if BITS_PER_LONG == 32
2467 #define VMALLOC_SPACE (128UL*1024*1024)
2468 #else
2469 #define VMALLOC_SPACE (128UL*1024*1024*1024)
2470 #endif
2472 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
2473 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
2474 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
2475 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
2476 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
2477 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
2478 #define VMAP_BBMAP_BITS \
2479 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
2480 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
2481 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
2483 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
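/*
 * Worked example (illustrative): on 64-bit with 4K pages and
 * NR_CPUS == 64, VMALLOC_PAGES is 32M, so
 * VMALLOC_PAGES / 64 / 16 == 32768, which is clamped down to
 * VMAP_BBMAP_BITS_MAX == 1024, giving a VMAP_BLOCK_SIZE of 4MB.
 */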
2486 * Purge threshold to prevent overeager purging of fragmented blocks for
2487 * regular operations: Purge if vb->free is less than 1/4 of the capacity.
2489 #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4)
2491 #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/
2492 #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/
2493 #define VMAP_FLAGS_MASK 0x3
2495 struct vmap_block_queue {
2496 spinlock_t lock;
2497 struct list_head free;
2500 * An xarray requires extra memory to be allocated
2501 * dynamically. If that becomes an issue, an rb-tree
2502 * could be used instead.
2504 struct xarray vmap_blocks;
2507 struct vmap_block {
2508 spinlock_t lock;
2509 struct vmap_area *va;
2510 unsigned long free, dirty;
2511 DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS);
2512 unsigned long dirty_min, dirty_max; /*< dirty range */
2513 struct list_head free_list;
2514 struct rcu_head rcu_head;
2515 struct list_head purge;
2516 unsigned int cpu;
2519 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
2520 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
2523 * To get fast access to any "vmap_block" associated with a
2524 * specific address, we use a hash.
2526 * A per-cpu vmap_block_queue is used in two ways: to serialize
2527 * access to the free block chains among CPUs (alloc path), and it
2528 * also acts as a vmap_block hash (alloc/free paths). That is, we
2529 * overload it, since we already have a per-cpu array that can be
2530 * used as a hash table. When used as a hash, the 'cpu' passed to
2531 * per_cpu() is not actually a CPU but rather a hash index.
2533 * The hash function is addr_to_vb_xa(), which hashes any address
2534 * to the specific hash index it belongs to. The per_cpu() macro is
2535 * then used to access the array at the generated index.
2537 * An example:
2539 * CPU_1 CPU_2 CPU_0
2540 * | | |
2541 * V V V
2542 * 0 10 20 30 40 50 60
2543 * |------|------|------|------|------|------|...<vmap address space>
2544 * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2
2546 * - CPU_1 invokes vm_unmap_ram(6); 6 belongs to the CPU0 zone, thus
2547 * it accesses: CPU0/INDEX0 -> vmap_blocks -> xa_lock;
2549 * - CPU_2 invokes vm_unmap_ram(11); 11 belongs to the CPU1 zone, thus
2550 * it accesses: CPU1/INDEX1 -> vmap_blocks -> xa_lock;
2552 * - CPU_0 invokes vm_unmap_ram(20); 20 belongs to the CPU2 zone, thus
2553 * it accesses: CPU2/INDEX2 -> vmap_blocks -> xa_lock.
2555 * This technique almost always avoids lock contention on insert/remove,
2556 * however xarray spinlocks protect against any contention that remains.
2558 static struct xarray *
2559 addr_to_vb_xa(unsigned long addr)
2561 int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
2564 * Please note, nr_cpu_ids - 1 is the highest possible
2565 * CPU id and is always set in cpu_possible_mask, i.e. we
2566 * never invoke cpumask_next() for an index of nr_cpu_ids - 1.
2568 if (!cpu_possible(index))
2569 index = cpumask_next(index, cpu_possible_mask);
2571 return &per_cpu(vmap_block_queue, index).vmap_blocks;
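/*
 * Illustrative example: with a 4MB VMAP_BLOCK_SIZE and nr_cpu_ids == 4,
 * any two addresses inside the same 4MB block compute the same index
 * (addr / VMAP_BLOCK_SIZE) % 4 and therefore land in the same xarray,
 * no matter which CPU performs the lookup.
 */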
2575 * We should probably have a fallback mechanism to allocate virtual memory
2576 * out of partially filled vmap blocks. However vmap block sizing should be
2577 * fairly reasonable according to the vmalloc size, so it shouldn't be a
2578 * big problem.
2581 static unsigned long addr_to_vb_idx(unsigned long addr)
2583 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
2584 addr /= VMAP_BLOCK_SIZE;
2585 return addr;
2588 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
2590 unsigned long addr;
2592 addr = va_start + (pages_off << PAGE_SHIFT);
2593 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
2594 return (void *)addr;
2598 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
2599 * block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
2600 * @order: how many 2^order pages should be occupied in newly allocated block
2601 * @gfp_mask: flags for the page level allocator
2603 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
2605 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
2607 struct vmap_block_queue *vbq;
2608 struct vmap_block *vb;
2609 struct vmap_area *va;
2610 struct xarray *xa;
2611 unsigned long vb_idx;
2612 int node, err;
2613 void *vaddr;
2615 node = numa_node_id();
2617 vb = kmalloc_node(sizeof(struct vmap_block),
2618 gfp_mask & GFP_RECLAIM_MASK, node);
2619 if (unlikely(!vb))
2620 return ERR_PTR(-ENOMEM);
2622 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2623 VMALLOC_START, VMALLOC_END,
2624 node, gfp_mask,
2625 VMAP_RAM|VMAP_BLOCK, NULL);
2626 if (IS_ERR(va)) {
2627 kfree(vb);
2628 return ERR_CAST(va);
2631 vaddr = vmap_block_vaddr(va->va_start, 0);
2632 spin_lock_init(&vb->lock);
2633 vb->va = va;
2634 /* At least something should be left free */
2635 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
2636 bitmap_zero(vb->used_map, VMAP_BBMAP_BITS);
2637 vb->free = VMAP_BBMAP_BITS - (1UL << order);
2638 vb->dirty = 0;
2639 vb->dirty_min = VMAP_BBMAP_BITS;
2640 vb->dirty_max = 0;
2641 bitmap_set(vb->used_map, 0, (1UL << order));
2642 INIT_LIST_HEAD(&vb->free_list);
2643 vb->cpu = raw_smp_processor_id();
2645 xa = addr_to_vb_xa(va->va_start);
2646 vb_idx = addr_to_vb_idx(va->va_start);
2647 err = xa_insert(xa, vb_idx, vb, gfp_mask);
2648 if (err) {
2649 kfree(vb);
2650 free_vmap_area(va);
2651 return ERR_PTR(err);
2654 * list_add_tail_rcu() could happen on a core other
2655 * than vb->cpu due to task migration, which is safe,
2656 * as list_add_tail_rcu() will ensure the list's
2657 * integrity together with list_for_each_rcu from the
2658 * read side.
2660 vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu);
2661 spin_lock(&vbq->lock);
2662 list_add_tail_rcu(&vb->free_list, &vbq->free);
2663 spin_unlock(&vbq->lock);
2665 return vaddr;
2668 static void free_vmap_block(struct vmap_block *vb)
2670 struct vmap_node *vn;
2671 struct vmap_block *tmp;
2672 struct xarray *xa;
2674 xa = addr_to_vb_xa(vb->va->va_start);
2675 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2676 BUG_ON(tmp != vb);
2678 vn = addr_to_node(vb->va->va_start);
2679 spin_lock(&vn->busy.lock);
2680 unlink_va(vb->va, &vn->busy.root);
2681 spin_unlock(&vn->busy.lock);
2683 free_vmap_area_noflush(vb->va);
2684 kfree_rcu(vb, rcu_head);
2687 static bool purge_fragmented_block(struct vmap_block *vb,
2688 struct list_head *purge_list, bool force_purge)
2690 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu);
2692 if (vb->free + vb->dirty != VMAP_BBMAP_BITS ||
2693 vb->dirty == VMAP_BBMAP_BITS)
2694 return false;
2696 /* Don't overeagerly purge usable blocks unless requested */
2697 if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD))
2698 return false;
2700 /* prevent further allocs after releasing lock */
2701 WRITE_ONCE(vb->free, 0);
2702 /* prevent purging it again */
2703 WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS);
2704 vb->dirty_min = 0;
2705 vb->dirty_max = VMAP_BBMAP_BITS;
2706 spin_lock(&vbq->lock);
2707 list_del_rcu(&vb->free_list);
2708 spin_unlock(&vbq->lock);
2709 list_add_tail(&vb->purge, purge_list);
2710 return true;
2713 static void free_purged_blocks(struct list_head *purge_list)
2715 struct vmap_block *vb, *n_vb;
2717 list_for_each_entry_safe(vb, n_vb, purge_list, purge) {
2718 list_del(&vb->purge);
2719 free_vmap_block(vb);
2723 static void purge_fragmented_blocks(int cpu)
2725 LIST_HEAD(purge);
2726 struct vmap_block *vb;
2727 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2729 rcu_read_lock();
2730 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2731 unsigned long free = READ_ONCE(vb->free);
2732 unsigned long dirty = READ_ONCE(vb->dirty);
2734 if (free + dirty != VMAP_BBMAP_BITS ||
2735 dirty == VMAP_BBMAP_BITS)
2736 continue;
2738 spin_lock(&vb->lock);
2739 purge_fragmented_block(vb, &purge, true);
2740 spin_unlock(&vb->lock);
2742 rcu_read_unlock();
2743 free_purged_blocks(&purge);
2746 static void purge_fragmented_blocks_allcpus(void)
2748 int cpu;
2750 for_each_possible_cpu(cpu)
2751 purge_fragmented_blocks(cpu);
2754 static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
2756 struct vmap_block_queue *vbq;
2757 struct vmap_block *vb;
2758 void *vaddr = NULL;
2759 unsigned int order;
2761 BUG_ON(offset_in_page(size));
2762 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2763 if (WARN_ON(size == 0)) {
2765 * Allocating 0 bytes isn't what the caller wants, since
2766 * get_order(0) returns a funny result. Just warn and
2767 * terminate early.
2769 return ERR_PTR(-EINVAL);
2771 order = get_order(size);
2773 rcu_read_lock();
2774 vbq = raw_cpu_ptr(&vmap_block_queue);
2775 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2776 unsigned long pages_off;
2778 if (READ_ONCE(vb->free) < (1UL << order))
2779 continue;
2781 spin_lock(&vb->lock);
2782 if (vb->free < (1UL << order)) {
2783 spin_unlock(&vb->lock);
2784 continue;
2787 pages_off = VMAP_BBMAP_BITS - vb->free;
2788 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2789 WRITE_ONCE(vb->free, vb->free - (1UL << order));
2790 bitmap_set(vb->used_map, pages_off, (1UL << order));
2791 if (vb->free == 0) {
2792 spin_lock(&vbq->lock);
2793 list_del_rcu(&vb->free_list);
2794 spin_unlock(&vbq->lock);
2797 spin_unlock(&vb->lock);
2798 break;
2801 rcu_read_unlock();
2803 /* Allocate new block if nothing was found */
2804 if (!vaddr)
2805 vaddr = new_vmap_block(order, gfp_mask);
2807 return vaddr;
2810 static void vb_free(unsigned long addr, unsigned long size)
2812 unsigned long offset;
2813 unsigned int order;
2814 struct vmap_block *vb;
2815 struct xarray *xa;
2817 BUG_ON(offset_in_page(size));
2818 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2820 flush_cache_vunmap(addr, addr + size);
2822 order = get_order(size);
2823 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2825 xa = addr_to_vb_xa(addr);
2826 vb = xa_load(xa, addr_to_vb_idx(addr));
2828 spin_lock(&vb->lock);
2829 bitmap_clear(vb->used_map, offset, (1UL << order));
2830 spin_unlock(&vb->lock);
2832 vunmap_range_noflush(addr, addr + size);
2834 if (debug_pagealloc_enabled_static())
2835 flush_tlb_kernel_range(addr, addr + size);
2837 spin_lock(&vb->lock);
2839 /* Expand the not-yet-TLB-flushed dirty range */
2840 vb->dirty_min = min(vb->dirty_min, offset);
2841 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2843 WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order));
2844 if (vb->dirty == VMAP_BBMAP_BITS) {
2845 BUG_ON(vb->free);
2846 spin_unlock(&vb->lock);
2847 free_vmap_block(vb);
2848 } else
2849 spin_unlock(&vb->lock);
2852 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2854 LIST_HEAD(purge_list);
2855 int cpu;
2857 if (unlikely(!vmap_initialized))
2858 return;
2860 mutex_lock(&vmap_purge_lock);
2862 for_each_possible_cpu(cpu) {
2863 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2864 struct vmap_block *vb;
2865 unsigned long idx;
2867 rcu_read_lock();
2868 xa_for_each(&vbq->vmap_blocks, idx, vb) {
2869 spin_lock(&vb->lock);
2872 * Try to purge a fragmented block first. If it's
2873 * not purgeable, check whether there is dirty
2874 * space to be flushed.
2876 if (!purge_fragmented_block(vb, &purge_list, false) &&
2877 vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) {
2878 unsigned long va_start = vb->va->va_start;
2879 unsigned long s, e;
2881 s = va_start + (vb->dirty_min << PAGE_SHIFT);
2882 e = va_start + (vb->dirty_max << PAGE_SHIFT);
2884 start = min(s, start);
2885 end = max(e, end);
2887 /* Prevent this from being flushed again */
2888 vb->dirty_min = VMAP_BBMAP_BITS;
2889 vb->dirty_max = 0;
2891 flush = 1;
2893 spin_unlock(&vb->lock);
2895 rcu_read_unlock();
2897 free_purged_blocks(&purge_list);
2899 if (!__purge_vmap_area_lazy(start, end, false) && flush)
2900 flush_tlb_kernel_range(start, end);
2901 mutex_unlock(&vmap_purge_lock);
2905 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2907 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2908 * to amortize TLB flushing overheads. What this means is that any page you
2909 * have now may, in a former life, have been mapped into a kernel virtual
2910 * address by the vmap layer, so there might be some CPUs with TLB entries
2911 * still referencing that page (in addition to the regular 1:1 kernel mapping).
2913 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2914 * be sure that none of the pages we have control over will have any aliases
2915 * from the vmap layer.
2917 void vm_unmap_aliases(void)
2919 unsigned long start = ULONG_MAX, end = 0;
2920 int flush = 0;
2922 _vm_unmap_aliases(start, end, flush);
2924 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2927 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2928 * @mem: the pointer returned by vm_map_ram
2929 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2931 void vm_unmap_ram(const void *mem, unsigned int count)
2933 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2934 unsigned long addr = (unsigned long)kasan_reset_tag(mem);
2935 struct vmap_area *va;
2937 might_sleep();
2938 BUG_ON(!addr);
2939 BUG_ON(addr < VMALLOC_START);
2940 BUG_ON(addr > VMALLOC_END);
2941 BUG_ON(!PAGE_ALIGNED(addr));
2943 kasan_poison_vmalloc(mem, size);
2945 if (likely(count <= VMAP_MAX_ALLOC)) {
2946 debug_check_no_locks_freed(mem, size);
2947 vb_free(addr, size);
2948 return;
2951 va = find_unlink_vmap_area(addr);
2952 if (WARN_ON_ONCE(!va))
2953 return;
2955 debug_check_no_locks_freed((void *)va->va_start, va_size(va));
2956 free_unmap_vmap_area(va);
2958 EXPORT_SYMBOL(vm_unmap_ram);
2961 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2962 * @pages: an array of pointers to the pages to be mapped
2963 * @count: number of pages
2964 * @node: prefer to allocate data structures on this node
2966 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2967 * faster than vmap(), so that is good. But if you mix long-lived and
2968 * short-lived objects with vm_map_ram(), it can consume lots of address
2969 * space through fragmentation (especially on a 32bit machine), and you may
2970 * eventually see failures. Please use this function for short-lived objects.
2972 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2974 void *vm_map_ram(struct page **pages, unsigned int count, int node)
2976 unsigned long size = (unsigned long)count << PAGE_SHIFT;
2977 unsigned long addr;
2978 void *mem;
2980 if (likely(count <= VMAP_MAX_ALLOC)) {
2981 mem = vb_alloc(size, GFP_KERNEL);
2982 if (IS_ERR(mem))
2983 return NULL;
2984 addr = (unsigned long)mem;
2985 } else {
2986 struct vmap_area *va;
2987 va = alloc_vmap_area(size, PAGE_SIZE,
2988 VMALLOC_START, VMALLOC_END,
2989 node, GFP_KERNEL, VMAP_RAM,
2990 NULL);
2991 if (IS_ERR(va))
2992 return NULL;
2994 addr = va->va_start;
2995 mem = (void *)addr;
2998 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2999 pages, PAGE_SHIFT) < 0) {
3000 vm_unmap_ram(mem, count);
3001 return NULL;
3005 * Mark the pages as accessible, now that they are mapped.
3006 * With hardware tag-based KASAN, marking is skipped for
3007 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3009 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);
3011 return mem;
3013 EXPORT_SYMBOL(vm_map_ram);
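/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	struct page *pages[4];
 *	void *va;
 *
 *	... obtain four pages ...
 *	va = vm_map_ram(pages, ARRAY_SIZE(pages), NUMA_NO_NODE);
 *	if (va) {
 *		... short-lived use of the contiguous window ...
 *		vm_unmap_ram(va, ARRAY_SIZE(pages));	// same count as map
 *	}
 */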
3015 static struct vm_struct *vmlist __initdata;
3017 static inline unsigned int vm_area_page_order(struct vm_struct *vm)
3019 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3020 return vm->page_order;
3021 #else
3022 return 0;
3023 #endif
3026 unsigned int get_vm_area_page_order(struct vm_struct *vm)
3028 return vm_area_page_order(vm);
3031 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
3033 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
3034 vm->page_order = order;
3035 #else
3036 BUG_ON(order != 0);
3037 #endif
3041 * vm_area_add_early - add vmap area early during boot
3042 * @vm: vm_struct to add
3044 * This function is used to add fixed kernel vm area to vmlist before
3045 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
3046 * should contain proper values and the other fields should be zero.
3048 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3050 void __init vm_area_add_early(struct vm_struct *vm)
3052 struct vm_struct *tmp, **p;
3054 BUG_ON(vmap_initialized);
3055 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
3056 if (tmp->addr >= vm->addr) {
3057 BUG_ON(tmp->addr < vm->addr + vm->size);
3058 break;
3059 } else
3060 BUG_ON(tmp->addr + tmp->size > vm->addr);
3062 vm->next = *p;
3063 *p = vm;
3067 * vm_area_register_early - register vmap area early during boot
3068 * @vm: vm_struct to register
3069 * @align: requested alignment
3071 * This function is used to register kernel vm area before
3072 * vmalloc_init() is called. @vm->size and @vm->flags should contain
3073 * proper values on entry and other fields should be zero. On return,
3074 * vm->addr contains the allocated address.
3076 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
3078 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
3080 unsigned long addr = ALIGN(VMALLOC_START, align);
3081 struct vm_struct *cur, **p;
3083 BUG_ON(vmap_initialized);
3085 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
3086 if ((unsigned long)cur->addr - addr >= vm->size)
3087 break;
3088 addr = ALIGN((unsigned long)cur->addr + cur->size, align);
3091 BUG_ON(addr > VMALLOC_END - vm->size);
3092 vm->addr = (void *)addr;
3093 vm->next = *p;
3094 *p = vm;
3095 kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
3098 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
3101 * Before removing VM_UNINITIALIZED,
3102 * we should make sure that vm has proper values.
3103 * Pair with smp_rmb() in show_numa_info().
3105 smp_wmb();
3106 vm->flags &= ~VM_UNINITIALIZED;
3109 struct vm_struct *__get_vm_area_node(unsigned long size,
3110 unsigned long align, unsigned long shift, unsigned long flags,
3111 unsigned long start, unsigned long end, int node,
3112 gfp_t gfp_mask, const void *caller)
3114 struct vmap_area *va;
3115 struct vm_struct *area;
3116 unsigned long requested_size = size;
3118 BUG_ON(in_interrupt());
3119 size = ALIGN(size, 1ul << shift);
3120 if (unlikely(!size))
3121 return NULL;
3123 if (flags & VM_IOREMAP)
3124 align = 1ul << clamp_t(int, get_count_order_long(size),
3125 PAGE_SHIFT, IOREMAP_MAX_ORDER);
3127 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
3128 if (unlikely(!area))
3129 return NULL;
3131 if (!(flags & VM_NO_GUARD))
3132 size += PAGE_SIZE;
3134 area->flags = flags;
3135 area->caller = caller;
3137 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
3138 if (IS_ERR(va)) {
3139 kfree(area);
3140 return NULL;
3144 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
3145 * best-effort approach, as they can be mapped outside of vmalloc code.
3146 * For VM_ALLOC mappings, the pages are marked as accessible after
3147 * getting mapped in __vmalloc_node_range().
3148 * With hardware tag-based KASAN, marking is skipped for
3149 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
3151 if (!(flags & VM_ALLOC))
3152 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
3153 KASAN_VMALLOC_PROT_NORMAL);
3155 return area;
3158 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
3159 unsigned long start, unsigned long end,
3160 const void *caller)
3162 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
3163 NUMA_NO_NODE, GFP_KERNEL, caller);
3167 * get_vm_area - reserve a contiguous kernel virtual area
3168 * @size: size of the area
3169 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
3171 * Search for an area of @size in the kernel virtual mapping area,
3172 * and reserve it for our purposes. Returns the area descriptor
3173 * on success or %NULL on failure.
3175 * Return: the area descriptor on success or %NULL on failure.
3177 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
3179 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3180 VMALLOC_START, VMALLOC_END,
3181 NUMA_NO_NODE, GFP_KERNEL,
3182 __builtin_return_address(0));
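/*
 * Illustrative sketch (hypothetical caller): reserve kernel virtual
 * space without backing pages, then release it with free_vm_area().
 *
 *	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
 *
 *	if (area) {
 *		... establish a mapping at area->addr ...
 *		free_vm_area(area);
 *	}
 */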
3185 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
3186 const void *caller)
3188 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
3189 VMALLOC_START, VMALLOC_END,
3190 NUMA_NO_NODE, GFP_KERNEL, caller);
3195 * find_vm_area - find a contiguous kernel virtual area
3195 * @addr: base address
3197 * Search for the kernel VM area starting at @addr, and return it.
3198 * It is up to the caller to do all required locking to keep the returned
3199 * pointer valid.
3201 * Return: the area descriptor on success or %NULL on failure.
3203 struct vm_struct *find_vm_area(const void *addr)
3205 struct vmap_area *va;
3207 va = find_vmap_area((unsigned long)addr);
3208 if (!va)
3209 return NULL;
3211 return va->vm;
3215 * remove_vm_area - find and remove a contiguous kernel virtual area
3216 * @addr: base address
3218 * Search for the kernel VM area starting at @addr, and remove it.
3219 * This function returns the found VM area, but using it is NOT safe
3220 * on SMP machines, except for its size or flags.
3222 * Return: the area descriptor on success or %NULL on failure.
3224 struct vm_struct *remove_vm_area(const void *addr)
3226 struct vmap_area *va;
3227 struct vm_struct *vm;
3229 might_sleep();
3231 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
3232 addr))
3233 return NULL;
3235 va = find_unlink_vmap_area((unsigned long)addr);
3236 if (!va || !va->vm)
3237 return NULL;
3238 vm = va->vm;
3240 debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
3241 debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
3242 kasan_free_module_shadow(vm);
3243 kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));
3245 free_unmap_vmap_area(va);
3246 return vm;
3249 static inline void set_area_direct_map(const struct vm_struct *area,
3250 int (*set_direct_map)(struct page *page))
3252 int i;
3254 /* HUGE_VMALLOC passes small pages to set_direct_map */
3255 for (i = 0; i < area->nr_pages; i++)
3256 if (page_address(area->pages[i]))
3257 set_direct_map(area->pages[i]);
3261 * Flush the vm mapping and reset the direct map.
3263 static void vm_reset_perms(struct vm_struct *area)
3265 unsigned long start = ULONG_MAX, end = 0;
3266 unsigned int page_order = vm_area_page_order(area);
3267 int flush_dmap = 0;
3268 int i;
3271 * Find the start and end range of the direct mappings to make sure that
3272 * the vm_unmap_aliases() flush includes the direct map.
3274 for (i = 0; i < area->nr_pages; i += 1U << page_order) {
3275 unsigned long addr = (unsigned long)page_address(area->pages[i]);
3277 if (addr) {
3278 unsigned long page_size;
3280 page_size = PAGE_SIZE << page_order;
3281 start = min(addr, start);
3282 end = max(addr + page_size, end);
3283 flush_dmap = 1;
3288 * Set direct map to something invalid so that it won't be cached if
3289 * there are any accesses after the TLB flush, then flush the TLB and
3290 * reset the direct map permissions to the default.
3292 set_area_direct_map(area, set_direct_map_invalid_noflush);
3293 _vm_unmap_aliases(start, end, flush_dmap);
3294 set_area_direct_map(area, set_direct_map_default_noflush);
3297 static void delayed_vfree_work(struct work_struct *w)
3299 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
3300 struct llist_node *t, *llnode;
3302 llist_for_each_safe(llnode, t, llist_del_all(&p->list))
3303 vfree(llnode);
3307 * vfree_atomic - release memory allocated by vmalloc()
3308 * @addr: memory base address
3310 * This one is just like vfree() but can be called in any atomic context
3311 * except NMIs.
3313 void vfree_atomic(const void *addr)
3315 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
3317 BUG_ON(in_nmi());
3318 kmemleak_free(addr);
3321 * Use raw_cpu_ptr() because this can be called from preemptible
3322 * context. Preemption is absolutely fine here, because the llist_add()
3323 * implementation is lockless, so it works even if we are adding to
3324 * another cpu's list. schedule_work() should be fine with this too.
3326 if (addr && llist_add((struct llist_node *)addr, &p->list))
3327 schedule_work(&p->wq);
3331 * vfree - Release memory allocated by vmalloc()
3332 * @addr: Memory base address
3334 * Free the virtually contiguous memory area starting at @addr, as obtained
3335 * from one of the vmalloc() family of APIs. This will usually also free the
3336 * physical memory underlying the virtual allocation, but that memory is
3337 * reference counted, so it will not be freed until the last user goes away.
3339 * If @addr is NULL, no operation is performed.
3341 * Context:
3342 * May sleep if called *not* from interrupt context.
3343 * Must not be called in NMI context (strictly speaking, it could be
3344 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
3345 * conventions for vfree() arch-dependent would be a really bad idea).
3347 void vfree(const void *addr)
3349 struct vm_struct *vm;
3350 int i;
3352 if (unlikely(in_interrupt())) {
3353 vfree_atomic(addr);
3354 return;
3357 BUG_ON(in_nmi());
3358 kmemleak_free(addr);
3359 might_sleep();
3361 if (!addr)
3362 return;
3364 vm = remove_vm_area(addr);
3365 if (unlikely(!vm)) {
3366 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
3367 addr);
3368 return;
3371 if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
3372 vm_reset_perms(vm);
3373 for (i = 0; i < vm->nr_pages; i++) {
3374 struct page *page = vm->pages[i];
3376 BUG_ON(!page);
3377 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
3379 * High-order allocs for huge vmallocs are split, so
3380 * they can be freed as an array of order-0 allocations.
3382 __free_page(page);
3383 cond_resched();
3385 atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
3386 kvfree(vm->pages);
3387 kfree(vm);
3389 EXPORT_SYMBOL(vfree);
3392 * vunmap - release virtual mapping obtained by vmap()
3393 * @addr: memory base address
3395 * Free the virtually contiguous memory area starting at @addr,
3396 * which was created from the page array passed to vmap().
3398 * Must not be called in interrupt context.
3400 void vunmap(const void *addr)
3402 struct vm_struct *vm;
3404 BUG_ON(in_interrupt());
3405 might_sleep();
3407 if (!addr)
3408 return;
3409 vm = remove_vm_area(addr);
3410 if (unlikely(!vm)) {
3411 WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n",
3412 addr);
3413 return;
3415 kfree(vm);
3417 EXPORT_SYMBOL(vunmap);
3420 * vmap - map an array of pages into virtually contiguous space
3421 * @pages: array of page pointers
3422 * @count: number of pages to map
3423 * @flags: vm_area->flags
3424 * @prot: page protection for the mapping
3426 * Maps @count pages from @pages into contiguous kernel virtual space.
3427 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
3428 * (which must be kmalloc or vmalloc memory) and one reference per page in it
3429 * are transferred from the caller to vmap(), and will be freed / dropped when
3430 * vfree() is called on the return value.
3432 * Return: the address of the area or %NULL on failure
3434 void *vmap(struct page **pages, unsigned int count,
3435 unsigned long flags, pgprot_t prot)
3437 struct vm_struct *area;
3438 unsigned long addr;
3439 unsigned long size; /* In bytes */
3441 might_sleep();
3443 if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS))
3444 return NULL;
3447 * Your top guard is someone else's bottom guard. Not having a top
3448 * guard compromises someone else's mappings too.
3450 if (WARN_ON_ONCE(flags & VM_NO_GUARD))
3451 flags &= ~VM_NO_GUARD;
3453 if (count > totalram_pages())
3454 return NULL;
3456 size = (unsigned long)count << PAGE_SHIFT;
3457 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
3458 if (!area)
3459 return NULL;
3461 addr = (unsigned long)area->addr;
3462 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
3463 pages, PAGE_SHIFT) < 0) {
3464 vunmap(area->addr);
3465 return NULL;
3468 if (flags & VM_MAP_PUT_PAGES) {
3469 area->pages = pages;
3470 area->nr_pages = count;
3472 return area->addr;
3474 EXPORT_SYMBOL(vmap);
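/*
 * Illustrative usage sketch (hypothetical caller): make a scattered page
 * array appear virtually contiguous, then tear the mapping down.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		... access nr_pages * PAGE_SIZE bytes at va ...
 *		vunmap(va);	// frees the mapping, not the pages
 *	}
 */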
3476 #ifdef CONFIG_VMAP_PFN
3477 struct vmap_pfn_data {
3478 unsigned long *pfns;
3479 pgprot_t prot;
3480 unsigned int idx;
3483 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
3485 struct vmap_pfn_data *data = private;
3486 unsigned long pfn = data->pfns[data->idx];
3487 pte_t ptent;
3489 if (WARN_ON_ONCE(pfn_valid(pfn)))
3490 return -EINVAL;
3492 ptent = pte_mkspecial(pfn_pte(pfn, data->prot));
3493 set_pte_at(&init_mm, addr, pte, ptent);
3495 data->idx++;
3496 return 0;
3500 * vmap_pfn - map an array of PFNs into virtually contiguous space
3501 * @pfns: array of PFNs
3502 * @count: number of pages to map
3503 * @prot: page protection for the mapping
3505 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
3506 * the start address of the mapping.
3508 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
3510 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
3511 struct vm_struct *area;
3513 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
3514 __builtin_return_address(0));
3515 if (!area)
3516 return NULL;
3517 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3518 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
3519 free_vm_area(area);
3520 return NULL;
3523 flush_cache_vmap((unsigned long)area->addr,
3524 (unsigned long)area->addr + count * PAGE_SIZE);
3526 return area->addr;
3528 EXPORT_SYMBOL_GPL(vmap_pfn);
3529 #endif /* CONFIG_VMAP_PFN */
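/*
 * Illustrative vmap_pfn() usage sketch (hypothetical driver): map @count
 * device PFNs that have no struct page, e.g. from a PCI BAR, and drop
 * the mapping with vunmap() when done.
 *
 *	void *va = vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (va) {
 *		... access the device memory through va ...
 *		vunmap(va);
 *	}
 */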
3531 static inline unsigned int
3532 vm_area_alloc_pages(gfp_t gfp, int nid,
3533 unsigned int order, unsigned int nr_pages, struct page **pages)
3535 unsigned int nr_allocated = 0;
3536 struct page *page;
3537 int i;
3540 * For order-0 pages we make use of the bulk allocator; if
3541 * the page array is partly or not at all populated due to
3542 * failures, fall back to the single page allocator, which
3543 * is more permissive.
3545 if (!order) {
3546 while (nr_allocated < nr_pages) {
3547 unsigned int nr, nr_pages_request;
3550 * The maximum allowed request is hard-coded to 100
3551 * pages per call, in order to prevent a long
3552 * preemption-off scenario in the bulk allocator,
3553 * so the range is [1:100].
3555 nr_pages_request = min(100U, nr_pages - nr_allocated);
3557 /* Memory allocation should consider mempolicy: we must not
3558 * wrongly use the nearest node when nid == NUMA_NO_NODE,
3559 * otherwise memory may be allocated on only one node while
3560 * the mempolicy wants allocations interleaved across nodes.
3562 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
3563 nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
3564 nr_pages_request,
3565 pages + nr_allocated);
3566 else
3567 nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
3568 nr_pages_request,
3569 pages + nr_allocated);
3571 nr_allocated += nr;
3572 cond_resched();
3575 * If no pages or only some pages were obtained,
3576 * fall back to the single page allocator.
3578 if (nr != nr_pages_request)
3579 break;
3583 /* High-order pages or fallback path if "bulk" fails. */
3584 while (nr_allocated < nr_pages) {
3585 if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
3586 break;
3588 if (nid == NUMA_NO_NODE)
3589 page = alloc_pages_noprof(gfp, order);
3590 else
3591 page = alloc_pages_node_noprof(nid, gfp, order);
3593 if (unlikely(!page))
3594 break;
3597 * High-order allocations must be able to be treated as
3598 * independent small pages by callers (as they can with
3599 * small-page vmallocs). Some drivers do their own refcounting
3600 * on vmalloc_to_page() pages, some use page->mapping,
3601 * page->lru, etc.
3603 if (order)
3604 split_page(page, order);
3607 * Careful, we allocate and map page-order pages, but
3608 * tracking is done per PAGE_SIZE page so as to keep the
3609 * vm_struct APIs independent of the physical/mapped size.
3611 for (i = 0; i < (1U << order); i++)
3612 pages[nr_allocated + i] = page + i;
3614 cond_resched();
3615 nr_allocated += 1U << order;
3618 return nr_allocated;
3621 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
3622 pgprot_t prot, unsigned int page_shift,
3623 int node)
3625 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
3626 bool nofail = gfp_mask & __GFP_NOFAIL;
3627 unsigned long addr = (unsigned long)area->addr;
3628 unsigned long size = get_vm_area_size(area);
3629 unsigned long array_size;
3630 unsigned int nr_small_pages = size >> PAGE_SHIFT;
3631 unsigned int page_order;
3632 unsigned int flags;
3633 int ret;
3635 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3637 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
3638 gfp_mask |= __GFP_HIGHMEM;
3640 /* Please note that the recursion is strictly bounded. */
3641 if (array_size > PAGE_SIZE) {
3642 area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node,
3643 area->caller);
3644 } else {
3645 area->pages = kmalloc_node_noprof(array_size, nested_gfp, node);
3648 if (!area->pages) {
3649 warn_alloc(gfp_mask, NULL,
3650 "vmalloc error: size %lu, failed to allocated page array size %lu",
3651 nr_small_pages * PAGE_SIZE, array_size);
3652 free_vm_area(area);
3653 return NULL;
3656 set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
3657 page_order = vm_area_page_order(area);
3660 * High-order nofail allocations are really expensive and
3661 * potentially dangerous (premature OOM, disruptive reclaim,
3662 * compaction, etc.).
3664 * Please note, __vmalloc_node_range_noprof() falls back
3665 * to order-0 pages if the high-order attempt is unsuccessful.
3667 area->nr_pages = vm_area_alloc_pages((page_order ?
3668 gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
3669 node, page_order, nr_small_pages, area->pages);
3671 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
3672 if (gfp_mask & __GFP_ACCOUNT) {
3673 int i;
3675 for (i = 0; i < area->nr_pages; i++)
3676 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
3680 * If not enough pages were obtained to satisfy the
3681 * allocation request, free any that were obtained via vfree().
3683 if (area->nr_pages != nr_small_pages) {
3685 * vm_area_alloc_pages() can fail due to insufficient memory but
3686 * also:
3688 * - a pending fatal signal
3689 * - insufficient huge page-order pages
3691 * Since we always retry allocations at order-0 in the huge page
3692 * case a warning for either is spurious.
3694 if (!fatal_signal_pending(current) && page_order == 0)
3695 warn_alloc(gfp_mask, NULL,
3696 "vmalloc error: size %lu, failed to allocate pages",
3697 area->nr_pages * PAGE_SIZE);
3698 goto fail;
3702 * Page table allocations ignore the external gfp mask; enforce
3703 * it via the scope API.
3705 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3706 flags = memalloc_nofs_save();
3707 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3708 flags = memalloc_noio_save();
3710 do {
3711 ret = vmap_pages_range(addr, addr + size, prot, area->pages,
3712 page_shift);
3713 if (nofail && (ret < 0))
3714 schedule_timeout_uninterruptible(1);
3715 } while (nofail && (ret < 0));
3717 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
3718 memalloc_nofs_restore(flags);
3719 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
3720 memalloc_noio_restore(flags);
3722 if (ret < 0) {
3723 warn_alloc(gfp_mask, NULL,
3724 "vmalloc error: size %lu, failed to map pages",
3725 area->nr_pages * PAGE_SIZE);
3726 goto fail;
3729 return area->addr;
3731 fail:
3732 vfree(area->addr);
3733 return NULL;
3737 * __vmalloc_node_range - allocate virtually contiguous memory
3738 * @size: allocation size
3739 * @align: desired alignment
3740 * @start: vm area range start
3741 * @end: vm area range end
3742 * @gfp_mask: flags for the page level allocator
3743 * @prot: protection mask for the allocated pages
3744 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3745 * @node: node to use for allocation or NUMA_NO_NODE
3746 * @caller: caller's return address
3748 * Allocate enough pages to cover @size from the page level
3749 * allocator with @gfp_mask flags. Please note that the full set of gfp
3750 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all
3751 * supported.
3752 * Zone modifiers are not supported. From the reclaim modifiers
3753 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported)
3754 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and
3755 * __GFP_RETRY_MAYFAIL are not supported).
3757 * __GFP_NOWARN can be used to suppress failure messages.
3759 * Map them into contiguous kernel virtual space, using a pagetable
3760 * protection of @prot.
3762 * Return: the address of the area or %NULL on failure
3764 void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
3765 unsigned long start, unsigned long end, gfp_t gfp_mask,
3766 pgprot_t prot, unsigned long vm_flags, int node,
3767 const void *caller)
3769 struct vm_struct *area;
3770 void *ret;
3771 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
3772 unsigned long real_size = size;
3773 unsigned long real_align = align;
3774 unsigned int shift = PAGE_SHIFT;
3776 if (WARN_ON_ONCE(!size))
3777 return NULL;
3779 if ((size >> PAGE_SHIFT) > totalram_pages()) {
3780 warn_alloc(gfp_mask, NULL,
3781 "vmalloc error: size %lu, exceeds total pages",
3782 real_size);
3783 return NULL;
3786 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
3788 * Try huge pages. Only try for PAGE_KERNEL allocations,
3789 * others like modules don't yet expect huge pages in
3790 * their allocations due to apply_to_page_range not
3791 * supporting them.
3794 if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
3795 shift = PMD_SHIFT;
3796 else
3797 shift = arch_vmap_pte_supported_shift(size);
3799 align = max(real_align, 1UL << shift);
3800 size = ALIGN(real_size, 1UL << shift);
3803 again:
3804 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
3805 VM_UNINITIALIZED | vm_flags, start, end, node,
3806 gfp_mask, caller);
3807 if (!area) {
3808 bool nofail = gfp_mask & __GFP_NOFAIL;
3809 warn_alloc(gfp_mask, NULL,
3810 "vmalloc error: size %lu, vm_struct allocation failed%s",
3811 real_size, (nofail) ? ". Retrying." : "");
3812 if (nofail) {
3813 schedule_timeout_uninterruptible(1);
3814 goto again;
3816 goto fail;
3820 * Prepare arguments for __vmalloc_area_node() and
3821 * kasan_unpoison_vmalloc().
3823 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
3824 if (kasan_hw_tags_enabled()) {
3826 * Modify protection bits to allow tagging.
3827 * This must be done before mapping.
3829 prot = arch_vmap_pgprot_tagged(prot);
3832 * Skip page_alloc poisoning and zeroing for physical
3833 * pages backing VM_ALLOC mapping. Memory is instead
3834 * poisoned and zeroed by kasan_unpoison_vmalloc().
3836 gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO;
3839 /* Take note that the mapping is PAGE_KERNEL. */
3840 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
3843 /* Allocate physical pages and map them into vmalloc space. */
3844 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
3845 if (!ret)
3846 goto fail;
3849 * Mark the pages as accessible, now that they are mapped.
3850 * The condition for setting KASAN_VMALLOC_INIT should complement the
3851 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check
3852 * to make sure that memory is initialized under the same conditions.
3853 * Tag-based KASAN modes only assign tags to normal non-executable
3854 * allocations, see __kasan_unpoison_vmalloc().
3856 kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
3857 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) &&
3858 (gfp_mask & __GFP_SKIP_ZERO))
3859 kasan_flags |= KASAN_VMALLOC_INIT;
3860 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */
3861 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
3864 * In this function, a newly allocated vm_struct has the
3865 * VM_UNINITIALIZED flag, meaning it is not fully initialized.
3866 * Now that it is fully initialized, remove the flag here.
3868 clear_vm_uninitialized_flag(area);
3870 size = PAGE_ALIGN(size);
3871 if (!(vm_flags & VM_DEFER_KMEMLEAK))
3872 kmemleak_vmalloc(area, size, gfp_mask);
3874 return area->addr;
3876 fail:
3877 if (shift > PAGE_SHIFT) {
3878 shift = PAGE_SHIFT;
3879 align = real_align;
3880 size = real_size;
3881 goto again;
3884 return NULL;
3888 * __vmalloc_node - allocate virtually contiguous memory
3889 * @size: allocation size
3890 * @align: desired alignment
3891 * @gfp_mask: flags for the page level allocator
3892 * @node: node to use for allocation or NUMA_NO_NODE
3893 * @caller: caller's return address
3895 * Allocate enough pages to cover @size from the page level allocator with
3896 * @gfp_mask flags. Map them into contiguous kernel virtual space.
3898 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3899 * and __GFP_NOFAIL are not supported
3901 * Any use of gfp flags outside of GFP_KERNEL should be consulted
3902 * with mm people.
3904 * Return: pointer to the allocated memory or %NULL on error
3906 void *__vmalloc_node_noprof(unsigned long size, unsigned long align,
3907 gfp_t gfp_mask, int node, const void *caller)
3909 return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END,
3910 gfp_mask, PAGE_KERNEL, 0, node, caller);
3913 * This is only for vmalloc performance analysis and stress testing.
3914 * It is required by the vmalloc test module; do not use it for
3915 * anything else.
3917 #ifdef CONFIG_TEST_VMALLOC_MODULE
3918 EXPORT_SYMBOL_GPL(__vmalloc_node_noprof);
3919 #endif
3921 void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
3923 return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE,
3924 __builtin_return_address(0));
3926 EXPORT_SYMBOL(__vmalloc_noprof);
3929 * vmalloc - allocate virtually contiguous memory
3930 * @size: allocation size
3932 * Allocate enough pages to cover @size from the page level
3933 * allocator and map them into contiguous kernel virtual space.
3935 * For tight control over page level allocator and protection flags
3936 * use __vmalloc() instead.
3938 * Return: pointer to the allocated memory or %NULL on error
3940 void *vmalloc_noprof(unsigned long size)
3942 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3943 __builtin_return_address(0));
3945 EXPORT_SYMBOL(vmalloc_noprof);
3948 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
3949 * @size: allocation size
3950 * @gfp_mask: flags for the page level allocator
3952 * Allocate enough pages to cover @size from the page level
3953 * allocator and map them into contiguous kernel virtual space.
3954 * If @size is greater than or equal to PMD_SIZE, allow using
3955 * huge pages for the memory.
3957 * Return: pointer to the allocated memory or %NULL on error
3959 void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask)
3961 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
3962 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
3963 NUMA_NO_NODE, __builtin_return_address(0));
3965 EXPORT_SYMBOL_GPL(vmalloc_huge_noprof);
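/*
 * Illustrative example (hypothetical caller): a large hash table that
 * benefits from PMD mappings where the architecture supports them.
 *
 *	table = vmalloc_huge(8UL << 20, GFP_KERNEL);	// 8MB, may use 2MB pages
 */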
3968 * vzalloc - allocate virtually contiguous memory with zero fill
3969 * @size: allocation size
3971 * Allocate enough pages to cover @size from the page level
3972 * allocator and map them into contiguous kernel virtual space.
3973 * The memory allocated is set to zero.
3975 * For tight control over page level allocator and protection flags
3976 * use __vmalloc() instead.
3978 * Return: pointer to the allocated memory or %NULL on error
3980 void *vzalloc_noprof(unsigned long size)
3982 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3983 __builtin_return_address(0));
3985 EXPORT_SYMBOL(vzalloc_noprof);
3988 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3989 * @size: allocation size
3991 * The resulting memory area is zeroed so it can be mapped to userspace
3992 * without leaking data.
3994 * Return: pointer to the allocated memory or %NULL on error
3996 void *vmalloc_user_noprof(unsigned long size)
3998 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
3999 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
4000 VM_USERMAP, NUMA_NO_NODE,
4001 __builtin_return_address(0));
4003 EXPORT_SYMBOL(vmalloc_user_noprof);
4006 * vmalloc_node - allocate memory on a specific node
4007 * @size: allocation size
4008 * @node: numa node
4010 * Allocate enough pages to cover @size from the page level
4011 * allocator and map them into contiguous kernel virtual space.
4013 * For tight control over page level allocator and protection flags
4014 * use __vmalloc() instead.
4016 * Return: pointer to the allocated memory or %NULL on error
4018 void *vmalloc_node_noprof(unsigned long size, int node)
4020 return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node,
4021 __builtin_return_address(0));
4023 EXPORT_SYMBOL(vmalloc_node_noprof);
4026 * vzalloc_node - allocate memory on a specific node with zero fill
4027 * @size: allocation size
4028 * @node: numa node
4030 * Allocate enough pages to cover @size from the page level
4031 * allocator and map them into contiguous kernel virtual space.
4032 * The memory allocated is set to zero.
4034 * Return: pointer to the allocated memory or %NULL on error
4036 void *vzalloc_node_noprof(unsigned long size, int node)
4038 return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node,
4039 __builtin_return_address(0));
4041 EXPORT_SYMBOL(vzalloc_node_noprof);
4044 * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
4045 * @p: object to reallocate memory for
4046 * @size: the size to reallocate
4047 * @flags: the flags for the page level allocator
4049 * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
4050 * @p is not a %NULL pointer, the object pointed to is freed.
4052 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4053 * initial memory allocation, every subsequent call to this API for the same
4054 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4055 * __GFP_ZERO is not fully honored by this API.
4057 * In any case, the contents of the object pointed to are preserved up to the
4058 * lesser of the new and old sizes.
4060 * This function must not be called concurrently with itself or vfree() for the
4061 * same memory allocation.
4063 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
4064 * failure
4066 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
4068 size_t old_size = 0;
4069 void *n;
4071 if (!size) {
4072 vfree(p);
4073 return NULL;
4076 if (p) {
4077 struct vm_struct *vm;
4079 vm = find_vm_area(p);
4080 if (unlikely(!vm)) {
4081 WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
4082 return NULL;
4085 old_size = get_vm_area_size(vm);
4089 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
4090 * would be a good heuristic for when to shrink the vm_area?
4092 if (size <= old_size) {
4093 /* Zero out spare memory. */
4094 if (want_init_on_alloc(flags))
4095 memset((void *)p + size, 0, old_size - size);
4096 kasan_poison_vmalloc(p + size, old_size - size);
4097 kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
4098 return (void *)p;
4101 /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
4102 n = __vmalloc_noprof(size, flags);
4103 if (!n)
4104 return NULL;
4106 if (p) {
4107 memcpy(n, p, old_size);
4108 vfree(p);
4111 return n;
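/*
 * Editor's sketch (illustrative only): the classic realloc pattern built on
 * vrealloc(), assumed to be the public wrapper for vrealloc_noprof() above.
 * On failure the old allocation stays valid, so it must not be clobbered.
 * Assumes @new_size > 0 (a zero size frees the buffer and returns NULL).
 */
static inline int vrealloc_usage_sketch(void **bufp, size_t new_size)
{
	void *tmp = vrealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is untouched and still valid */

	*bufp = tmp;		/* contents preserved up to min(old, new) size */
	return 0;
}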
4114 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
4115 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4116 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
4117 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
4118 #else
4120 * 64-bit systems should always have either DMA or DMA32 zones. For
4121 * others, GFP_DMA32 should do the right thing and use the normal zone.
4123 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
4124 #endif
4127 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
4128 * @size: allocation size
4130 * Allocate enough 32bit PA addressable pages to cover @size from the
4131 * page level allocator and map them into contiguous kernel virtual space.
4133 * Return: pointer to the allocated memory or %NULL on error
4135 void *vmalloc_32_noprof(unsigned long size)
4137 return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
4138 __builtin_return_address(0));
4140 EXPORT_SYMBOL(vmalloc_32_noprof);
4143 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
4144 * @size: allocation size
4146 * The resulting memory area is 32bit addressable and zeroed so it can be
4147 * mapped to userspace without leaking data.
4149 * Return: pointer to the allocated memory or %NULL on error
4151 void *vmalloc_32_user_noprof(unsigned long size)
4153 return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END,
4154 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
4155 VM_USERMAP, NUMA_NO_NODE,
4156 __builtin_return_address(0));
4158 EXPORT_SYMBOL(vmalloc_32_user_noprof);
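/*
 * Editor's note (illustrative only): areas meant to be mapped to userspace
 * with remap_vmalloc_range() below must carry VM_USERMAP, which
 * vmalloc_user() and vmalloc_32_user() set internally; a plain vmalloc()
 * area would be rejected with -EINVAL. "user_mappable_buf_sketch" is a
 * hypothetical helper.
 */
static inline void *user_mappable_buf_sketch(unsigned long size)
{
	/* Zeroed, 32bit addressable and VM_USERMAP-tagged. */
	return vmalloc_32_user(size);
}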
4161 * Atomically zero bytes in the iterator.
4163 * Returns the number of zeroed bytes.
4165 static size_t zero_iter(struct iov_iter *iter, size_t count)
4167 size_t remains = count;
4169 while (remains > 0) {
4170 size_t num, copied;
4172 num = min_t(size_t, remains, PAGE_SIZE);
4173 copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter);
4174 remains -= copied;
4176 if (copied < num)
4177 break;
4180 return count - remains;
4184 * Small helper routine to copy contents from addr to the iterator.
4185 * If a page is not present, fill with zeroes.
4187 * Returns the number of copied bytes.
4189 static size_t aligned_vread_iter(struct iov_iter *iter,
4190 const char *addr, size_t count)
4192 size_t remains = count;
4193 struct page *page;
4195 while (remains > 0) {
4196 unsigned long offset, length;
4197 size_t copied = 0;
4199 offset = offset_in_page(addr);
4200 length = PAGE_SIZE - offset;
4201 if (length > remains)
4202 length = remains;
4203 page = vmalloc_to_page(addr);
4205 * To do safe access to this _mapped_ area, we need a lock. But
4206 * adding a lock here would add vmalloc()/vfree() overhead to this
4207 * rarely used _debug_ interface. Instead of that, we use a local
4208 * mapping via copy_page_to_iter_nofault() and accept a small
4209 * overhead in this access function.
4212 if (page)
4213 copied = copy_page_to_iter_nofault(page, offset,
4214 length, iter);
4215 else
4216 copied = zero_iter(iter, length);
4218 addr += copied;
4219 remains -= copied;
4221 if (copied != length)
4222 break;
4225 return count - remains;
4229 * Read from a vm_map_ram region of memory.
4231 * Returns the number of copied bytes.
4233 static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr,
4234 size_t count, unsigned long flags)
4236 char *start;
4237 struct vmap_block *vb;
4238 struct xarray *xa;
4239 unsigned long offset;
4240 unsigned int rs, re;
4241 size_t remains, n;
4244 * If the area was created directly by the vm_map_ram() interface,
4245 * without being further subdivided and delegated to vmap_block
4246 * management, handle it here.
4248 if (!(flags & VMAP_BLOCK))
4249 return aligned_vread_iter(iter, addr, count);
4251 remains = count;
4254 * The area is split into regions tracked by vmap_block; read out
4255 * each region and zero-fill the holes between regions.
4257 xa = addr_to_vb_xa((unsigned long) addr);
4258 vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
4259 if (!vb)
4260 goto finished_zero;
4262 spin_lock(&vb->lock);
4263 if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
4264 spin_unlock(&vb->lock);
4265 goto finished_zero;
4268 for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
4269 size_t copied;
4271 if (remains == 0)
4272 goto finished;
4274 start = vmap_block_vaddr(vb->va->va_start, rs);
4276 if (addr < start) {
4277 size_t to_zero = min_t(size_t, start - addr, remains);
4278 size_t zeroed = zero_iter(iter, to_zero);
4280 addr += zeroed;
4281 remains -= zeroed;
4283 if (remains == 0 || zeroed != to_zero)
4284 goto finished;
4287 			/* Reading may start from the middle of a used region. */
4288 offset = offset_in_page(addr);
4289 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
4290 if (n > remains)
4291 n = remains;
4293 copied = aligned_vread_iter(iter, start + offset, n);
4295 addr += copied;
4296 remains -= copied;
4298 if (copied != n)
4299 goto finished;
4302 spin_unlock(&vb->lock);
4304 finished_zero:
4305 	/* zero-fill the remaining dirty or free regions */
4306 return count - remains + zero_iter(iter, remains);
4307 finished:
4308 /* We couldn't copy/zero everything */
4309 spin_unlock(&vb->lock);
4310 return count - remains;
4314 * vread_iter() - read vmalloc area in a safe way to an iterator.
4315 * @iter: the iterator to which data should be written.
4316 * @addr: vm address.
4317 * @count: number of bytes to be read.
4319 * This function checks that addr is a valid vmalloc'ed area, and
4320 * copies data from that area to the given iterator. If the given memory
4321 * range of [addr...addr+count) includes some valid address, data is
4322 * copied to @iter. If there are memory holes, they'll be zero-filled.
4323 * IOREMAP areas are treated as memory holes and no copy is done.
4325 * If [addr...addr+count) doesn't include any intersection with an alive
4326 * vm_struct area, 0 is returned.
4328 * Note: In usual ops, vread_iter() is never necessary because the caller
4329 * should know the vmalloc() area is valid and can use memcpy().
4330 * This is for routines which have to access the vmalloc area without
4331 * any information, such as /proc/kcore.
4333 * Return: number of bytes for which addr and @iter should be advanced
4334 * (same number as @count) or %0 if [addr...addr+count) doesn't
4335 * include any intersection with a valid vmalloc area
4337 long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
4339 struct vmap_node *vn;
4340 struct vmap_area *va;
4341 struct vm_struct *vm;
4342 char *vaddr;
4343 size_t n, size, flags, remains;
4344 unsigned long next;
4346 addr = kasan_reset_tag(addr);
4348 /* Don't allow overflow */
4349 if ((unsigned long) addr + count < count)
4350 count = -(unsigned long) addr;
4352 remains = count;
4354 vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
4355 if (!vn)
4356 goto finished_zero;
4358 	/* no intersection with an alive vmap_area */
4359 if ((unsigned long)addr + remains <= va->va_start)
4360 goto finished_zero;
4362 do {
4363 size_t copied;
4365 if (remains == 0)
4366 goto finished;
4368 vm = va->vm;
4369 flags = va->flags & VMAP_FLAGS_MASK;
4371 * VMAP_BLOCK indicates a sub-type of vm_map_ram area that needs
4372 * to be set together with VMAP_RAM.
4374 WARN_ON(flags == VMAP_BLOCK);
4376 if (!vm && !flags)
4377 goto next_va;
4379 if (vm && (vm->flags & VM_UNINITIALIZED))
4380 goto next_va;
4382 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4383 smp_rmb();
4385 vaddr = (char *) va->va_start;
4386 size = vm ? get_vm_area_size(vm) : va_size(va);
4388 if (addr >= vaddr + size)
4389 goto next_va;
4391 if (addr < vaddr) {
4392 size_t to_zero = min_t(size_t, vaddr - addr, remains);
4393 size_t zeroed = zero_iter(iter, to_zero);
4395 addr += zeroed;
4396 remains -= zeroed;
4398 if (remains == 0 || zeroed != to_zero)
4399 goto finished;
4402 n = vaddr + size - addr;
4403 if (n > remains)
4404 n = remains;
4406 if (flags & VMAP_RAM)
4407 copied = vmap_ram_vread_iter(iter, addr, n, flags);
4408 else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
4409 copied = aligned_vread_iter(iter, addr, n);
4410 else /* IOREMAP | SPARSE area is treated as memory hole */
4411 copied = zero_iter(iter, n);
4413 addr += copied;
4414 remains -= copied;
4416 if (copied != n)
4417 goto finished;
4419 next_va:
4420 next = va->va_end;
4421 spin_unlock(&vn->busy.lock);
4422 } while ((vn = find_vmap_area_exceed_addr_lock(next, &va)));
4424 finished_zero:
4425 if (vn)
4426 spin_unlock(&vn->busy.lock);
4428 /* zero-fill memory holes */
4429 return count - remains + zero_iter(iter, remains);
4430 finished:
4431 	/* Nothing remains, or we couldn't copy/zero everything. */
4432 if (vn)
4433 spin_unlock(&vn->busy.lock);
4435 return count - remains;
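/*
 * Editor's sketch (illustrative only): driving vread_iter() from a kernel
 * buffer through an iov_iter, roughly the way /proc/kcore-style readers do.
 * Memory holes and IOREMAP/SPARSE areas come back zero-filled.
 * "vread_into_kbuf_sketch" is hypothetical.
 */
static inline long vread_into_kbuf_sketch(void *buf, const char *vaddr,
					  size_t len)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
	return vread_iter(&iter, vaddr, len);
}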
4439 * remap_vmalloc_range_partial - map vmalloc pages to userspace
4440 * @vma: vma to cover
4441 * @uaddr: target user address to start at
4442 * @kaddr: virtual address of vmalloc kernel memory
4443 * @pgoff: offset from @kaddr to start at
4444 * @size: size of map area
4446 * Returns: 0 for success, -Exxx on failure
4448 * This function checks that @kaddr is a valid vmalloc'ed area,
4449 * and that it is big enough to cover the range starting at
4450 * @uaddr in @vma. It returns failure if that criterion isn't
4451 * met.
4453 * Similar to remap_pfn_range() (see mm/memory.c)
4455 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
4456 void *kaddr, unsigned long pgoff,
4457 unsigned long size)
4459 struct vm_struct *area;
4460 unsigned long off;
4461 unsigned long end_index;
4463 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
4464 return -EINVAL;
4466 size = PAGE_ALIGN(size);
4468 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
4469 return -EINVAL;
4471 area = find_vm_area(kaddr);
4472 if (!area)
4473 return -EINVAL;
4475 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
4476 return -EINVAL;
4478 if (check_add_overflow(size, off, &end_index) ||
4479 end_index > get_vm_area_size(area))
4480 return -EINVAL;
4481 kaddr += off;
4483 do {
4484 struct page *page = vmalloc_to_page(kaddr);
4485 int ret;
4487 ret = vm_insert_page(vma, uaddr, page);
4488 if (ret)
4489 return ret;
4491 uaddr += PAGE_SIZE;
4492 kaddr += PAGE_SIZE;
4493 size -= PAGE_SIZE;
4494 } while (size > 0);
4496 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
4498 return 0;
4502 * remap_vmalloc_range - map vmalloc pages to userspace
4503 * @vma: vma to cover (map full range of vma)
4504 * @addr: vmalloc memory
4505 * @pgoff: number of pages into addr before first page to map
4507 * Returns: 0 for success, -Exxx on failure
4509 * This function checks that addr is a valid vmalloc'ed area, and
4510 * that it is big enough to cover the vma. It returns failure if
4511 * that criterion isn't met.
4513 * Similar to remap_pfn_range() (see mm/memory.c)
4515 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
4516 unsigned long pgoff)
4518 return remap_vmalloc_range_partial(vma, vma->vm_start,
4519 addr, pgoff,
4520 vma->vm_end - vma->vm_start);
4522 EXPORT_SYMBOL(remap_vmalloc_range);
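/*
 * Editor's sketch (illustrative only): the usual driver ->mmap handler built
 * on remap_vmalloc_range(). "mydev_buf" stands for a buffer allocated with
 * vmalloc_user() (or vmalloc_32_user()) at probe time so that the
 * VM_USERMAP check in remap_vmalloc_range_partial() passes; all names here
 * are hypothetical.
 */
static void *mydev_buf;	/* hypothetical, set up at probe time */

static inline int mydev_mmap_sketch(struct file *file,
				    struct vm_area_struct *vma)
{
	/* Map the whole VMA; vm_pgoff selects the first page to map. */
	return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}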
4524 void free_vm_area(struct vm_struct *area)
4526 struct vm_struct *ret;
4527 ret = remove_vm_area(area->addr);
4528 BUG_ON(ret != area);
4529 kfree(area);
4531 EXPORT_SYMBOL_GPL(free_vm_area);
4533 #ifdef CONFIG_SMP
4534 static struct vmap_area *node_to_va(struct rb_node *n)
4536 return rb_entry_safe(n, struct vmap_area, rb_node);
4540 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
4541 * @addr: target address
4543 * Returns: vmap_area if it is found. If there is no such area,
4544 * the first highest (in reverse order) vmap_area is returned,
4545 * i.e. va->va_start < addr && va->va_end < addr, or NULL
4546 * if there are no areas before @addr.
4548 static struct vmap_area *
4549 pvm_find_va_enclose_addr(unsigned long addr)
4551 struct vmap_area *va, *tmp;
4552 struct rb_node *n;
4554 n = free_vmap_area_root.rb_node;
4555 va = NULL;
4557 while (n) {
4558 tmp = rb_entry(n, struct vmap_area, rb_node);
4559 if (tmp->va_start <= addr) {
4560 va = tmp;
4561 if (tmp->va_end >= addr)
4562 break;
4564 n = n->rb_right;
4565 } else {
4566 n = n->rb_left;
4570 return va;
4574 * pvm_determine_end_from_reverse - find the highest aligned address
4575 * of free block below VMALLOC_END
4576 * @va:
4577 * in - the VA we start the search from (reverse order);
4578 * out - the VA with the highest aligned end address.
4579 * @align: alignment for required highest address
4581 * Returns: determined end address within vmap_area
4583 static unsigned long
4584 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
4586 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4587 unsigned long addr;
4589 if (likely(*va)) {
4590 list_for_each_entry_from_reverse((*va),
4591 &free_vmap_area_list, list) {
4592 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4593 if ((*va)->va_start < addr)
4594 return addr;
4598 return 0;
4602 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
4603 * @offsets: array containing offset of each area
4604 * @sizes: array containing size of each area
4605 * @nr_vms: the number of areas to allocate
4606 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
4608 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
4609 * vm_structs on success, %NULL on failure
4611 * Percpu allocator wants to use congruent vm areas so that it can
4612 * maintain the offsets among percpu areas. This function allocates
4613 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
4614 * be scattered pretty far, distance between two areas easily going up
4615 * to gigabytes. To avoid interacting with regular vmallocs, these
4616 * areas are allocated from top.
4618 * Despite its complicated look, this allocator is rather simple. It
4619 * does everything top-down and scans free blocks from the end looking
4620 * for a matching base. While scanning, if any of the areas do not fit,
4621 * the base address is pulled down to fit the area. Scanning is repeated till
4622 * all the areas fit and then all necessary data structures are inserted
4623 * and the result is returned.
4625 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
4626 const size_t *sizes, int nr_vms,
4627 size_t align)
4629 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
4630 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
4631 struct vmap_area **vas, *va;
4632 struct vm_struct **vms;
4633 int area, area2, last_area, term_area;
4634 unsigned long base, start, size, end, last_end, orig_start, orig_end;
4635 bool purged = false;
4637 /* verify parameters and allocate data structures */
4638 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
4639 for (last_area = 0, area = 0; area < nr_vms; area++) {
4640 start = offsets[area];
4641 end = start + sizes[area];
4643 /* is everything aligned properly? */
4644 BUG_ON(!IS_ALIGNED(offsets[area], align));
4645 BUG_ON(!IS_ALIGNED(sizes[area], align));
4647 /* detect the area with the highest address */
4648 if (start > offsets[last_area])
4649 last_area = area;
4651 for (area2 = area + 1; area2 < nr_vms; area2++) {
4652 unsigned long start2 = offsets[area2];
4653 unsigned long end2 = start2 + sizes[area2];
4655 BUG_ON(start2 < end && start < end2);
4658 last_end = offsets[last_area] + sizes[last_area];
4660 if (vmalloc_end - vmalloc_start < last_end) {
4661 WARN_ON(true);
4662 return NULL;
4665 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
4666 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
4667 if (!vas || !vms)
4668 goto err_free2;
4670 for (area = 0; area < nr_vms; area++) {
4671 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
4672 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
4673 if (!vas[area] || !vms[area])
4674 goto err_free;
4676 retry:
4677 spin_lock(&free_vmap_area_lock);
4679 /* start scanning - we scan from the top, begin with the last area */
4680 area = term_area = last_area;
4681 start = offsets[area];
4682 end = start + sizes[area];
4684 va = pvm_find_va_enclose_addr(vmalloc_end);
4685 base = pvm_determine_end_from_reverse(&va, align) - end;
4687 while (true) {
4689 * base might have underflowed, add last_end before
4690 * comparing.
4692 if (base + last_end < vmalloc_start + last_end)
4693 goto overflow;
4696 * A fitting base has not been found.
4698 if (va == NULL)
4699 goto overflow;
4702 * If required width exceeds current VA block, move
4703 * base downwards and then recheck.
4705 if (base + end > va->va_end) {
4706 base = pvm_determine_end_from_reverse(&va, align) - end;
4707 term_area = area;
4708 continue;
4712 * If this VA does not fit, move base downwards and recheck.
4714 if (base + start < va->va_start) {
4715 va = node_to_va(rb_prev(&va->rb_node));
4716 base = pvm_determine_end_from_reverse(&va, align) - end;
4717 term_area = area;
4718 continue;
4722 * This area fits, move on to the previous one. If
4723 * the previous one is the terminal one, we're done.
4725 area = (area + nr_vms - 1) % nr_vms;
4726 if (area == term_area)
4727 break;
4729 start = offsets[area];
4730 end = start + sizes[area];
4731 va = pvm_find_va_enclose_addr(base + end);
4734 /* we've found a fitting base, insert all va's */
4735 for (area = 0; area < nr_vms; area++) {
4736 int ret;
4738 start = base + offsets[area];
4739 size = sizes[area];
4741 va = pvm_find_va_enclose_addr(start);
4742 if (WARN_ON_ONCE(va == NULL))
4743 /* It is a BUG(), but trigger recovery instead. */
4744 goto recovery;
4746 ret = va_clip(&free_vmap_area_root,
4747 &free_vmap_area_list, va, start, size);
4748 if (WARN_ON_ONCE(unlikely(ret)))
4749 /* It is a BUG(), but trigger recovery instead. */
4750 goto recovery;
4752 /* Allocated area. */
4753 va = vas[area];
4754 va->va_start = start;
4755 va->va_end = start + size;
4758 spin_unlock(&free_vmap_area_lock);
4760 /* populate the kasan shadow space */
4761 for (area = 0; area < nr_vms; area++) {
4762 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
4763 goto err_free_shadow;
4766 /* insert all vm's */
4767 for (area = 0; area < nr_vms; area++) {
4768 struct vmap_node *vn = addr_to_node(vas[area]->va_start);
4770 spin_lock(&vn->busy.lock);
4771 insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
4772 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
4773 pcpu_get_vm_areas);
4774 spin_unlock(&vn->busy.lock);
4778 * Mark allocated areas as accessible. Do it now as a best-effort
4779 * approach, as they can be mapped outside of vmalloc code.
4780 * With hardware tag-based KASAN, marking is skipped for
4781 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
4783 for (area = 0; area < nr_vms; area++)
4784 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
4785 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
4787 kfree(vas);
4788 return vms;
4790 recovery:
4792 * Remove previously allocated areas. There is no
4793 * need to remove these areas from the busy tree,
4794 * because they are inserted only on the final step
4795 * and only when pcpu_get_vm_areas() succeeds.
4797 while (area--) {
4798 orig_start = vas[area]->va_start;
4799 orig_end = vas[area]->va_end;
4800 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4801 &free_vmap_area_list);
4802 if (va)
4803 kasan_release_vmalloc(orig_start, orig_end,
4804 va->va_start, va->va_end,
4805 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4806 vas[area] = NULL;
4809 overflow:
4810 spin_unlock(&free_vmap_area_lock);
4811 if (!purged) {
4812 reclaim_and_purge_vmap_areas();
4813 purged = true;
4815 	/* Before "retry", check whether we have recovered. */
4816 for (area = 0; area < nr_vms; area++) {
4817 if (vas[area])
4818 continue;
4820 vas[area] = kmem_cache_zalloc(
4821 vmap_area_cachep, GFP_KERNEL);
4822 if (!vas[area])
4823 goto err_free;
4826 goto retry;
4829 err_free:
4830 for (area = 0; area < nr_vms; area++) {
4831 if (vas[area])
4832 kmem_cache_free(vmap_area_cachep, vas[area]);
4834 kfree(vms[area]);
4836 err_free2:
4837 kfree(vas);
4838 kfree(vms);
4839 return NULL;
4841 err_free_shadow:
4842 spin_lock(&free_vmap_area_lock);
4844 * We release all the vmalloc shadows, even the ones for regions that
4845 * hadn't been successfully added. This relies on kasan_release_vmalloc
4846 * being able to tolerate this case.
4848 for (area = 0; area < nr_vms; area++) {
4849 orig_start = vas[area]->va_start;
4850 orig_end = vas[area]->va_end;
4851 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4852 &free_vmap_area_list);
4853 if (va)
4854 kasan_release_vmalloc(orig_start, orig_end,
4855 va->va_start, va->va_end,
4856 KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
4857 vas[area] = NULL;
4858 kfree(vms[area]);
4860 spin_unlock(&free_vmap_area_lock);
4861 kfree(vas);
4862 kfree(vms);
4863 return NULL;
4867 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
4868 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
4869 * @nr_vms: the number of allocated areas
4871 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
4873 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
4875 int i;
4877 for (i = 0; i < nr_vms; i++)
4878 free_vm_area(vms[i]);
4879 kfree(vms);
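/*
 * Editor's sketch (illustrative only): how a pcpu_get_vm_areas() /
 * pcpu_free_vm_areas() pair is driven. Offsets and sizes must be aligned
 * to @align and must not overlap; all areas end up at the given offsets
 * from one common base. "pcpu_vm_areas_sketch" is hypothetical.
 */
static inline void pcpu_vm_areas_sketch(void)
{
	static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
	static const size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (!vms)
		return;

	/* ... map the percpu pages at vms[0]->addr and vms[1]->addr ... */

	pcpu_free_vm_areas(vms, 2);
}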
4881 #endif /* CONFIG_SMP */
4883 #ifdef CONFIG_PRINTK
4884 bool vmalloc_dump_obj(void *object)
4886 const void *caller;
4887 struct vm_struct *vm;
4888 struct vmap_area *va;
4889 struct vmap_node *vn;
4890 unsigned long addr;
4891 unsigned int nr_pages;
4893 addr = PAGE_ALIGN((unsigned long) object);
4894 vn = addr_to_node(addr);
4896 if (!spin_trylock(&vn->busy.lock))
4897 return false;
4899 va = __find_vmap_area(addr, &vn->busy.root);
4900 if (!va || !va->vm) {
4901 spin_unlock(&vn->busy.lock);
4902 return false;
4905 vm = va->vm;
4906 addr = (unsigned long) vm->addr;
4907 caller = vm->caller;
4908 nr_pages = vm->nr_pages;
4909 spin_unlock(&vn->busy.lock);
4911 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
4912 nr_pages, addr, caller);
4914 return true;
4916 #endif
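/*
 * Editor's note (illustrative only): vmalloc_dump_obj() is normally reached
 * through mem_dump_obj() (mm/util.c) rather than called directly, e.g.:
 *
 *	void *p = vmalloc(PAGE_SIZE);
 *
 *	mem_dump_obj(p);
 *	 -> " 1-page vmalloc region starting at 0x... allocated at <caller>"
 */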
4918 #ifdef CONFIG_PROC_FS
4919 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
4921 if (IS_ENABLED(CONFIG_NUMA)) {
4922 unsigned int nr, *counters = m->private;
4923 unsigned int step = 1U << vm_area_page_order(v);
4925 if (!counters)
4926 return;
4928 if (v->flags & VM_UNINITIALIZED)
4929 return;
4930 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
4931 smp_rmb();
4933 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
4935 for (nr = 0; nr < v->nr_pages; nr += step)
4936 counters[page_to_nid(v->pages[nr])] += step;
4937 for_each_node_state(nr, N_HIGH_MEMORY)
4938 if (counters[nr])
4939 seq_printf(m, " N%u=%u", nr, counters[nr]);
4943 static void show_purge_info(struct seq_file *m)
4945 struct vmap_node *vn;
4946 struct vmap_area *va;
4947 int i;
4949 for (i = 0; i < nr_vmap_nodes; i++) {
4950 vn = &vmap_nodes[i];
4952 spin_lock(&vn->lazy.lock);
4953 list_for_each_entry(va, &vn->lazy.head, list) {
4954 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
4955 (void *)va->va_start, (void *)va->va_end,
4956 va_size(va));
4958 spin_unlock(&vn->lazy.lock);
4962 static int vmalloc_info_show(struct seq_file *m, void *p)
4964 struct vmap_node *vn;
4965 struct vmap_area *va;
4966 struct vm_struct *v;
4967 int i;
4969 for (i = 0; i < nr_vmap_nodes; i++) {
4970 vn = &vmap_nodes[i];
4972 spin_lock(&vn->busy.lock);
4973 list_for_each_entry(va, &vn->busy.head, list) {
4974 if (!va->vm) {
4975 if (va->flags & VMAP_RAM)
4976 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
4977 (void *)va->va_start, (void *)va->va_end,
4978 va_size(va));
4980 continue;
4983 v = va->vm;
4985 seq_printf(m, "0x%pK-0x%pK %7ld",
4986 v->addr, v->addr + v->size, v->size);
4988 if (v->caller)
4989 seq_printf(m, " %pS", v->caller);
4991 if (v->nr_pages)
4992 seq_printf(m, " pages=%d", v->nr_pages);
4994 if (v->phys_addr)
4995 seq_printf(m, " phys=%pa", &v->phys_addr);
4997 if (v->flags & VM_IOREMAP)
4998 seq_puts(m, " ioremap");
5000 if (v->flags & VM_SPARSE)
5001 seq_puts(m, " sparse");
5003 if (v->flags & VM_ALLOC)
5004 seq_puts(m, " vmalloc");
5006 if (v->flags & VM_MAP)
5007 seq_puts(m, " vmap");
5009 if (v->flags & VM_USERMAP)
5010 seq_puts(m, " user");
5012 if (v->flags & VM_DMA_COHERENT)
5013 seq_puts(m, " dma-coherent");
5015 if (is_vmalloc_addr(v->pages))
5016 seq_puts(m, " vpages");
5018 show_numa_info(m, v);
5019 seq_putc(m, '\n');
5021 spin_unlock(&vn->busy.lock);
5025 * As a final step, dump "unpurged" areas.
5027 show_purge_info(m);
5028 return 0;
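/*
 * Editor's note (illustrative only): given the seq_printf() format strings
 * above, a typical /proc/vmallocinfo line looks like the following
 * (addresses are hashed by %pK; the caller and values shown are made up):
 *
 *	0x(____ptrval____)-0x(____ptrval____)    8192 some_driver_init+0x3a/0xa0 pages=1 vmalloc N0=1
 */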
5031 static int __init proc_vmalloc_init(void)
5033 void *priv_data = NULL;
5035 if (IS_ENABLED(CONFIG_NUMA))
5036 priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
5038 proc_create_single_data("vmallocinfo",
5039 0400, NULL, vmalloc_info_show, priv_data);
5041 return 0;
5043 module_init(proc_vmalloc_init);
5045 #endif
5047 static void __init vmap_init_free_space(void)
5049 unsigned long vmap_start = 1;
5050 const unsigned long vmap_end = ULONG_MAX;
5051 struct vmap_area *free;
5052 struct vm_struct *busy;
5055 * B F B B B F
5056 * -|-----|.....|-----|-----|-----|.....|-
5057 * | The KVA space |
5058 * |<--------------------------------->|
5060 for (busy = vmlist; busy; busy = busy->next) {
5061 if ((unsigned long) busy->addr - vmap_start > 0) {
5062 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5063 if (!WARN_ON_ONCE(!free)) {
5064 free->va_start = vmap_start;
5065 free->va_end = (unsigned long) busy->addr;
5067 insert_vmap_area_augment(free, NULL,
5068 &free_vmap_area_root,
5069 &free_vmap_area_list);
5073 vmap_start = (unsigned long) busy->addr + busy->size;
5076 if (vmap_end - vmap_start > 0) {
5077 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5078 if (!WARN_ON_ONCE(!free)) {
5079 free->va_start = vmap_start;
5080 free->va_end = vmap_end;
5082 insert_vmap_area_augment(free, NULL,
5083 &free_vmap_area_root,
5084 &free_vmap_area_list);
5089 static void vmap_init_nodes(void)
5091 struct vmap_node *vn;
5092 int i, n;
5094 #if BITS_PER_LONG == 64
5096 * The maximum number of nodes is fixed and bound to 128, thus
5097 * the scale factor is 1 for systems where the number of cores
5098 * is less than or equal to that threshold.
5100 * As for NUMA awareness: for bigger systems, for example
5101 * multi-socket NUMA where we can end up with thousands of
5102 * cores in total, "sub-numa-clustering" should be added.
5104 * In that case a NUMA domain is considered a single entity
5105 * with dedicated sub-nodes in it which describe one group or
5106 * set of cores. Per-domain purging and per-domain balancing
5107 * would then need to be added as well.
5109 n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
5111 if (n > 1) {
5112 vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
5113 if (vn) {
5114 /* Node partition is 16 pages. */
5115 vmap_zone_size = (1 << 4) * PAGE_SIZE;
5116 nr_vmap_nodes = n;
5117 vmap_nodes = vn;
5118 } else {
5119 			pr_err("Failed to allocate the nodes array. Disabling the node layer\n");
5122 #endif
5124 for (n = 0; n < nr_vmap_nodes; n++) {
5125 vn = &vmap_nodes[n];
5126 vn->busy.root = RB_ROOT;
5127 INIT_LIST_HEAD(&vn->busy.head);
5128 spin_lock_init(&vn->busy.lock);
5130 vn->lazy.root = RB_ROOT;
5131 INIT_LIST_HEAD(&vn->lazy.head);
5132 spin_lock_init(&vn->lazy.lock);
5134 for (i = 0; i < MAX_VA_SIZE_PAGES; i++) {
5135 INIT_LIST_HEAD(&vn->pool[i].head);
5136 WRITE_ONCE(vn->pool[i].len, 0);
5139 spin_lock_init(&vn->pool_lock);
5143 static unsigned long
5144 vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5146 unsigned long count;
5147 struct vmap_node *vn;
5148 int i, j;
5150 for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
5151 vn = &vmap_nodes[i];
5153 for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
5154 count += READ_ONCE(vn->pool[j].len);
5157 return count ? count : SHRINK_EMPTY;
5160 static unsigned long
5161 vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5163 int i;
5165 for (i = 0; i < nr_vmap_nodes; i++)
5166 decay_va_pool_node(&vmap_nodes[i], true);
5168 return SHRINK_STOP;
5171 void __init vmalloc_init(void)
5173 struct shrinker *vmap_node_shrinker;
5174 struct vmap_area *va;
5175 struct vmap_node *vn;
5176 struct vm_struct *tmp;
5177 int i;
5180 * Create the cache for vmap_area objects.
5182 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
5184 for_each_possible_cpu(i) {
5185 struct vmap_block_queue *vbq;
5186 struct vfree_deferred *p;
5188 vbq = &per_cpu(vmap_block_queue, i);
5189 spin_lock_init(&vbq->lock);
5190 INIT_LIST_HEAD(&vbq->free);
5191 p = &per_cpu(vfree_deferred, i);
5192 init_llist_head(&p->list);
5193 INIT_WORK(&p->wq, delayed_vfree_work);
5194 xa_init(&vbq->vmap_blocks);
5198 * Set up the nodes before importing vmlist.
5200 vmap_init_nodes();
5202 /* Import existing vmlist entries. */
5203 for (tmp = vmlist; tmp; tmp = tmp->next) {
5204 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
5205 if (WARN_ON_ONCE(!va))
5206 continue;
5208 va->va_start = (unsigned long)tmp->addr;
5209 va->va_end = va->va_start + tmp->size;
5210 va->vm = tmp;
5212 vn = addr_to_node(va->va_start);
5213 insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
5217 * Now we can initialize the free vmap space.
5219 vmap_init_free_space();
5220 vmap_initialized = true;
5222 vmap_node_shrinker = shrinker_alloc(0, "vmap-node");
5223 if (!vmap_node_shrinker) {
5224 pr_err("Failed to allocate vmap-node shrinker!\n");
5225 return;
5228 vmap_node_shrinker->count_objects = vmap_node_shrink_count;
5229 vmap_node_shrinker->scan_objects = vmap_node_shrink_scan;
5230 shrinker_register(vmap_node_shrinker);