// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
static void __vunmap(const void *, int);
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}
/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_clear_huge(p4d))
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}
static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}
static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}
static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
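
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller that owns a vmalloc()'ed buffer can translate each virtual
 * page back to its struct page, e.g. to build a scatterlist. The helper
 * itself is hypothetical; vmalloc_to_page() and page_to_pfn() are the
 * real APIs defined above.
 */
static void __maybe_unused vmalloc_walk_pages_demo(const void *buf,
						   unsigned long nr_pages)
{
	const char *p = buf;
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = vmalloc_to_page(p + i * PAGE_SIZE);

		pr_info("vpage %lu -> pfn %lu\n", i, page_to_pfn(page));
	}
}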
/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest match of a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when the node is removed or rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}
/*
 * This function returns back addresses of the parent node
 * and its left or right link for further processing.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with a parent rb_node and the correct direction
	 * for it; we name it link, and that is where the new
	 * va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the BUG() if there are sides(left/right)
		 * or full overlaps.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else
			BUG();
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}
static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}
static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * That is because we populate the tree from the bottom
		 * up to parent levels only when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything
		 * into the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}
static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}
#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(struct rb_node *n)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long size;
	bool found = false;

	if (n == NULL)
		return;

	va = rb_entry(n, struct vmap_area, rb_node);
	size = va->subtree_max_size;
	node = n;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) == size) {
			node = node->rb_left;
		} else {
			if (va_size(va) == size) {
				found = true;
				break;
			}

			node = node->rb_right;
		}
	}

	if (!found) {
		va = rb_entry(n, struct vmap_area, rb_node);
		pr_emerg("tree is corrupted: %lu, %lu\n",
			va_size(va), va->subtree_max_size);
	}

	augment_tree_propagate_check(n->rb_left);
	augment_tree_propagate_check(n->rb_right);
}
#endif
/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from the VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end, or
 * when a VA is newly inserted into the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - after VA has been inserted to the tree (free path);
 * - after VA has been shrunk (allocation path);
 * - after VA has been increased (merging path).
 *
 * Please note that it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the way up
 * to the root node.
 *
 *       4--8
 *        /\
 *       /  \
 *      2    8
 *
 * For example, if we modify node 4, shrinking it to 2, then
 * no modification is required. If we shrink node 2 to 1, only
 * its own subtree_max_size is updated and set to 1. If we shrink
 * node 8 to 6, then its subtree_max_size is set to 6 and the parent
 * node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	struct rb_node *node = &va->rb_node;
	unsigned long new_va_sub_max_size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);
		new_va_sub_max_size = compute_subtree_max_size(va);

		/*
		 * If the newly calculated maximum available size of the
		 * subtree is equal to the current one, then it means that
		 * the tree is propagated correctly. So we have to stop at
		 * this point to save cycles.
		 */
		if (va->subtree_max_size == new_va_sub_max_size)
			break;

		va->subtree_max_size = new_va_sub_max_size;
		node = rb_parent(&va->rb_node);
	}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check(free_vmap_area_root.rb_node);
#endif
}
static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	link_va(va, root, parent, link, head);
}
static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	link_va(va, root, parent, link, head);
	augment_tree_propagate_from(va);
}
/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If coalescing is not done, a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 */
static __always_inline void
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			sibling->va_end = va->va_end;

			/* Check and update the tree if needed. */
			augment_tree_propagate_from(sibling);

			if (merged)
				unlink_va(va, root);

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);
			return;
		}
	}

insert:
	if (!merged) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}
/*
 * Find the first free block (lowest start address) in the tree
 * that can accomplish the request described by the passed
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * only once due to the "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif
enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
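
/*
 * Worked example (editor's illustration, not part of the original file):
 * for a free VA spanning [0x1000, 0x9000), a request placed at
 * nva_start_addr == 0x1000 with size == 0x8000 consumes the whole block
 * -> FL_FIT_TYPE. The same start with size == 0x4000 leaves a right
 * remainder -> LE_FIT_TYPE; a request at 0x5000 with size == 0x4000
 * leaves a left remainder -> RE_FIT_TYPE; one at 0x3000 with size ==
 * 0x2000 leaves both -> NE_FIT_TYPE, which needs an extra vmap_area
 * object for the split.
 */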
static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any
			 * pre-allocation and leave it as it is. The reason is
			 * that it most likely never ends up with NE_FIT_TYPE
			 * splitting. In case of percpu allocations, offsets
			 * and sizes are aligned to a fixed align request,
			 * i.e. RE_FIT_TYPE and FL_FIT_TYPE are its main
			 * fitting cases.
			 *
			 * There are a few exceptions though; as an example,
			 * the first allocation (early boot up) when we have
			 * "one" big free space that has to be split.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}
/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise "vend" is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();

	va = kmem_cache_alloc_node(vmap_area_cachep,
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object to ensure
	 * that we have it available when fit type of free area is
	 * NE_FIT_TYPE.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable under
	 * low memory condition and high memory pressure.
	 *
	 * Even if it fails we do not really care about that. Just proceed
	 * as it is. The "overflow" path will refill the cache we allocate from.
	 */
	if (!__this_cpu_read(ne_fit_preload_node)) {
		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);

		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
			if (pva)
				kmem_cache_free(vmap_area_cachep, pva);
		}
	}

	spin_lock(&vmap_area_lock);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);

	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}
int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
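
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a subsystem that caches vmalloc-backed objects can hook this chain,
 * release some of its cache, and report the amount through the "freed"
 * counter that alloc_vmap_area() passes in, so that the allocation is
 * retried. The callback below is a hypothetical stub.
 */
static int __maybe_unused demo_vmap_purge_notify(struct notifier_block *nb,
						 unsigned long action, void *arg)
{
	unsigned long *freed = arg;

	/* Release private vmalloc-backed caches here, then: */
	*freed += 0;	/* account the number of pages actually released */

	return NOTIFY_OK;
}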
static void __free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	unlink_va(va, &vmap_area_root);

	/*
	 * Merge VA with its neighbors, otherwise just add it.
	 */
	merge_or_add_vmap_area(va,
		&free_vmap_area_root, &free_vmap_area_list);
}
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}
/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
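
/*
 * Worked example (editor's illustration, not part of the original file):
 * with 16 online CPUs, fls(16) == 5, so lazy_max_pages() returns
 * 5 * (32MB / PAGE_SIZE); with 4K pages that is 5 * 8192 == 40960
 * lazily freed pages, i.e. up to 160MB of vmap space may accumulate
 * before a purge is attempted.
 */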
static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}
/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * First make sure the mappings are removed from all page-tables
	 * before they are freed.
	 */
	vmalloc_sync_unmappings();

	/*
	 * TODO: calculate the flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		merge_or_add_vmap_area(va,
			&free_vmap_area_root, &free_vmap_area_list);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}
/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}
/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}
/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_vmap_area(va);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
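
/*
 * Worked example (editor's illustration, not part of the original file):
 * on 64-bit with 4K pages and NR_CPUS == 64, VMALLOC_PAGES is 32M pages,
 * so the innermost term is 32M / 64 / 16 == 32768; clamping against
 * VMAP_BBMAP_BITS_MAX yields 1024 bits, i.e. a 4MB VMAP_BLOCK_SIZE.
 */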
struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}
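
/*
 * Worked example (editor's illustration, not part of the original file):
 * with a 4MB VMAP_BLOCK_SIZE, every address inside one block shares the
 * same index, because the offset from the rounded-down VMALLOC_START is
 * divided by the block size. That is the invariant the BUG_ON() in
 * vmap_block_vaddr() checks: an allocation must never straddle two blocks.
 */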
/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order: how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}
static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}
static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}
static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}
static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range((unsigned long)addr,
					(unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}
static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(mem, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good. But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine). You could see failures in
 * the end. Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
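
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * map a short-lived batch of pages, touch them through the linear alias,
 * then tear the mapping down with the same page count. The helper itself
 * is hypothetical.
 */
static void __maybe_unused vm_map_ram_demo(struct page **pages,
					   unsigned int count)
{
	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);

	if (!mem)
		return;

	memset(mem, 0, (unsigned long)count << PAGE_SHIFT);	/* use the alias */
	vm_unmap_ram(mem, count);	/* full count, never partial */
}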
static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}
static void vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *busy, *free;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |           The KVA space           |
	 *  |<--------------------------------->|
	 */
	list_for_each_entry(busy, &vmap_area_list, list) {
		if (busy->va_start - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = busy->va_start;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
						&free_vmap_area_list);
			}
		}

		vmap_start = busy->va_end;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
					&free_vmap_area_list);
		}
	}
}
void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
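
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the noflush variant leaves cache maintenance to the caller. A typical
 * sequence mirrors vmap_page_range() above: establish the mapping, then
 * flush the virtual cache range. The helper below is hypothetical.
 */
static int __maybe_unused map_kernel_range_demo(unsigned long addr,
		unsigned long size, struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	flush_cache_vmap(addr, addr + size);

	return ret > 0 ? 0 : ret;
}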
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
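
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * reserve vmalloc space and back it with caller-provided pages. This is
 * essentially what vmap() below does internally; the helper itself is
 * hypothetical.
 */
static __maybe_unused void *map_pages_demo(struct page **pages,
					   unsigned int npages)
{
	struct vm_struct *area;

	area = get_vm_area((unsigned long)npages << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);	/* frees the vm_struct as well */
		return NULL;
	}

	return area->addr;
}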
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	spin_unlock(&vmap_area_lock);
}
static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}
static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: pointer to the found area or %NULL on failure
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}
/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: pointer to the found area or %NULL on failure
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	might_sleep();

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area((unsigned long)addr);
	if (va && va->vm) {
		struct vm_struct *vm = va->vm;

		va->vm = NULL;
		spin_unlock(&vmap_area_lock);

		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}

	spin_unlock(&vmap_area_lock);
	return NULL;
}
static inline void set_area_direct_map(const struct vm_struct *area,
				       int (*set_direct_map)(struct page *page))
{
	int i;

	for (i = 0; i < area->nr_pages; i++)
		if (page_address(area->pages[i]))
			set_direct_map(area->pages[i]);
}
/* Handle removing and resetting vm mappings related to the vm_struct. */
static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
	int flush_dmap = 0;
	int i;

	remove_vm_area(area->addr);

	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
	if (!flush_reset)
		return;

	/*
	 * If not deallocating pages, just do the flush of the VM area and
	 * return.
	 */
	if (!deallocate_pages) {
		vm_unmap_aliases();
		return;
	}

	/*
	 * If execution gets here, flush the vm mapping and reset the direct
	 * map. Find the start and end range of the direct mappings to make sure
	 * the vm_unmap_aliases() flush includes the direct map.
	 */
	for (i = 0; i < area->nr_pages; i++) {
		unsigned long addr = (unsigned long)page_address(area->pages[i]);
		if (addr) {
			start = min(addr, start);
			end = max(addr + PAGE_SIZE, end);
			flush_dmap = 1;
		}
	}

	/*
	 * Set direct map to something invalid so that it won't be cached if
	 * there are any accesses after the TLB flush, then flush the TLB and
	 * reset the direct map permissions to the default.
	 */
	set_area_direct_map(area, set_direct_map_invalid_noflush);
	_vm_unmap_aliases(start, end, flush_dmap);
	set_area_direct_map(area, set_direct_map_default_noflush);
}
static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = find_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));

	vm_remove_mappings(area, deallocate_pages);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_pages(page, 0);
		}
		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);

		kvfree(area->pages);
	}

	kfree(area);
	return;
}
static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}
/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	  memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}
static void __vfree(const void *addr)
{
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
		__vunmap(addr, 1);
}
/**
 * vfree - release memory allocated by vmalloc()
 * @addr:  memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea).
 *
 * May sleep if called *not* from interrupt context.
 *
 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	might_sleep_if(!in_interrupt());

	if (!addr)
		return;

	__vfree(addr);
}
EXPORT_SYMBOL(vfree);
/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long size;		/* In bytes */

	might_sleep();

	if (count > totalram_pages())
		return NULL;

	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
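
/*
 * Usage sketch (illustrative only, compiled out): gather a few order-0
 * pages and make them virtually contiguous with vmap(). The function name
 * and the fixed page count are hypothetical; on success the caller is
 * expected to vunmap() and free the pages itself, since vmap() does not
 * take ownership of them.
 */
#if 0
static void *map_three_pages_example(void)
{
	struct page *pages[3];
	void *va;
	int i;

	for (i = 0; i < 3; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	/* VM_MAP tags the area as a vmap()ed range in /proc/vmallocinfo. */
	va = vmap(pages, 3, VM_MAP, PAGE_KERNEL);
	if (va)
		return va;	/* later: vunmap(va), then free the pages */

free_pages:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}
#endif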
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
					0 :
					__GFP_HIGHMEM;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
				PAGE_KERNEL, node, area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}

	if (!pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask|highmem_mask);
		else
			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
			cond_resched();
	}
	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc(gfp_mask, NULL,
		   "vmalloc: allocation failure, allocated %ld of %ld bytes",
		   (area->nr_pages*PAGE_SIZE), area->size);
	__vfree(area->addr);
	return NULL;
}
/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:		  allocation size
 * @align:		  desired alignment
 * @start:		  vm area range start
 * @end:		  vm area range end
 * @gfp_mask:		  flags for the page level allocator
 * @prot:		  protection mask for the allocated pages
 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:		  node to use for allocation or NUMA_NO_NODE
 * @caller:		  caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	kmemleak_vmalloc(area, size, gfp_mask);

	return addr;

fail:
	warn_alloc(gfp_mask, NULL,
		   "vmalloc: allocation failure: %lu bytes", real_size);
	return NULL;
}

/*
 * This is only for performance analysis of vmalloc and stress purpose.
 * It is required by the vmalloc test module, therefore do not use it
 * other than that.
 */
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node_range);
#endif
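
/*
 * Illustrative sketch (compiled out): how a caller with special needs
 * might use __vmalloc_node_range() directly to pick a non-default page
 * protection. The function name is hypothetical, and PAGE_KERNEL_RO is
 * assumed to be provided by the architecture.
 */
#if 0
static void *alloc_ro_blob_example(unsigned long size)
{
	/* Zeroed, read-only mapping carved out of the regular vmalloc range. */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL_RO,
				    0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif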
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	    allocation size
 * @align:	    desired alignment
 * @gfp_mask:	    flags for the page level allocator
 * @prot:	    protection mask for the allocated pages
 * @node:	    node to use for allocation or NUMA_NO_NODE
 * @caller:	    caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be discussed
 * with the mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
				  void *caller)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
}
/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
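
/*
 * Usage sketch (compiled out): the classic vmalloc()/vfree() pairing for
 * an allocation too large for kmalloc() to satisfy reliably. The function
 * and parameter names are hypothetical; array_size() comes from
 * <linux/overflow.h>, which this file already includes.
 */
#if 0
static int make_big_table_example(size_t nentries)
{
	u32 *table = vmalloc(array_size(nentries, sizeof(u32)));

	if (!table)
		return -ENOMEM;

	/* ... use table; it is virtually, not physically, contiguous ... */

	vfree(table);
	return 0;
}
#endif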
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user);
/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	  allocation size
 * @node:	  numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
				    GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);
/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	  allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
			NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
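
/*
 * Usage sketch (compiled out): a hypothetical staging buffer for a device
 * limited to 32-bit DMA addressing. Each page backing the buffer has a
 * physical address below 4GiB, but the buffer is only virtually
 * contiguous, so it would be handed to the device page by page (e.g. via
 * vmalloc_to_page()); the name below is made up for illustration.
 */
#if 0
static void *alloc_staging_buffer_example(unsigned long bytes)
{
	return vmalloc_32(bytes);
}
#endif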
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);
/*
 * Small helper routine: copy contents from @addr to @buf.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from that area to the given buffer. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied to the proper area of @buf. If there are memory holes, they'll
 * be zero-filled. An IOREMAP area is treated as a memory hole and no
 * copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
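
/*
 * Usage sketch (compiled out): vread() exists for debug interfaces that
 * must copy out of an address they cannot vouch for, in the style of the
 * old /dev/kmem path; a normal vmalloc() user would simply memcpy(). The
 * wrapper name below is hypothetical.
 */
#if 0
static long debug_peek_example(void *vmalloc_addr, char *kbuf, size_t len)
{
	/*
	 * Returns len if any part of the range intersected a live vmalloc
	 * area (holes arrive zero-filled in kbuf), 0 otherwise.
	 */
	return vread(kbuf, (char *)vmalloc_addr, len);
}
#endif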
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from the given buffer to that area. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied from the proper area of @buf. If there are memory holes, no
 * copy is done for the holes. An IOREMAP area is treated as a memory
 * hole and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, returns 0. @buf should be a kernel buffer.
 *
 * Note: In usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be
 * increased (same number as @count) or %0 if [addr...addr+count)
 * doesn't include any intersection with a valid vmalloc area
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}
/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criterion isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
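
/*
 * Usage sketch (compiled out): the common pattern behind many drivers'
 * mmap file operations - allocate with vmalloc_user() (zeroed and
 * VM_USERMAP, as remap_vmalloc_range_partial() requires above) and hand
 * the buffer to userspace. The names are hypothetical, and <linux/fs.h>
 * would be needed for struct file.
 */
#if 0
static void *example_buf;	/* set up elsewhere via vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif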
/*
 * Implement stubs for vmalloc_sync_[un]mappings() if the architecture
 * chose not to have one.
 *
 * The purpose of these functions is to make sure the vmalloc area
 * mappings are identical in all page-tables in the system.
 */
void __weak vmalloc_sync_mappings(void)
{
}

void __weak vmalloc_sync_unmappings(void)
{
}

static int f(pte_t *pte, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 * @ptes:	returns the PTEs for the address space
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
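
/*
 * Usage sketch (compiled out): reserve kernel VA space with page tables
 * pre-built but with no backing memory, so the caller can later point the
 * returned PTEs at frames of its own choosing (paravirt guests use this
 * style of hook). The wrapper name is hypothetical; @ptes is assumed to
 * hold size >> PAGE_SHIFT entries.
 */
#if 0
static struct vm_struct *reserve_va_example(size_t size, pte_t **ptes)
{
	struct vm_struct *area = alloc_vm_area(size, ptes);

	if (!area)
		return NULL;

	/* ... install real mappings through *ptes ... */
	/* when finished: free_vm_area(area); */
	return area;
}
#endif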
void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;

	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: vmap_area if it is found. If there is no such area
 *   the first highest (reverse order) vmap_area is returned,
 *   i.e. va->va_start < addr && va->va_end < addr, or NULL
 *   if there are no areas before @addr.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}
/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for the required highest address
 *
 * Returns: determined end address within vmap_area
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base. While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit the area. Scanning is repeated
 * till all the areas fit, and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;

		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

recovery:
	/* Remove previously inserted areas. */
	while (area--) {
		__free_vmap_area(vas[area]);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}
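
/*
 * Usage sketch (compiled out): the percpu allocator requests one area per
 * percpu "unit", all sharing a single base address so that the unit
 * offsets stay congruent. The two-unit layout below (2MiB alignment,
 * non-overlapping 4MiB areas at offsets 0 and 8MiB) is hypothetical.
 */
#if 0
static struct vm_struct **get_two_congruent_areas_example(void)
{
	static const unsigned long offsets[] = { 0, 8UL << 20 };
	static const size_t sizes[] = { 4UL << 20, 4UL << 20 };

	/* On success, vms[i]->addr == base + offsets[i] for a common base. */
	return pcpu_get_vm_areas(offsets, sizes, 2, 2UL << 20);
}
#endif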
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	spin_lock(&vmap_area_lock);
	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct llist_node *head;
	struct vmap_area *va;

	head = READ_ONCE(vmap_purge_list.first);
	if (head == NULL)
		return;

	llist_for_each_entry(va, head, purge_list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): !vm means the vmap
	 * area is being torn down or is a vm_map_ram allocation.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas. Note that the entire
	 * "/proc/vmallocinfo" output will not be address sorted, because
	 * the purge list is not sorted.
	 */
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif