mm/hugetlb.c (Linux 5.1.15)
1 /*
2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
4 */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/memblock.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/mmdebug.h>
22 #include <linux/sched/signal.h>
23 #include <linux/rmap.h>
24 #include <linux/string_helpers.h>
25 #include <linux/swap.h>
26 #include <linux/swapops.h>
27 #include <linux/jhash.h>
28 #include <linux/numa.h>
30 #include <asm/page.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
34 #include <linux/io.h>
35 #include <linux/hugetlb.h>
36 #include <linux/hugetlb_cgroup.h>
37 #include <linux/node.h>
38 #include <linux/userfaultfd_k.h>
39 #include <linux/page_owner.h>
40 #include "internal.h"
42 int hugetlb_max_hstate __read_mostly;
43 unsigned int default_hstate_idx;
44 struct hstate hstates[HUGE_MAX_HSTATE];
46 * Minimum page order among possible hugepage sizes, set to a proper value
47 * at boot time.
49 static unsigned int minimum_order __read_mostly = UINT_MAX;
51 __initdata LIST_HEAD(huge_boot_pages);
53 /* for command line parsing */
54 static struct hstate * __initdata parsed_hstate;
55 static unsigned long __initdata default_hstate_max_huge_pages;
56 static unsigned long __initdata default_hstate_size;
57 static bool __initdata parsed_valid_hugepagesz = true;
60 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
61 * free_huge_pages, and surplus_huge_pages.
63 DEFINE_SPINLOCK(hugetlb_lock);
66 * Serializes faults on the same logical page. This is used to
67 * prevent spurious OOMs when the hugepage pool is fully utilized.
69 static int num_fault_mutexes;
70 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
72 /* Forward declaration */
73 static int hugetlb_acct_memory(struct hstate *h, long delta);
75 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
77 bool free = (spool->count == 0) && (spool->used_hpages == 0);
79 spin_unlock(&spool->lock);
81 /* If no pages are used, and no other handles to the subpool
82 * remain, give up any reservations based on minimum size and
83 * free the subpool */
84 if (free) {
85 if (spool->min_hpages != -1)
86 hugetlb_acct_memory(spool->hstate,
87 -spool->min_hpages);
88 kfree(spool);
92 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
93 long min_hpages)
95 struct hugepage_subpool *spool;
97 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
98 if (!spool)
99 return NULL;
101 spin_lock_init(&spool->lock);
102 spool->count = 1;
103 spool->max_hpages = max_hpages;
104 spool->hstate = h;
105 spool->min_hpages = min_hpages;
107 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
108 kfree(spool);
109 return NULL;
111 spool->rsv_hpages = min_hpages;
113 return spool;
116 void hugepage_put_subpool(struct hugepage_subpool *spool)
118 spin_lock(&spool->lock);
119 BUG_ON(!spool->count);
120 spool->count--;
121 unlock_or_release_subpool(spool);
125 * Subpool accounting for allocating and reserving pages.
126 * Return -ENOMEM if there are not enough resources to satisfy the
127 * request. Otherwise, return the number of pages by which the
128 * global pools must be adjusted (upward). The returned value may
129 * only be different than the passed value (delta) in the case where
130 * a subpool minimum size must be maintained.
132 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
133 long delta)
135 long ret = delta;
137 if (!spool)
138 return ret;
140 spin_lock(&spool->lock);
142 if (spool->max_hpages != -1) { /* maximum size accounting */
143 if ((spool->used_hpages + delta) <= spool->max_hpages)
144 spool->used_hpages += delta;
145 else {
146 ret = -ENOMEM;
147 goto unlock_ret;
151 /* minimum size accounting */
152 if (spool->min_hpages != -1 && spool->rsv_hpages) {
153 if (delta > spool->rsv_hpages) {
155 * Asking for more reserves than those already taken on
156 * behalf of subpool. Return difference.
158 ret = delta - spool->rsv_hpages;
159 spool->rsv_hpages = 0;
160 } else {
161 ret = 0; /* reserves already accounted for */
162 spool->rsv_hpages -= delta;
166 unlock_ret:
167 spin_unlock(&spool->lock);
168 return ret;
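/*
 * For example, a subpool created with min_hpages = 4 starts with
 * rsv_hpages = 4 pages already charged to the global pool by
 * hugepage_new_subpool(). A request for delta = 6 consumes those 4
 * reserves and returns 2, so the caller only adjusts the global pool
 * for the 2 pages not already covered; a request for delta = 2 would
 * instead return 0 and leave rsv_hpages = 2.
 */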
172 * Subpool accounting for freeing and unreserving pages.
173 * Return the number of global page reservations that must be dropped.
174 * The return value may only be different than the passed value (delta)
175 * in the case where a subpool minimum size must be maintained.
177 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
178 long delta)
180 long ret = delta;
182 if (!spool)
183 return delta;
185 spin_lock(&spool->lock);
187 if (spool->max_hpages != -1) /* maximum size accounting */
188 spool->used_hpages -= delta;
190 /* minimum size accounting */
191 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
192 if (spool->rsv_hpages + delta <= spool->min_hpages)
193 ret = 0;
194 else
195 ret = spool->rsv_hpages + delta - spool->min_hpages;
197 spool->rsv_hpages += delta;
198 if (spool->rsv_hpages > spool->min_hpages)
199 spool->rsv_hpages = spool->min_hpages;
203 * If hugetlbfs_put_super couldn't free spool due to an outstanding
204 * quota reference, free it now.
206 unlock_or_release_subpool(spool);
208 return ret;
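/*
 * For example, with max_hpages = 8, min_hpages = 4, used_hpages = 6
 * and rsv_hpages = 0, freeing 3 pages returns 0: all 3 reservations
 * are retained to rebuild the subpool minimum (rsv_hpages becomes 3).
 * Freeing 3 more returns 2, since only 4 reserves are needed to back
 * min_hpages and the remaining 2 global reservations can be dropped.
 */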
211 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
213 return HUGETLBFS_SB(inode->i_sb)->spool;
216 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
218 return subpool_inode(file_inode(vma->vm_file));
222 * Region tracking -- allows tracking of reservations and instantiated pages
223 * across the pages in a mapping.
225 * The region data structures are embedded into a resv_map and protected
226 * by a resv_map's lock. The set of regions within the resv_map represent
227 * reservations for huge pages, or huge pages that have already been
228 * instantiated within the map. The from and to elements are huge page
229 * indices into the associated mapping. from indicates the starting index
230 * of the region. to represents the first index past the end of the region.
232 * For example, a file region structure with from == 0 and to == 4 represents
233 * four huge pages in a mapping. It is important to note that the to element
234 * represents the first element past the end of the region. This is used in
235 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
237 * Interval notation of the form [from, to) will be used to indicate that
238 * the endpoint from is inclusive and to is exclusive.
240 struct file_region {
241 struct list_head link;
242 long from;
243 long to;
247 * Add the huge page range represented by [f, t) to the reserve
248 * map. In the normal case, existing regions will be expanded
249 * to accommodate the specified range. Sufficient regions should
250 * exist for expansion due to the previous call to region_chg
251 * with the same range. However, it is possible that region_del
252 * could have been called after region_chg and modified the map
253 * in such a way that no region exists to be expanded. In this
254 * case, pull a region descriptor from the cache associated with
255 * the map and use that for the new range.
257 * Return the number of new huge pages added to the map. This
258 * number is greater than or equal to zero.
260 static long region_add(struct resv_map *resv, long f, long t)
262 struct list_head *head = &resv->regions;
263 struct file_region *rg, *nrg, *trg;
264 long add = 0;
266 spin_lock(&resv->lock);
267 /* Locate the region we are either in or before. */
268 list_for_each_entry(rg, head, link)
269 if (f <= rg->to)
270 break;
273 * If no region exists which can be expanded to include the
274 * specified range, the list must have been modified by an
275 * interleaving call to region_del(). Pull a region descriptor
276 * from the cache and use it for this range.
278 if (&rg->link == head || t < rg->from) {
279 VM_BUG_ON(resv->region_cache_count <= 0);
281 resv->region_cache_count--;
282 nrg = list_first_entry(&resv->region_cache, struct file_region,
283 link);
284 list_del(&nrg->link);
286 nrg->from = f;
287 nrg->to = t;
288 list_add(&nrg->link, rg->link.prev);
290 add += t - f;
291 goto out_locked;
294 /* Round our left edge to the current segment if it encloses us. */
295 if (f > rg->from)
296 f = rg->from;
298 /* Check for and consume any regions we now overlap with. */
299 nrg = rg;
300 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
301 if (&rg->link == head)
302 break;
303 if (rg->from > t)
304 break;
306 /* If this area reaches higher, extend our area to
307 * include it completely. If this is not the first area
308 * which we intend to reuse, free it. */
309 if (rg->to > t)
310 t = rg->to;
311 if (rg != nrg) {
312 /* Decrement return value by the deleted range.
313 * Another range will span this area, so by the
314 * end of the routine add will be >= zero
316 add -= (rg->to - rg->from);
317 list_del(&rg->link);
318 kfree(rg);
322 add += (nrg->from - f); /* Added to beginning of region */
323 nrg->from = f;
324 add += t - nrg->to; /* Added to end of region */
325 nrg->to = t;
327 out_locked:
328 resv->adds_in_progress--;
329 spin_unlock(&resv->lock);
330 VM_BUG_ON(add < 0);
331 return add;
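/*
 * For example, with an existing region [0, 4), region_chg(resv, 2, 6)
 * returns 2 (only indices 4 and 5 are new) and a following
 * region_add(resv, 2, 6) expands the region to [0, 6), also
 * returning 2.
 */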
335 * Examine the existing reserve map and determine how many
336 * huge pages in the specified range [f, t) are NOT currently
337 * represented. This routine is called before a subsequent
338 * call to region_add that will actually modify the reserve
339 * map to add the specified range [f, t). region_chg does
340 * not change the number of huge pages represented by the
341 * map. However, if the existing regions in the map can not
342 * be expanded to represent the new range, a new file_region
343 * structure is added to the map as a placeholder. This is
344 * so that the subsequent region_add call will have all the
345 * regions it needs and will not fail.
347 * Upon entry, region_chg will also examine the cache of region descriptors
348 * associated with the map. If there are not enough descriptors cached, one
349 * will be allocated for the in progress add operation.
351 * Returns the number of huge pages that need to be added to the existing
352 * reservation map for the range [f, t). This number is greater or equal to
353 * zero. -ENOMEM is returned if a new file_region structure or cache entry
354 * is needed and can not be allocated.
356 static long region_chg(struct resv_map *resv, long f, long t)
358 struct list_head *head = &resv->regions;
359 struct file_region *rg, *nrg = NULL;
360 long chg = 0;
362 retry:
363 spin_lock(&resv->lock);
364 retry_locked:
365 resv->adds_in_progress++;
368 * Check for sufficient descriptors in the cache to accommodate
369 * the number of in progress add operations.
371 if (resv->adds_in_progress > resv->region_cache_count) {
372 struct file_region *trg;
374 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
375 /* Must drop lock to allocate a new descriptor. */
376 resv->adds_in_progress--;
377 spin_unlock(&resv->lock);
379 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
380 if (!trg) {
381 kfree(nrg);
382 return -ENOMEM;
385 spin_lock(&resv->lock);
386 list_add(&trg->link, &resv->region_cache);
387 resv->region_cache_count++;
388 goto retry_locked;
391 /* Locate the region we are before or in. */
392 list_for_each_entry(rg, head, link)
393 if (f <= rg->to)
394 break;
396 /* If we are below the current region then a new region is required.
397 * Subtle: allocate a new region at the position, but make it zero
398 * size so that we are guaranteed to record the reservation. */
399 if (&rg->link == head || t < rg->from) {
400 if (!nrg) {
401 resv->adds_in_progress--;
402 spin_unlock(&resv->lock);
403 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
404 if (!nrg)
405 return -ENOMEM;
407 nrg->from = f;
408 nrg->to = f;
409 INIT_LIST_HEAD(&nrg->link);
410 goto retry;
413 list_add(&nrg->link, rg->link.prev);
414 chg = t - f;
415 goto out_nrg;
418 /* Round our left edge to the current segment if it encloses us. */
419 if (f > rg->from)
420 f = rg->from;
421 chg = t - f;
423 /* Check for and consume any regions we now overlap with. */
424 list_for_each_entry(rg, rg->link.prev, link) {
425 if (&rg->link == head)
426 break;
427 if (rg->from > t)
428 goto out;
430 /* We overlap with this area, if it extends further than
431 * us then we must extend ourselves. Account for its
432 * existing reservation. */
433 if (rg->to > t) {
434 chg += rg->to - t;
435 t = rg->to;
437 chg -= rg->to - rg->from;
440 out:
441 spin_unlock(&resv->lock);
442 /* We already know we raced and no longer need the new region */
443 kfree(nrg);
444 return chg;
445 out_nrg:
446 spin_unlock(&resv->lock);
447 return chg;
451 * Abort the in progress add operation. The adds_in_progress field
452 * of the resv_map keeps track of the operations in progress between
453 * calls to region_chg and region_add. Operations are sometimes
454 * aborted after the call to region_chg. In such cases, region_abort
455 * is called to decrement the adds_in_progress counter.
457 * NOTE: The range arguments [f, t) are not needed or used in this
458 * routine. They are kept to make reading the calling code easier as
459 * arguments will match the associated region_chg call.
461 static void region_abort(struct resv_map *resv, long f, long t)
463 spin_lock(&resv->lock);
464 VM_BUG_ON(!resv->region_cache_count);
465 resv->adds_in_progress--;
466 spin_unlock(&resv->lock);
470 * Delete the specified range [f, t) from the reserve map. If the
471 * t parameter is LONG_MAX, this indicates that ALL regions after f
472 * should be deleted. Locate the regions which intersect [f, t)
473 * and either trim, delete or split the existing regions.
475 * Returns the number of huge pages deleted from the reserve map.
476 * In the normal case, the return value is zero or more. In the
477 * case where a region must be split, a new region descriptor must
478 * be allocated. If the allocation fails, -ENOMEM will be returned.
479 * NOTE: If the parameter t == LONG_MAX, then we will never split
480 * a region and possibly return -ENOMEM. Callers specifying
481 * t == LONG_MAX do not need to check for -ENOMEM error.
483 static long region_del(struct resv_map *resv, long f, long t)
485 struct list_head *head = &resv->regions;
486 struct file_region *rg, *trg;
487 struct file_region *nrg = NULL;
488 long del = 0;
490 retry:
491 spin_lock(&resv->lock);
492 list_for_each_entry_safe(rg, trg, head, link) {
494 * Skip regions before the range to be deleted. file_region
495 * ranges are normally of the form [from, to). However, there
496 * may be a "placeholder" entry in the map which is of the form
497 * (from, to) with from == to. Check for placeholder entries
498 * at the beginning of the range to be deleted.
500 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
501 continue;
503 if (rg->from >= t)
504 break;
506 if (f > rg->from && t < rg->to) { /* Must split region */
508 * Check for an entry in the cache before dropping
509 * lock and attempting allocation.
511 if (!nrg &&
512 resv->region_cache_count > resv->adds_in_progress) {
513 nrg = list_first_entry(&resv->region_cache,
514 struct file_region,
515 link);
516 list_del(&nrg->link);
517 resv->region_cache_count--;
520 if (!nrg) {
521 spin_unlock(&resv->lock);
522 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
523 if (!nrg)
524 return -ENOMEM;
525 goto retry;
528 del += t - f;
530 /* New entry for end of split region */
531 nrg->from = t;
532 nrg->to = rg->to;
533 INIT_LIST_HEAD(&nrg->link);
535 /* Original entry is trimmed */
536 rg->to = f;
538 list_add(&nrg->link, &rg->link);
539 nrg = NULL;
540 break;
543 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
544 del += rg->to - rg->from;
545 list_del(&rg->link);
546 kfree(rg);
547 continue;
550 if (f <= rg->from) { /* Trim beginning of region */
551 del += t - rg->from;
552 rg->from = t;
553 } else { /* Trim end of region */
554 del += rg->to - f;
555 rg->to = f;
559 spin_unlock(&resv->lock);
560 kfree(nrg);
561 return del;
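/*
 * For example, deleting [2, 4) from a map containing [0, 6) splits the
 * region into [0, 2) and [4, 6) and returns 2. The split consumes a
 * region descriptor, which is why -ENOMEM is possible in that case.
 */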
565 * A rare out of memory error was encountered which prevented removal of
566 * the reserve map region for a page. The huge page itself was freed
567 * and removed from the page cache. This routine will adjust the subpool
568 * usage count, and the global reserve count if needed. By incrementing
569 * these counts, the reserve map entry which could not be deleted will
570 * appear as a "reserved" entry instead of simply dangling with incorrect
571 * counts.
573 void hugetlb_fix_reserve_counts(struct inode *inode)
575 struct hugepage_subpool *spool = subpool_inode(inode);
576 long rsv_adjust;
578 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
579 if (rsv_adjust) {
580 struct hstate *h = hstate_inode(inode);
582 hugetlb_acct_memory(h, 1);
587 * Count and return the number of huge pages in the reserve map
588 * that intersect with the range [f, t).
590 static long region_count(struct resv_map *resv, long f, long t)
592 struct list_head *head = &resv->regions;
593 struct file_region *rg;
594 long chg = 0;
596 spin_lock(&resv->lock);
597 /* Locate each segment we overlap with, and count that overlap. */
598 list_for_each_entry(rg, head, link) {
599 long seg_from;
600 long seg_to;
602 if (rg->to <= f)
603 continue;
604 if (rg->from >= t)
605 break;
607 seg_from = max(rg->from, f);
608 seg_to = min(rg->to, t);
610 chg += seg_to - seg_from;
612 spin_unlock(&resv->lock);
614 return chg;
618 * Convert the address within this vma to the page offset within
619 * the mapping, in pagecache page units; huge pages here.
621 static pgoff_t vma_hugecache_offset(struct hstate *h,
622 struct vm_area_struct *vma, unsigned long address)
624 return ((address - vma->vm_start) >> huge_page_shift(h)) +
625 (vma->vm_pgoff >> huge_page_order(h));
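/*
 * For example, with 2MB huge pages and assuming a 4KB base page size
 * (huge_page_shift == 21, huge_page_order == 9), an address 4MB past
 * vm_start in a VMA with vm_pgoff == 1024 yields
 * (4MB >> 21) + (1024 >> 9) == 2 + 2 == 4.
 */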
628 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
629 unsigned long address)
631 return vma_hugecache_offset(hstate_vma(vma), vma, address);
633 EXPORT_SYMBOL_GPL(linear_hugepage_index);
636 * Return the size of the pages allocated when backing a VMA. In the majority
637 * of cases this will be the same size as that used by the page table entries.
639 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
641 if (vma->vm_ops && vma->vm_ops->pagesize)
642 return vma->vm_ops->pagesize(vma);
643 return PAGE_SIZE;
645 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648 * Return the page size being used by the MMU to back a VMA. In the majority
649 * of cases, the page size used by the kernel matches the MMU size. On
650 * architectures where it differs, an architecture-specific 'strong'
651 * version of this symbol is required.
653 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
655 return vma_kernel_pagesize(vma);
659 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
660 * bits of the reservation map pointer, which are always clear due to
661 * alignment.
663 #define HPAGE_RESV_OWNER (1UL << 0)
664 #define HPAGE_RESV_UNMAPPED (1UL << 1)
665 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
668 * These helpers are used to track how many pages are reserved for
669 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
670 * is guaranteed to have its future faults succeed.
672 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
673 * the reserve counters are updated with the hugetlb_lock held. It is safe
674 * to reset the VMA at fork() time as it is not in use yet and there is no
675 * chance of the global counters getting corrupted as a result of the values.
677 * The private mapping reservation is represented in a subtly different
678 * manner to a shared mapping. A shared mapping has a region map associated
679 * with the underlying file, this region map represents the backing file
680 * pages which have ever had a reservation assigned; this persists even
681 * after the page is instantiated. A private mapping has a region map
682 * associated with the original mmap which is attached to all VMAs which
683 * reference it, this region map represents those offsets which have consumed
684 * reservation, i.e. where pages have been instantiated.
686 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
688 return (unsigned long)vma->vm_private_data;
691 static void set_vma_private_data(struct vm_area_struct *vma,
692 unsigned long value)
694 vma->vm_private_data = (void *)value;
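/*
 * Allocate and initialise a reservation map. The region cache is
 * pre-seeded with a single descriptor so that it is not empty when
 * region_add() needs to pull an entry from it.
 */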
697 struct resv_map *resv_map_alloc(void)
699 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
702 if (!resv_map || !rg) {
703 kfree(resv_map);
704 kfree(rg);
705 return NULL;
708 kref_init(&resv_map->refs);
709 spin_lock_init(&resv_map->lock);
710 INIT_LIST_HEAD(&resv_map->regions);
712 resv_map->adds_in_progress = 0;
714 INIT_LIST_HEAD(&resv_map->region_cache);
715 list_add(&rg->link, &resv_map->region_cache);
716 resv_map->region_cache_count = 1;
718 return resv_map;
721 void resv_map_release(struct kref *ref)
723 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724 struct list_head *head = &resv_map->region_cache;
725 struct file_region *rg, *trg;
727 /* Clear out any active regions before we release the map. */
728 region_del(resv_map, 0, LONG_MAX);
730 /* ... and any entries left in the cache */
731 list_for_each_entry_safe(rg, trg, head, link) {
732 list_del(&rg->link);
733 kfree(rg);
736 VM_BUG_ON(resv_map->adds_in_progress);
738 kfree(resv_map);
741 static inline struct resv_map *inode_resv_map(struct inode *inode)
743 return inode->i_mapping->private_data;
746 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
748 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
749 if (vma->vm_flags & VM_MAYSHARE) {
750 struct address_space *mapping = vma->vm_file->f_mapping;
751 struct inode *inode = mapping->host;
753 return inode_resv_map(inode);
755 } else {
756 return (struct resv_map *)(get_vma_private_data(vma) &
757 ~HPAGE_RESV_MASK);
761 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
763 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
766 set_vma_private_data(vma, (get_vma_private_data(vma) &
767 HPAGE_RESV_MASK) | (unsigned long)map);
770 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
772 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
775 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
778 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
780 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
782 return (get_vma_private_data(vma) & flag) != 0;
785 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
786 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
788 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789 if (!(vma->vm_flags & VM_MAYSHARE))
790 vma->vm_private_data = (void *)0;
793 /* Returns true if the VMA has associated reserve pages */
794 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
796 if (vma->vm_flags & VM_NORESERVE) {
798 * This address is already reserved by another process (chg == 0),
799 * so we should decrement the reserved count. Without decrementing,
800 * the reserve count remains after releasing the inode, because this
801 * allocated page will go into the page cache and is regarded as
802 * coming from the reserved pool in the releasing step. Currently, we
803 * don't have any other solution to deal with this situation
804 * properly, so add a work-around here.
806 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
807 return true;
808 else
809 return false;
812 /* Shared mappings always use reserves */
813 if (vma->vm_flags & VM_MAYSHARE) {
815 * We know VM_NORESERVE is not set. Therefore, there SHOULD
816 * be a region map for all pages. The only situation where
817 * there is no region map is if a hole was punched via
818 * fallocate. In this case, there really are no reserves to
819 * use. This situation is indicated if chg != 0.
821 if (chg)
822 return false;
823 else
824 return true;
828 * Only the process that called mmap() has reserves for
829 * private mappings.
831 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
833 * Like the shared case above, a hole punch or truncate
834 * could have been performed on the private mapping.
835 * Examine the value of chg to determine if reserves
836 * actually exist or were previously consumed.
837 * Very Subtle - The value of chg comes from a previous
838 * call to vma_needs_reserves(). The reserve map for
839 * private mappings has different (opposite) semantics
840 * than that of shared mappings. vma_needs_reserves()
841 * has already taken this difference in semantics into
842 * account. Therefore, the meaning of chg is the same
843 * as in the shared case above. Code could easily be
844 * combined, but keeping it separate draws attention to
845 * subtle differences.
847 if (chg)
848 return false;
849 else
850 return true;
853 return false;
856 static void enqueue_huge_page(struct hstate *h, struct page *page)
858 int nid = page_to_nid(page);
859 list_move(&page->lru, &h->hugepage_freelists[nid]);
860 h->free_huge_pages++;
861 h->free_huge_pages_node[nid]++;
864 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
866 struct page *page;
868 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
869 if (!PageHWPoison(page))
870 break;
872 * if a 'non-isolated free hugepage' is not found on the list,
873 * the allocation fails.
875 if (&h->hugepage_freelists[nid] == &page->lru)
876 return NULL;
877 list_move(&page->lru, &h->hugepage_activelist);
878 set_page_refcounted(page);
879 h->free_huge_pages--;
880 h->free_huge_pages_node[nid]--;
881 return page;
884 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
885 nodemask_t *nmask)
887 unsigned int cpuset_mems_cookie;
888 struct zonelist *zonelist;
889 struct zone *zone;
890 struct zoneref *z;
891 int node = NUMA_NO_NODE;
893 zonelist = node_zonelist(nid, gfp_mask);
895 retry_cpuset:
896 cpuset_mems_cookie = read_mems_allowed_begin();
897 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
898 struct page *page;
900 if (!cpuset_zone_allowed(zone, gfp_mask))
901 continue;
903 * no need to ask again on the same node. Pool is node rather than
904 * zone aware
906 if (zone_to_nid(zone) == node)
907 continue;
908 node = zone_to_nid(zone);
910 page = dequeue_huge_page_node_exact(h, node);
911 if (page)
912 return page;
914 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
915 goto retry_cpuset;
917 return NULL;
920 /* Movability of hugepages depends on migration support. */
921 static inline gfp_t htlb_alloc_mask(struct hstate *h)
923 if (hugepage_movable_supported(h))
924 return GFP_HIGHUSER_MOVABLE;
925 else
926 return GFP_HIGHUSER;
929 static struct page *dequeue_huge_page_vma(struct hstate *h,
930 struct vm_area_struct *vma,
931 unsigned long address, int avoid_reserve,
932 long chg)
934 struct page *page;
935 struct mempolicy *mpol;
936 gfp_t gfp_mask;
937 nodemask_t *nodemask;
938 int nid;
941 * A child process with MAP_PRIVATE mappings created by its parent
942 * has no page reserves. This check ensures that reservations are
943 * not "stolen". The child may still get SIGKILLed
945 if (!vma_has_reserves(vma, chg) &&
946 h->free_huge_pages - h->resv_huge_pages == 0)
947 goto err;
949 /* If reserves cannot be used, ensure enough pages are in the pool */
950 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
951 goto err;
953 gfp_mask = htlb_alloc_mask(h);
954 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
955 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
956 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
957 SetPagePrivate(page);
958 h->resv_huge_pages--;
961 mpol_cond_put(mpol);
962 return page;
964 err:
965 return NULL;
969 * common helper functions for hstate_next_node_to_{alloc|free}.
970 * We may have allocated or freed a huge page based on a different
971 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
972 * be outside of *nodes_allowed. Ensure that we use an allowed
973 * node for alloc or free.
975 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
977 nid = next_node_in(nid, *nodes_allowed);
978 VM_BUG_ON(nid >= MAX_NUMNODES);
980 return nid;
983 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
985 if (!node_isset(nid, *nodes_allowed))
986 nid = next_node_allowed(nid, nodes_allowed);
987 return nid;
991 * returns the previously saved node ["this node"] from which to
992 * allocate a persistent huge page for the pool and advance the
993 * next node from which to allocate, handling wrap at end of node
994 * mask.
996 static int hstate_next_node_to_alloc(struct hstate *h,
997 nodemask_t *nodes_allowed)
999 int nid;
1001 VM_BUG_ON(!nodes_allowed);
1003 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1004 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1006 return nid;
1010 * helper for free_pool_huge_page() - return the previously saved
1011 * node ["this node"] from which to free a huge page. Advance the
1012 * next node id whether or not we find a free huge page to free so
1013 * that the next attempt to free addresses the next node.
1015 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1017 int nid;
1019 VM_BUG_ON(!nodes_allowed);
1021 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1022 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1024 return nid;
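/*
 * Iterate over the nodes in *mask, starting from the hstate's saved
 * "next node", for at most nodes_weight(*mask) iterations. Each pass
 * sets 'node' to the node to allocate from (or free to) and advances
 * the saved position, so successive calls interleave across nodes.
 */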
1027 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1028 for (nr_nodes = nodes_weight(*mask); \
1029 nr_nodes > 0 && \
1030 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1031 nr_nodes--)
1033 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1034 for (nr_nodes = nodes_weight(*mask); \
1035 nr_nodes > 0 && \
1036 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1037 nr_nodes--)
1039 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1040 static void destroy_compound_gigantic_page(struct page *page,
1041 unsigned int order)
1043 int i;
1044 int nr_pages = 1 << order;
1045 struct page *p = page + 1;
1047 atomic_set(compound_mapcount_ptr(page), 0);
1048 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1049 clear_compound_head(p);
1050 set_page_refcounted(p);
1053 set_compound_order(page, 0);
1054 __ClearPageHead(page);
1057 static void free_gigantic_page(struct page *page, unsigned int order)
1059 free_contig_range(page_to_pfn(page), 1 << order);
1062 static int __alloc_gigantic_page(unsigned long start_pfn,
1063 unsigned long nr_pages, gfp_t gfp_mask)
1065 unsigned long end_pfn = start_pfn + nr_pages;
1066 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1067 gfp_mask);
1070 static bool pfn_range_valid_gigantic(struct zone *z,
1071 unsigned long start_pfn, unsigned long nr_pages)
1073 unsigned long i, end_pfn = start_pfn + nr_pages;
1074 struct page *page;
1076 for (i = start_pfn; i < end_pfn; i++) {
1077 if (!pfn_valid(i))
1078 return false;
1080 page = pfn_to_page(i);
1082 if (page_zone(page) != z)
1083 return false;
1085 if (PageReserved(page))
1086 return false;
1088 if (page_count(page) > 0)
1089 return false;
1091 if (PageHuge(page))
1092 return false;
1095 return true;
1098 static bool zone_spans_last_pfn(const struct zone *zone,
1099 unsigned long start_pfn, unsigned long nr_pages)
1101 unsigned long last_pfn = start_pfn + nr_pages - 1;
1102 return zone_spans_pfn(zone, last_pfn);
1105 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1106 int nid, nodemask_t *nodemask)
1108 unsigned int order = huge_page_order(h);
1109 unsigned long nr_pages = 1 << order;
1110 unsigned long ret, pfn, flags;
1111 struct zonelist *zonelist;
1112 struct zone *zone;
1113 struct zoneref *z;
1115 zonelist = node_zonelist(nid, gfp_mask);
1116 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1117 spin_lock_irqsave(&zone->lock, flags);
1119 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1120 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1121 if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1123 * We release the zone lock here because
1124 * alloc_contig_range() will also lock the zone
1125 * at some point. If there's an allocation
1126 * spinning on this lock, it may win the race
1127 * and cause alloc_contig_range() to fail...
1129 spin_unlock_irqrestore(&zone->lock, flags);
1130 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1131 if (!ret)
1132 return pfn_to_page(pfn);
1133 spin_lock_irqsave(&zone->lock, flags);
1135 pfn += nr_pages;
1138 spin_unlock_irqrestore(&zone->lock, flags);
1141 return NULL;
1144 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1145 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1147 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1148 static inline bool gigantic_page_supported(void) { return false; }
1149 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1150 int nid, nodemask_t *nodemask) { return NULL; }
1151 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1152 static inline void destroy_compound_gigantic_page(struct page *page,
1153 unsigned int order) { }
1154 #endif
1156 static void update_and_free_page(struct hstate *h, struct page *page)
1158 int i;
1160 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1161 return;
1163 h->nr_huge_pages--;
1164 h->nr_huge_pages_node[page_to_nid(page)]--;
1165 for (i = 0; i < pages_per_huge_page(h); i++) {
1166 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1167 1 << PG_referenced | 1 << PG_dirty |
1168 1 << PG_active | 1 << PG_private |
1169 1 << PG_writeback);
1171 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1172 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1173 set_page_refcounted(page);
1174 if (hstate_is_gigantic(h)) {
1175 destroy_compound_gigantic_page(page, huge_page_order(h));
1176 free_gigantic_page(page, huge_page_order(h));
1177 } else {
1178 __free_pages(page, huge_page_order(h));
1182 struct hstate *size_to_hstate(unsigned long size)
1184 struct hstate *h;
1186 for_each_hstate(h) {
1187 if (huge_page_size(h) == size)
1188 return h;
1190 return NULL;
1194 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1195 * to hstate->hugepage_activelist.)
1197 * This function can be called for tail pages, but never returns true for them.
1199 bool page_huge_active(struct page *page)
1201 VM_BUG_ON_PAGE(!PageHuge(page), page);
1202 return PageHead(page) && PagePrivate(&page[1]);
1205 /* never called for tail page */
1206 static void set_page_huge_active(struct page *page)
1208 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1209 SetPagePrivate(&page[1]);
1212 static void clear_page_huge_active(struct page *page)
1214 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1215 ClearPagePrivate(&page[1]);
1219 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1220 * code
1222 static inline bool PageHugeTemporary(struct page *page)
1224 if (!PageHuge(page))
1225 return false;
1227 return (unsigned long)page[2].mapping == -1U;
1230 static inline void SetPageHugeTemporary(struct page *page)
1232 page[2].mapping = (void *)-1U;
1235 static inline void ClearPageHugeTemporary(struct page *page)
1237 page[2].mapping = NULL;
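/*
 * The temporary state is encoded by storing -1 in page[2].mapping
 * rather than in a page flag; PageHugeTemporary() checks PageHuge()
 * first, so the field is only interpreted for hugetlb pages.
 */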
1240 void free_huge_page(struct page *page)
1243 * Can't pass hstate in here because it is called from the
1244 * compound page destructor.
1246 struct hstate *h = page_hstate(page);
1247 int nid = page_to_nid(page);
1248 struct hugepage_subpool *spool =
1249 (struct hugepage_subpool *)page_private(page);
1250 bool restore_reserve;
1252 VM_BUG_ON_PAGE(page_count(page), page);
1253 VM_BUG_ON_PAGE(page_mapcount(page), page);
1255 set_page_private(page, 0);
1256 page->mapping = NULL;
1257 restore_reserve = PagePrivate(page);
1258 ClearPagePrivate(page);
1261 * If PagePrivate() was set on page, page allocation consumed a
1262 * reservation. If the page was associated with a subpool, there
1263 * would have been a page reserved in the subpool before allocation
1264 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1265 * reservation, do not call hugepage_subpool_put_pages() as this will
1266 * remove the reserved page from the subpool.
1268 if (!restore_reserve) {
1270 * A return code of zero implies that the subpool will be
1271 * under its minimum size if the reservation is not restored
1272 * after page is free. Therefore, force restore_reserve
1273 * operation.
1275 if (hugepage_subpool_put_pages(spool, 1) == 0)
1276 restore_reserve = true;
1279 spin_lock(&hugetlb_lock);
1280 clear_page_huge_active(page);
1281 hugetlb_cgroup_uncharge_page(hstate_index(h),
1282 pages_per_huge_page(h), page);
1283 if (restore_reserve)
1284 h->resv_huge_pages++;
1286 if (PageHugeTemporary(page)) {
1287 list_del(&page->lru);
1288 ClearPageHugeTemporary(page);
1289 update_and_free_page(h, page);
1290 } else if (h->surplus_huge_pages_node[nid]) {
1291 /* remove the page from active list */
1292 list_del(&page->lru);
1293 update_and_free_page(h, page);
1294 h->surplus_huge_pages--;
1295 h->surplus_huge_pages_node[nid]--;
1296 } else {
1297 arch_clear_hugepage_flags(page);
1298 enqueue_huge_page(h, page);
1300 spin_unlock(&hugetlb_lock);
1303 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1305 INIT_LIST_HEAD(&page->lru);
1306 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1307 spin_lock(&hugetlb_lock);
1308 set_hugetlb_cgroup(page, NULL);
1309 h->nr_huge_pages++;
1310 h->nr_huge_pages_node[nid]++;
1311 spin_unlock(&hugetlb_lock);
1314 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1316 int i;
1317 int nr_pages = 1 << order;
1318 struct page *p = page + 1;
1320 /* we rely on prep_new_huge_page to set the destructor */
1321 set_compound_order(page, order);
1322 __ClearPageReserved(page);
1323 __SetPageHead(page);
1324 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1326 * For gigantic hugepages allocated through bootmem at
1327 * boot, it's safer to be consistent with the not-gigantic
1328 * hugepages and clear the PG_reserved bit from all tail pages
1329 * too. Otherwise drivers using get_user_pages() to access tail
1330 * pages may get the reference counting wrong if they see
1331 * PG_reserved set on a tail page (despite the head page not
1332 * having PG_reserved set). Enforcing this consistency between
1333 * head and tail pages allows drivers to optimize away a check
1334 * on the head page when they need to know if put_page() is needed
1335 * after get_user_pages().
1337 __ClearPageReserved(p);
1338 set_page_count(p, 0);
1339 set_compound_head(p, page);
1341 atomic_set(compound_mapcount_ptr(page), -1);
1345 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1346 * transparent huge pages. See the PageTransHuge() documentation for more
1347 * details.
1349 int PageHuge(struct page *page)
1351 if (!PageCompound(page))
1352 return 0;
1354 page = compound_head(page);
1355 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1357 EXPORT_SYMBOL_GPL(PageHuge);
1360 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1361 * normal or transparent huge pages.
1363 int PageHeadHuge(struct page *page_head)
1365 if (!PageHead(page_head))
1366 return 0;
1368 return get_compound_page_dtor(page_head) == free_huge_page;
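/*
 * __basepage_index() returns the index, in base (PAGE_SIZE) pages, of
 * a page within its mapping: the head page's index shifted by the
 * compound order, plus the offset of the page within the compound
 * page.
 */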
1371 pgoff_t __basepage_index(struct page *page)
1373 struct page *page_head = compound_head(page);
1374 pgoff_t index = page_index(page_head);
1375 unsigned long compound_idx;
1377 if (!PageHuge(page_head))
1378 return page_index(page);
1380 if (compound_order(page_head) >= MAX_ORDER)
1381 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1382 else
1383 compound_idx = page - page_head;
1385 return (index << compound_order(page_head)) + compound_idx;
1388 static struct page *alloc_buddy_huge_page(struct hstate *h,
1389 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1391 int order = huge_page_order(h);
1392 struct page *page;
1394 gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1395 if (nid == NUMA_NO_NODE)
1396 nid = numa_mem_id();
1397 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1398 if (page)
1399 __count_vm_event(HTLB_BUDDY_PGALLOC);
1400 else
1401 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1403 return page;
1407 * Common helper to allocate a fresh hugetlb page. All specific allocators
1408 * should use this function to get new hugetlb pages
1410 static struct page *alloc_fresh_huge_page(struct hstate *h,
1411 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1413 struct page *page;
1415 if (hstate_is_gigantic(h))
1416 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1417 else
1418 page = alloc_buddy_huge_page(h, gfp_mask,
1419 nid, nmask);
1420 if (!page)
1421 return NULL;
1423 if (hstate_is_gigantic(h))
1424 prep_compound_gigantic_page(page, huge_page_order(h));
1425 prep_new_huge_page(h, page, page_to_nid(page));
1427 return page;
1431 * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1432 * manner.
1434 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1436 struct page *page;
1437 int nr_nodes, node;
1438 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1440 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1441 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1442 if (page)
1443 break;
1446 if (!page)
1447 return 0;
1449 put_page(page); /* free it into the hugepage allocator */
1451 return 1;
1455 * Free huge page from pool from next node to free.
1456 * Attempt to keep persistent huge pages more or less
1457 * balanced over allowed nodes.
1458 * Called with hugetlb_lock locked.
1460 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1461 bool acct_surplus)
1463 int nr_nodes, node;
1464 int ret = 0;
1466 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1468 * If we're returning unused surplus pages, only examine
1469 * nodes with surplus pages.
1471 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1472 !list_empty(&h->hugepage_freelists[node])) {
1473 struct page *page =
1474 list_entry(h->hugepage_freelists[node].next,
1475 struct page, lru);
1476 list_del(&page->lru);
1477 h->free_huge_pages--;
1478 h->free_huge_pages_node[node]--;
1479 if (acct_surplus) {
1480 h->surplus_huge_pages--;
1481 h->surplus_huge_pages_node[node]--;
1483 update_and_free_page(h, page);
1484 ret = 1;
1485 break;
1489 return ret;
1493 * Dissolve a given free hugepage into free buddy pages. This function does
1494 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1495 * dissolution fails because a given page is not a free hugepage, or because
1496 * free hugepages are fully reserved.
1498 int dissolve_free_huge_page(struct page *page)
1500 int rc = -EBUSY;
1502 spin_lock(&hugetlb_lock);
1503 if (PageHuge(page) && !page_count(page)) {
1504 struct page *head = compound_head(page);
1505 struct hstate *h = page_hstate(head);
1506 int nid = page_to_nid(head);
1507 if (h->free_huge_pages - h->resv_huge_pages == 0)
1508 goto out;
1510 * Move PageHWPoison flag from head page to the raw error page,
1511 * which makes any subpages rather than the error page reusable.
1513 if (PageHWPoison(head) && page != head) {
1514 SetPageHWPoison(page);
1515 ClearPageHWPoison(head);
1517 list_del(&head->lru);
1518 h->free_huge_pages--;
1519 h->free_huge_pages_node[nid]--;
1520 h->max_huge_pages--;
1521 update_and_free_page(h, head);
1522 rc = 0;
1524 out:
1525 spin_unlock(&hugetlb_lock);
1526 return rc;
1530 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1531 * make specified memory blocks removable from the system.
1532 * Note that this will dissolve a free gigantic hugepage completely, if any
1533 * part of it lies within the given range.
1534 * Also note that if dissolve_free_huge_page() returns with an error, all
1535 * free hugepages that were dissolved before that error are lost.
1537 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1539 unsigned long pfn;
1540 struct page *page;
1541 int rc = 0;
1543 if (!hugepages_supported())
1544 return rc;
1546 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1547 page = pfn_to_page(pfn);
1548 if (PageHuge(page) && !page_count(page)) {
1549 rc = dissolve_free_huge_page(page);
1550 if (rc)
1551 break;
1555 return rc;
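/*
 * The scan above advances by the smallest supported huge page size
 * (1 << minimum_order pfns), so huge pages of every hstate in the
 * range are encountered.
 */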
1559 * Allocates a fresh surplus page from the page allocator.
1561 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1562 int nid, nodemask_t *nmask)
1564 struct page *page = NULL;
1566 if (hstate_is_gigantic(h))
1567 return NULL;
1569 spin_lock(&hugetlb_lock);
1570 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1571 goto out_unlock;
1572 spin_unlock(&hugetlb_lock);
1574 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1575 if (!page)
1576 return NULL;
1578 spin_lock(&hugetlb_lock);
1580 * We could have raced with the pool size change.
1581 * Double check that and simply deallocate the new page
1582 * if we would end up overcommitting the surpluses. Abuse the
1583 * temporary page state to work around the nasty free_huge_page
1584 * code flow.
1586 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1587 SetPageHugeTemporary(page);
1588 spin_unlock(&hugetlb_lock);
1589 put_page(page);
1590 return NULL;
1591 } else {
1592 h->surplus_huge_pages++;
1593 h->surplus_huge_pages_node[page_to_nid(page)]++;
1596 out_unlock:
1597 spin_unlock(&hugetlb_lock);
1599 return page;
1602 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1603 int nid, nodemask_t *nmask)
1605 struct page *page;
1607 if (hstate_is_gigantic(h))
1608 return NULL;
1610 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1611 if (!page)
1612 return NULL;
1615 * We do not account these pages as surplus because they are only
1616 * temporary and will be released properly on the last reference
1618 SetPageHugeTemporary(page);
1620 return page;
1624 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1626 static
1627 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1628 struct vm_area_struct *vma, unsigned long addr)
1630 struct page *page;
1631 struct mempolicy *mpol;
1632 gfp_t gfp_mask = htlb_alloc_mask(h);
1633 int nid;
1634 nodemask_t *nodemask;
1636 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1637 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1638 mpol_cond_put(mpol);
1640 return page;
1643 /* page migration callback function */
1644 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1646 gfp_t gfp_mask = htlb_alloc_mask(h);
1647 struct page *page = NULL;
1649 if (nid != NUMA_NO_NODE)
1650 gfp_mask |= __GFP_THISNODE;
1652 spin_lock(&hugetlb_lock);
1653 if (h->free_huge_pages - h->resv_huge_pages > 0)
1654 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1655 spin_unlock(&hugetlb_lock);
1657 if (!page)
1658 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1660 return page;
1663 /* page migration callback function */
1664 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1665 nodemask_t *nmask)
1667 gfp_t gfp_mask = htlb_alloc_mask(h);
1669 spin_lock(&hugetlb_lock);
1670 if (h->free_huge_pages - h->resv_huge_pages > 0) {
1671 struct page *page;
1673 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1674 if (page) {
1675 spin_unlock(&hugetlb_lock);
1676 return page;
1679 spin_unlock(&hugetlb_lock);
1681 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1684 /* mempolicy aware migration callback */
1685 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1686 unsigned long address)
1688 struct mempolicy *mpol;
1689 nodemask_t *nodemask;
1690 struct page *page;
1691 gfp_t gfp_mask;
1692 int node;
1694 gfp_mask = htlb_alloc_mask(h);
1695 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1696 page = alloc_huge_page_nodemask(h, node, nodemask);
1697 mpol_cond_put(mpol);
1699 return page;
1703 * Increase the hugetlb pool such that it can accommodate a reservation
1704 * of size 'delta'.
1706 static int gather_surplus_pages(struct hstate *h, int delta)
1708 struct list_head surplus_list;
1709 struct page *page, *tmp;
1710 int ret, i;
1711 int needed, allocated;
1712 bool alloc_ok = true;
1714 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1715 if (needed <= 0) {
1716 h->resv_huge_pages += delta;
1717 return 0;
1720 allocated = 0;
1721 INIT_LIST_HEAD(&surplus_list);
1723 ret = -ENOMEM;
1724 retry:
1725 spin_unlock(&hugetlb_lock);
1726 for (i = 0; i < needed; i++) {
1727 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1728 NUMA_NO_NODE, NULL);
1729 if (!page) {
1730 alloc_ok = false;
1731 break;
1733 list_add(&page->lru, &surplus_list);
1734 cond_resched();
1736 allocated += i;
1739 * After retaking hugetlb_lock, we need to recalculate 'needed'
1740 * because either resv_huge_pages or free_huge_pages may have changed.
1742 spin_lock(&hugetlb_lock);
1743 needed = (h->resv_huge_pages + delta) -
1744 (h->free_huge_pages + allocated);
1745 if (needed > 0) {
1746 if (alloc_ok)
1747 goto retry;
1749 * We were not able to allocate enough pages to
1750 * satisfy the entire reservation so we free what
1751 * we've allocated so far.
1753 goto free;
1756 * The surplus_list now contains _at_least_ the number of extra pages
1757 * needed to accommodate the reservation. Add the appropriate number
1758 * of pages to the hugetlb pool and free the extras back to the buddy
1759 * allocator. Commit the entire reservation here to prevent another
1760 * process from stealing the pages as they are added to the pool but
1761 * before they are reserved.
1763 needed += allocated;
1764 h->resv_huge_pages += delta;
1765 ret = 0;
1767 /* Free the needed pages to the hugetlb pool */
1768 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1769 if ((--needed) < 0)
1770 break;
1772 * This page is now managed by the hugetlb allocator and has
1773 * no users -- drop the buddy allocator's reference.
1775 put_page_testzero(page);
1776 VM_BUG_ON_PAGE(page_count(page), page);
1777 enqueue_huge_page(h, page);
1779 free:
1780 spin_unlock(&hugetlb_lock);
1782 /* Free unnecessary surplus pages to the buddy allocator */
1783 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1784 put_page(page);
1785 spin_lock(&hugetlb_lock);
1787 return ret;
1791 * This routine has two main purposes:
1792 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1793 * in unused_resv_pages. This corresponds to the prior adjustments made
1794 * to the associated reservation map.
1795 * 2) Free any unused surplus pages that may have been allocated to satisfy
1796 * the reservation. As many as unused_resv_pages may be freed.
1798 * Called with hugetlb_lock held. However, the lock could be dropped (and
1799 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1800 * we must make sure nobody else can claim pages we are in the process of
1801 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1802 * number of huge pages we plan to free when dropping the lock.
1804 static void return_unused_surplus_pages(struct hstate *h,
1805 unsigned long unused_resv_pages)
1807 unsigned long nr_pages;
1809 /* Cannot return gigantic pages currently */
1810 if (hstate_is_gigantic(h))
1811 goto out;
1814 * Part (or even all) of the reservation could have been backed
1815 * by pre-allocated pages. Only free surplus pages.
1817 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1820 * We want to release as many surplus pages as possible, spread
1821 * evenly across all nodes with memory. Iterate across these nodes
1822 * until we can no longer free unreserved surplus pages. This occurs
1823 * when the nodes with surplus pages have no free pages.
1824 * free_pool_huge_page() will balance the freed pages across the
1825 * on-line nodes with memory and will handle the hstate accounting.
1827 * Note that we decrement resv_huge_pages as we free the pages. If
1828 * we drop the lock, resv_huge_pages will still be sufficiently large
1829 * to cover subsequent pages we may free.
1831 while (nr_pages--) {
1832 h->resv_huge_pages--;
1833 unused_resv_pages--;
1834 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1835 goto out;
1836 cond_resched_lock(&hugetlb_lock);
1839 out:
1840 /* Fully uncommit the reservation */
1841 h->resv_huge_pages -= unused_resv_pages;
1846 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1847 * are used by the huge page allocation routines to manage reservations.
1849 * vma_needs_reservation is called to determine if the huge page at addr
1850 * within the vma has an associated reservation. If a reservation is
1851 * needed, the value 1 is returned. The caller is then responsible for
1852 * managing the global reservation and subpool usage counts. After
1853 * the huge page has been allocated, vma_commit_reservation is called
1854 * to add the page to the reservation map. If the page allocation fails,
1855 * the reservation must be ended instead of committed. vma_end_reservation
1856 * is called in such cases.
1858 * In the normal case, vma_commit_reservation returns the same value
1859 * as the preceding vma_needs_reservation call. The only time this
1860 * is not the case is if a reserve map was changed between calls. It
1861 * is the responsibility of the caller to notice the difference and
1862 * take appropriate action.
1864 * vma_add_reservation is used in error paths where a reservation must
1865 * be restored when a newly allocated huge page must be freed. It is
1866 * to be called after calling vma_needs_reservation to determine if a
1867 * reservation exists.
1869 enum vma_resv_mode {
1870 VMA_NEEDS_RESV,
1871 VMA_COMMIT_RESV,
1872 VMA_END_RESV,
1873 VMA_ADD_RESV,
1875 static long __vma_reservation_common(struct hstate *h,
1876 struct vm_area_struct *vma, unsigned long addr,
1877 enum vma_resv_mode mode)
1879 struct resv_map *resv;
1880 pgoff_t idx;
1881 long ret;
1883 resv = vma_resv_map(vma);
1884 if (!resv)
1885 return 1;
1887 idx = vma_hugecache_offset(h, vma, addr);
1888 switch (mode) {
1889 case VMA_NEEDS_RESV:
1890 ret = region_chg(resv, idx, idx + 1);
1891 break;
1892 case VMA_COMMIT_RESV:
1893 ret = region_add(resv, idx, idx + 1);
1894 break;
1895 case VMA_END_RESV:
1896 region_abort(resv, idx, idx + 1);
1897 ret = 0;
1898 break;
1899 case VMA_ADD_RESV:
1900 if (vma->vm_flags & VM_MAYSHARE)
1901 ret = region_add(resv, idx, idx + 1);
1902 else {
1903 region_abort(resv, idx, idx + 1);
1904 ret = region_del(resv, idx, idx + 1);
1906 break;
1907 default:
1908 BUG();
1911 if (vma->vm_flags & VM_MAYSHARE)
1912 return ret;
1913 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1915 * In most cases, reserves always exist for private mappings.
1916 * However, a file associated with the mapping could have been
1917 * hole punched or truncated after reserves were consumed.
1918 * A subsequent fault on such a range will not use reserves.
1919 * Subtle - The reserve map for private mappings has the
1920 * opposite meaning than that of shared mappings. If NO
1921 * entry is in the reserve map, it means a reservation exists.
1922 * If an entry exists in the reserve map, it means the
1923 * reservation has already been consumed. As a result, the
1924 * return value of this routine is the opposite of the
1925 * value returned from reserve map manipulation routines above.
1927 if (ret)
1928 return 0;
1929 else
1930 return 1;
1932 else
1933 return ret < 0 ? ret : 0;
1936 static long vma_needs_reservation(struct hstate *h,
1937 struct vm_area_struct *vma, unsigned long addr)
1939 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1942 static long vma_commit_reservation(struct hstate *h,
1943 struct vm_area_struct *vma, unsigned long addr)
1945 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1948 static void vma_end_reservation(struct hstate *h,
1949 struct vm_area_struct *vma, unsigned long addr)
1951 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1954 static long vma_add_reservation(struct hstate *h,
1955 struct vm_area_struct *vma, unsigned long addr)
1957 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1961 * This routine is called to restore a reservation on error paths. In the
1962 * specific error paths, a huge page was allocated (via alloc_huge_page)
1963 * and is about to be freed. If a reservation for the page existed,
1964 * alloc_huge_page would have consumed the reservation and set PagePrivate
1965 * in the newly allocated page. When the page is freed via free_huge_page,
1966 * the global reservation count will be incremented if PagePrivate is set.
1967 * However, free_huge_page can not adjust the reserve map. Adjust the
1968 * reserve map here to be consistent with global reserve count adjustments
1969 * to be made by free_huge_page.
1971 static void restore_reserve_on_error(struct hstate *h,
1972 struct vm_area_struct *vma, unsigned long address,
1973 struct page *page)
1975 if (unlikely(PagePrivate(page))) {
1976 long rc = vma_needs_reservation(h, vma, address);
1978 if (unlikely(rc < 0)) {
1980 * Rare out of memory condition in reserve map
1981 * manipulation. Clear PagePrivate so that
1982 * global reserve count will not be incremented
1983 * by free_huge_page. This will make it appear
1984 * as though the reservation for this page was
1985 * consumed. This may prevent the task from
1986 * faulting in the page at a later time. This
1987 * is better than inconsistent global huge page
1988 * accounting of reserve counts.
1990 ClearPagePrivate(page);
1991 } else if (rc) {
1992 rc = vma_add_reservation(h, vma, address);
1993 if (unlikely(rc < 0))
1995 * See above comment about rare out of
1996 * memory condition.
1998 ClearPagePrivate(page);
1999 } else
2000 vma_end_reservation(h, vma, address);
2004 struct page *alloc_huge_page(struct vm_area_struct *vma,
2005 unsigned long addr, int avoid_reserve)
2007 struct hugepage_subpool *spool = subpool_vma(vma);
2008 struct hstate *h = hstate_vma(vma);
2009 struct page *page;
2010 long map_chg, map_commit;
2011 long gbl_chg;
2012 int ret, idx;
2013 struct hugetlb_cgroup *h_cg;
2015 idx = hstate_index(h);
2017 * Examine the region/reserve map to determine if the process
2018 * has a reservation for the page to be allocated. A return
2019 * code of zero indicates a reservation exists (no change).
2021 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2022 if (map_chg < 0)
2023 return ERR_PTR(-ENOMEM);
2026 * Processes that did not create the mapping will have no
2027 * reserves as indicated by the region/reserve map. Check
2028 * that the allocation will not exceed the subpool limit.
2029 * Allocations for MAP_NORESERVE mappings also need to be
2030 * checked against any subpool limit.
2032 if (map_chg || avoid_reserve) {
2033 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2034 if (gbl_chg < 0) {
2035 vma_end_reservation(h, vma, addr);
2036 return ERR_PTR(-ENOSPC);
2040 * Even though there was no reservation in the region/reserve
2041 * map, there could be reservations associated with the
2042 * subpool that can be used. This would be indicated if the
2043 * return value of hugepage_subpool_get_pages() is zero.
2044 * However, if avoid_reserve is specified we still avoid even
2045 * the subpool reservations.
2047 if (avoid_reserve)
2048 gbl_chg = 1;
2051 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2052 if (ret)
2053 goto out_subpool_put;
2055 spin_lock(&hugetlb_lock);
2057 * gbl_chg is passed to indicate whether or not a page must be taken
2058 * from the global free pool (global change). gbl_chg == 0 indicates
2059 * a reservation exists for the allocation.
2061 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2062 if (!page) {
2063 spin_unlock(&hugetlb_lock);
2064 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2065 if (!page)
2066 goto out_uncharge_cgroup;
2067 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2068 SetPagePrivate(page);
2069 h->resv_huge_pages--;
2071 spin_lock(&hugetlb_lock);
2072 list_move(&page->lru, &h->hugepage_activelist);
2073 /* Fall through */
2075 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2076 spin_unlock(&hugetlb_lock);
2078 set_page_private(page, (unsigned long)spool);
2080 map_commit = vma_commit_reservation(h, vma, addr);
2081 if (unlikely(map_chg > map_commit)) {
2083 * The page was added to the reservation map between
2084 * vma_needs_reservation and vma_commit_reservation.
2085 * This indicates a race with hugetlb_reserve_pages.
2086 * Adjust for the subpool count incremented above AND
2087 * in hugetlb_reserve_pages for the same page. Also,
2088 * the reservation count added in hugetlb_reserve_pages
2089 * no longer applies.
2091 long rsv_adjust;
2093 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2094 hugetlb_acct_memory(h, -rsv_adjust);
2096 return page;
2098 out_uncharge_cgroup:
2099 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2100 out_subpool_put:
2101 if (map_chg || avoid_reserve)
2102 hugepage_subpool_put_pages(spool, 1);
2103 vma_end_reservation(h, vma, addr);
2104 return ERR_PTR(-ENOSPC);
2107 int alloc_bootmem_huge_page(struct hstate *h)
2108 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2109 int __alloc_bootmem_huge_page(struct hstate *h)
2111 struct huge_bootmem_page *m;
2112 int nr_nodes, node;
2114 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2115 void *addr;
2117 addr = memblock_alloc_try_nid_raw(
2118 huge_page_size(h), huge_page_size(h),
2119 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2120 if (addr) {
2122 * Use the beginning of the huge page to store the
2123 * huge_bootmem_page struct (until gather_bootmem
2124 * puts them into the mem_map).
2126 m = addr;
2127 goto found;
2130 return 0;
2132 found:
2133 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2134 /* Put them into a private list first because mem_map is not up yet */
2135 INIT_LIST_HEAD(&m->list);
2136 list_add(&m->list, &huge_boot_pages);
2137 m->hstate = h;
2138 return 1;
2141 static void __init prep_compound_huge_page(struct page *page,
2142 unsigned int order)
2144 if (unlikely(order > (MAX_ORDER - 1)))
2145 prep_compound_gigantic_page(page, order);
2146 else
2147 prep_compound_page(page, order);
2150 /* Put bootmem huge pages into the standard lists after mem_map is up */
2151 static void __init gather_bootmem_prealloc(void)
2153 struct huge_bootmem_page *m;
2155 list_for_each_entry(m, &huge_boot_pages, list) {
2156 struct page *page = virt_to_page(m);
2157 struct hstate *h = m->hstate;
2159 WARN_ON(page_count(page) != 1);
2160 prep_compound_huge_page(page, h->order);
2161 WARN_ON(PageReserved(page));
2162 prep_new_huge_page(h, page, page_to_nid(page));
2163 put_page(page); /* free it into the hugepage allocator */
2166 * If we had gigantic hugepages allocated at boot time, we need
2167 * to restore the 'stolen' pages to totalram_pages in order to
2168 * fix confusing memory reports from free(1) and other
2169 * side-effects, like CommitLimit going negative.
2171 if (hstate_is_gigantic(h))
2172 adjust_managed_page_count(page, 1 << h->order);
2173 cond_resched();
2177 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2179 unsigned long i;
2181 for (i = 0; i < h->max_huge_pages; ++i) {
2182 if (hstate_is_gigantic(h)) {
2183 if (!alloc_bootmem_huge_page(h))
2184 break;
2185 } else if (!alloc_pool_huge_page(h,
2186 &node_states[N_MEMORY]))
2187 break;
2188 cond_resched();
2190 if (i < h->max_huge_pages) {
2191 char buf[32];
2193 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2194 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2195 h->max_huge_pages, buf, i);
2196 h->max_huge_pages = i;
2200 static void __init hugetlb_init_hstates(void)
2202 struct hstate *h;
2204 for_each_hstate(h) {
2205 if (minimum_order > huge_page_order(h))
2206 minimum_order = huge_page_order(h);
2208 /* oversize hugepages were init'ed in early boot */
2209 if (!hstate_is_gigantic(h))
2210 hugetlb_hstate_alloc_pages(h);
2212 VM_BUG_ON(minimum_order == UINT_MAX);
2215 static void __init report_hugepages(void)
2217 struct hstate *h;
2219 for_each_hstate(h) {
2220 char buf[32];
2222 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2223 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2224 buf, h->free_huge_pages);
2228 #ifdef CONFIG_HIGHMEM
2229 static void try_to_free_low(struct hstate *h, unsigned long count,
2230 nodemask_t *nodes_allowed)
2232 int i;
2234 if (hstate_is_gigantic(h))
2235 return;
2237 for_each_node_mask(i, *nodes_allowed) {
2238 struct page *page, *next;
2239 struct list_head *freel = &h->hugepage_freelists[i];
2240 list_for_each_entry_safe(page, next, freel, lru) {
2241 if (count >= h->nr_huge_pages)
2242 return;
2243 if (PageHighMem(page))
2244 continue;
2245 list_del(&page->lru);
2246 update_and_free_page(h, page);
2247 h->free_huge_pages--;
2248 h->free_huge_pages_node[page_to_nid(page)]--;
2252 #else
2253 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2254 nodemask_t *nodes_allowed)
2257 #endif
2260 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2261 * balanced by operating on them in a round-robin fashion.
2262 * Returns 1 if an adjustment was made.
2264 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2265 int delta)
2267 int nr_nodes, node;
2269 VM_BUG_ON(delta != -1 && delta != 1);
2271 if (delta < 0) {
2272 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2273 if (h->surplus_huge_pages_node[node])
2274 goto found;
2276 } else {
2277 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2278 if (h->surplus_huge_pages_node[node] <
2279 h->nr_huge_pages_node[node])
2280 goto found;
2283 return 0;
2285 found:
2286 h->surplus_huge_pages += delta;
2287 h->surplus_huge_pages_node[node] += delta;
2288 return 1;
2291 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2292 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2293 nodemask_t *nodes_allowed)
2295 unsigned long min_count, ret;
2297 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2298 return h->max_huge_pages;
2301 * Increase the pool size
2302 * First take pages out of surplus state. Then make up the
2303 * remaining difference by allocating fresh huge pages.
2305 * We might race with alloc_surplus_huge_page() here and be unable
2306 * to convert a surplus huge page to a normal huge page. That is
2307 * not critical, though, it just means the overall size of the
2308 * pool might be one hugepage larger than it needs to be, but
2309 * within all the constraints specified by the sysctls.
2311 spin_lock(&hugetlb_lock);
2312 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2313 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2314 break;
2317 while (count > persistent_huge_pages(h)) {
2319 * If this allocation races such that we no longer need the
2320 * page, free_huge_page will handle it by freeing the page
2321 * and reducing the surplus.
2323 spin_unlock(&hugetlb_lock);
2325 /* yield cpu to avoid soft lockup */
2326 cond_resched();
2328 ret = alloc_pool_huge_page(h, nodes_allowed);
2329 spin_lock(&hugetlb_lock);
2330 if (!ret)
2331 goto out;
2333 /* Bail for signals. Probably ctrl-c from user */
2334 if (signal_pending(current))
2335 goto out;
2339 * Decrease the pool size
2340 * First return free pages to the buddy allocator (being careful
2341 * to keep enough around to satisfy reservations). Then place
2342 * pages into surplus state as needed so the pool will shrink
2343 * to the desired size as pages become free.
2345 * By placing pages into the surplus state independent of the
2346 * overcommit value, we are allowing the surplus pool size to
2347 * exceed overcommit. There are few sane options here. Since
2348 * alloc_surplus_huge_page() is checking the global counter,
2349 * though, we'll note that we're not allowed to exceed surplus
2350 * and won't grow the pool anywhere else. Not until one of the
2351 * sysctls are changed, or the surplus pages go out of use.
2353 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2354 min_count = max(count, min_count);
2355 try_to_free_low(h, min_count, nodes_allowed);
2356 while (min_count < persistent_huge_pages(h)) {
2357 if (!free_pool_huge_page(h, nodes_allowed, 0))
2358 break;
2359 cond_resched_lock(&hugetlb_lock);
2361 while (count < persistent_huge_pages(h)) {
2362 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2363 break;
2365 out:
2366 ret = persistent_huge_pages(h);
2367 spin_unlock(&hugetlb_lock);
2368 return ret;
2371 #define HSTATE_ATTR_RO(_name) \
2372 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2374 #define HSTATE_ATTR(_name) \
2375 static struct kobj_attribute _name##_attr = \
2376 __ATTR(_name, 0644, _name##_show, _name##_store)
2378 static struct kobject *hugepages_kobj;
2379 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2381 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2383 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2385 int i;
2387 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2388 if (hstate_kobjs[i] == kobj) {
2389 if (nidp)
2390 *nidp = NUMA_NO_NODE;
2391 return &hstates[i];
2394 return kobj_to_node_hstate(kobj, nidp);
2397 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2398 struct kobj_attribute *attr, char *buf)
2400 struct hstate *h;
2401 unsigned long nr_huge_pages;
2402 int nid;
2404 h = kobj_to_hstate(kobj, &nid);
2405 if (nid == NUMA_NO_NODE)
2406 nr_huge_pages = h->nr_huge_pages;
2407 else
2408 nr_huge_pages = h->nr_huge_pages_node[nid];
2410 return sprintf(buf, "%lu\n", nr_huge_pages);
2413 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2414 struct hstate *h, int nid,
2415 unsigned long count, size_t len)
2417 int err;
2418 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2420 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2421 err = -EINVAL;
2422 goto out;
2425 if (nid == NUMA_NO_NODE) {
2427 * global hstate attribute
2429 if (!(obey_mempolicy &&
2430 init_nodemask_of_mempolicy(nodes_allowed))) {
2431 NODEMASK_FREE(nodes_allowed);
2432 nodes_allowed = &node_states[N_MEMORY];
2434 } else if (nodes_allowed) {
2436 * per node hstate attribute: adjust count to global,
2437 * but restrict alloc/free to the specified node.
2439 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2440 init_nodemask_of_node(nodes_allowed, nid);
2441 } else
2442 nodes_allowed = &node_states[N_MEMORY];
2444 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2446 if (nodes_allowed != &node_states[N_MEMORY])
2447 NODEMASK_FREE(nodes_allowed);
2449 return len;
2450 out:
2451 NODEMASK_FREE(nodes_allowed);
2452 return err;
2455 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2456 struct kobject *kobj, const char *buf,
2457 size_t len)
2459 struct hstate *h;
2460 unsigned long count;
2461 int nid;
2462 int err;
2464 err = kstrtoul(buf, 10, &count);
2465 if (err)
2466 return err;
2468 h = kobj_to_hstate(kobj, &nid);
2469 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2472 static ssize_t nr_hugepages_show(struct kobject *kobj,
2473 struct kobj_attribute *attr, char *buf)
2475 return nr_hugepages_show_common(kobj, attr, buf);
2478 static ssize_t nr_hugepages_store(struct kobject *kobj,
2479 struct kobj_attribute *attr, const char *buf, size_t len)
2481 return nr_hugepages_store_common(false, kobj, buf, len);
2483 HSTATE_ATTR(nr_hugepages);
2485 #ifdef CONFIG_NUMA
2488 * hstate attribute for optionally mempolicy-based constraint on persistent
2489 * huge page alloc/free.
2491 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2492 struct kobj_attribute *attr, char *buf)
2494 return nr_hugepages_show_common(kobj, attr, buf);
2497 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2498 struct kobj_attribute *attr, const char *buf, size_t len)
2500 return nr_hugepages_store_common(true, kobj, buf, len);
2502 HSTATE_ATTR(nr_hugepages_mempolicy);
2503 #endif
2506 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2507 struct kobj_attribute *attr, char *buf)
2509 struct hstate *h = kobj_to_hstate(kobj, NULL);
2510 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2513 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2514 struct kobj_attribute *attr, const char *buf, size_t count)
2516 int err;
2517 unsigned long input;
2518 struct hstate *h = kobj_to_hstate(kobj, NULL);
2520 if (hstate_is_gigantic(h))
2521 return -EINVAL;
2523 err = kstrtoul(buf, 10, &input);
2524 if (err)
2525 return err;
2527 spin_lock(&hugetlb_lock);
2528 h->nr_overcommit_huge_pages = input;
2529 spin_unlock(&hugetlb_lock);
2531 return count;
2533 HSTATE_ATTR(nr_overcommit_hugepages);
2535 static ssize_t free_hugepages_show(struct kobject *kobj,
2536 struct kobj_attribute *attr, char *buf)
2538 struct hstate *h;
2539 unsigned long free_huge_pages;
2540 int nid;
2542 h = kobj_to_hstate(kobj, &nid);
2543 if (nid == NUMA_NO_NODE)
2544 free_huge_pages = h->free_huge_pages;
2545 else
2546 free_huge_pages = h->free_huge_pages_node[nid];
2548 return sprintf(buf, "%lu\n", free_huge_pages);
2550 HSTATE_ATTR_RO(free_hugepages);
2552 static ssize_t resv_hugepages_show(struct kobject *kobj,
2553 struct kobj_attribute *attr, char *buf)
2555 struct hstate *h = kobj_to_hstate(kobj, NULL);
2556 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2558 HSTATE_ATTR_RO(resv_hugepages);
2560 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2561 struct kobj_attribute *attr, char *buf)
2563 struct hstate *h;
2564 unsigned long surplus_huge_pages;
2565 int nid;
2567 h = kobj_to_hstate(kobj, &nid);
2568 if (nid == NUMA_NO_NODE)
2569 surplus_huge_pages = h->surplus_huge_pages;
2570 else
2571 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2573 return sprintf(buf, "%lu\n", surplus_huge_pages);
2575 HSTATE_ATTR_RO(surplus_hugepages);
2577 static struct attribute *hstate_attrs[] = {
2578 &nr_hugepages_attr.attr,
2579 &nr_overcommit_hugepages_attr.attr,
2580 &free_hugepages_attr.attr,
2581 &resv_hugepages_attr.attr,
2582 &surplus_hugepages_attr.attr,
2583 #ifdef CONFIG_NUMA
2584 &nr_hugepages_mempolicy_attr.attr,
2585 #endif
2586 NULL,
2589 static const struct attribute_group hstate_attr_group = {
2590 .attrs = hstate_attrs,
2593 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2594 struct kobject **hstate_kobjs,
2595 const struct attribute_group *hstate_attr_group)
2597 int retval;
2598 int hi = hstate_index(h);
2600 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2601 if (!hstate_kobjs[hi])
2602 return -ENOMEM;
2604 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2605 if (retval)
2606 kobject_put(hstate_kobjs[hi]);
2608 return retval;
2611 static void __init hugetlb_sysfs_init(void)
2613 struct hstate *h;
2614 int err;
2616 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2617 if (!hugepages_kobj)
2618 return;
2620 for_each_hstate(h) {
2621 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2622 hstate_kobjs, &hstate_attr_group);
2623 if (err)
2624 pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
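/*
 * Illustrative userspace sketch, not part of this file: the hstate attribute
 * groups registered by hugetlb_sysfs_init() appear under
 * /sys/kernel/mm/hugepages/hugepages-<size>kB/.  This standalone program
 * (hypothetical, for illustration only) walks those directories and prints
 * each per-size counter.  Build with: cc -o hugepages_sysfs hugepages_sysfs.c
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static void print_attr(const char *dir, const char *attr)
{
	char path[512], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/hugepages/%s/%s", dir, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("  %-28s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	static const char *attrs[] = {
		"nr_hugepages", "nr_overcommit_hugepages",
		"free_hugepages", "resv_hugepages", "surplus_hugepages",
	};
	DIR *d = opendir("/sys/kernel/mm/hugepages");
	struct dirent *de;
	size_t i;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((de = readdir(d)) != NULL) {
		/* only the per-size "hugepages-<size>kB" directories */
		if (strncmp(de->d_name, "hugepages-", 10))
			continue;
		printf("%s\n", de->d_name);
		for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
			print_attr(de->d_name, attrs[i]);
	}
	closedir(d);
	return 0;
}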
2628 #ifdef CONFIG_NUMA
2631 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2632 * with node devices in node_devices[] using a parallel array. The array
2633 * index of a node device or _hstate == node id.
2634 * This is here to avoid any static dependency of the node device driver, in
2635 * the base kernel, on the hugetlb module.
2637 struct node_hstate {
2638 struct kobject *hugepages_kobj;
2639 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2641 static struct node_hstate node_hstates[MAX_NUMNODES];
2644 * A subset of global hstate attributes for node devices
2646 static struct attribute *per_node_hstate_attrs[] = {
2647 &nr_hugepages_attr.attr,
2648 &free_hugepages_attr.attr,
2649 &surplus_hugepages_attr.attr,
2650 NULL,
2653 static const struct attribute_group per_node_hstate_attr_group = {
2654 .attrs = per_node_hstate_attrs,
2658 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2659 * Returns node id via non-NULL nidp.
2661 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2663 int nid;
2665 for (nid = 0; nid < nr_node_ids; nid++) {
2666 struct node_hstate *nhs = &node_hstates[nid];
2667 int i;
2668 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2669 if (nhs->hstate_kobjs[i] == kobj) {
2670 if (nidp)
2671 *nidp = nid;
2672 return &hstates[i];
2676 BUG();
2677 return NULL;
2681 * Unregister hstate attributes from a single node device.
2682 * No-op if no hstate attributes attached.
2684 static void hugetlb_unregister_node(struct node *node)
2686 struct hstate *h;
2687 struct node_hstate *nhs = &node_hstates[node->dev.id];
2689 if (!nhs->hugepages_kobj)
2690 return; /* no hstate attributes */
2692 for_each_hstate(h) {
2693 int idx = hstate_index(h);
2694 if (nhs->hstate_kobjs[idx]) {
2695 kobject_put(nhs->hstate_kobjs[idx]);
2696 nhs->hstate_kobjs[idx] = NULL;
2700 kobject_put(nhs->hugepages_kobj);
2701 nhs->hugepages_kobj = NULL;
2706 * Register hstate attributes for a single node device.
2707 * No-op if attributes already registered.
2709 static void hugetlb_register_node(struct node *node)
2711 struct hstate *h;
2712 struct node_hstate *nhs = &node_hstates[node->dev.id];
2713 int err;
2715 if (nhs->hugepages_kobj)
2716 return; /* already allocated */
2718 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2719 &node->dev.kobj);
2720 if (!nhs->hugepages_kobj)
2721 return;
2723 for_each_hstate(h) {
2724 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2725 nhs->hstate_kobjs,
2726 &per_node_hstate_attr_group);
2727 if (err) {
2728 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2729 h->name, node->dev.id);
2730 hugetlb_unregister_node(node);
2731 break;
2737 * hugetlb init time: register hstate attributes for all registered node
2738 * devices of nodes that have memory. All on-line nodes should have
2739 * registered their associated device by this time.
2741 static void __init hugetlb_register_all_nodes(void)
2743 int nid;
2745 for_each_node_state(nid, N_MEMORY) {
2746 struct node *node = node_devices[nid];
2747 if (node->dev.id == nid)
2748 hugetlb_register_node(node);
2752 * Let the node device driver know we're here so it can
2753 * [un]register hstate attributes on node hotplug.
2755 register_hugetlbfs_with_node(hugetlb_register_node,
2756 hugetlb_unregister_node);
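/*
 * Illustrative userspace sketch, not part of this file: the per-node subset
 * registered above (nr_hugepages, free_hugepages, surplus_hugepages) hangs
 * off each node device, i.e. /sys/devices/system/node/node<N>/hugepages/.
 * A minimal reader for node 0, assuming a 2 MB default huge page size:
 */
#include <stdio.h>

int main(void)
{
	static const char *attrs[] = {
		"nr_hugepages", "free_hugepages", "surplus_hugepages",
	};
	char path[256], buf[64];
	unsigned int i;

	for (i = 0; i < 3; i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/node/node0/hugepages/"
			 "hugepages-2048kB/%s", attrs[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* node or page size not present */
		if (fgets(buf, sizeof(buf), f))
			printf("node0 %s: %s", attrs[i], buf);
		fclose(f);
	}
	return 0;
}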
2758 #else /* !CONFIG_NUMA */
2760 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2762 BUG();
2763 if (nidp)
2764 *nidp = -1;
2765 return NULL;
2768 static void hugetlb_register_all_nodes(void) { }
2770 #endif
2772 static int __init hugetlb_init(void)
2774 int i;
2776 if (!hugepages_supported())
2777 return 0;
2779 if (!size_to_hstate(default_hstate_size)) {
2780 if (default_hstate_size != 0) {
2781 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2782 default_hstate_size, HPAGE_SIZE);
2785 default_hstate_size = HPAGE_SIZE;
2786 if (!size_to_hstate(default_hstate_size))
2787 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2789 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2790 if (default_hstate_max_huge_pages) {
2791 if (!default_hstate.max_huge_pages)
2792 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2795 hugetlb_init_hstates();
2796 gather_bootmem_prealloc();
2797 report_hugepages();
2799 hugetlb_sysfs_init();
2800 hugetlb_register_all_nodes();
2801 hugetlb_cgroup_file_init();
2803 #ifdef CONFIG_SMP
2804 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2805 #else
2806 num_fault_mutexes = 1;
2807 #endif
2808 hugetlb_fault_mutex_table =
2809 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2810 GFP_KERNEL);
2811 BUG_ON(!hugetlb_fault_mutex_table);
2813 for (i = 0; i < num_fault_mutexes; i++)
2814 mutex_init(&hugetlb_fault_mutex_table[i]);
2815 return 0;
2817 subsys_initcall(hugetlb_init);
2819 /* Should be called on processing a hugepagesz=... option */
2820 void __init hugetlb_bad_size(void)
2822 parsed_valid_hugepagesz = false;
2825 void __init hugetlb_add_hstate(unsigned int order)
2827 struct hstate *h;
2828 unsigned long i;
2830 if (size_to_hstate(PAGE_SIZE << order)) {
2831 pr_warn("hugepagesz= specified twice, ignoring\n");
2832 return;
2834 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2835 BUG_ON(order == 0);
2836 h = &hstates[hugetlb_max_hstate++];
2837 h->order = order;
2838 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2839 h->nr_huge_pages = 0;
2840 h->free_huge_pages = 0;
2841 for (i = 0; i < MAX_NUMNODES; ++i)
2842 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2843 INIT_LIST_HEAD(&h->hugepage_activelist);
2844 h->next_nid_to_alloc = first_memory_node;
2845 h->next_nid_to_free = first_memory_node;
2846 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2847 huge_page_size(h)/1024);
2849 parsed_hstate = h;
2852 static int __init hugetlb_nrpages_setup(char *s)
2854 unsigned long *mhp;
2855 static unsigned long *last_mhp;
2857 if (!parsed_valid_hugepagesz) {
2858 pr_warn("hugepages = %s preceded by "
2859 "an unsupported hugepagesz, ignoring\n", s);
2860 parsed_valid_hugepagesz = true;
2861 return 1;
2864 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2865 * so this hugepages= parameter goes to the "default hstate".
2867 else if (!hugetlb_max_hstate)
2868 mhp = &default_hstate_max_huge_pages;
2869 else
2870 mhp = &parsed_hstate->max_huge_pages;
2872 if (mhp == last_mhp) {
2873 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2874 return 1;
2877 if (sscanf(s, "%lu", mhp) <= 0)
2878 *mhp = 0;
2881 * Global state is always initialized later in hugetlb_init.
2882 * But we need to allocate >= MAX_ORDER hstates here early to still
2883 * use the bootmem allocator.
2885 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2886 hugetlb_hstate_alloc_pages(parsed_hstate);
2888 last_mhp = mhp;
2890 return 1;
2892 __setup("hugepages=", hugetlb_nrpages_setup);
2894 static int __init hugetlb_default_setup(char *s)
2896 default_hstate_size = memparse(s, &s);
2897 return 1;
2899 __setup("default_hugepagesz=", hugetlb_default_setup);
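/*
 * Illustrative userspace sketch, not part of this file: the __setup()
 * handlers above consume "hugepagesz=", "hugepages=" and
 * "default_hugepagesz=" from the kernel command line.  One way to confirm
 * what was parsed at boot is to print the command line and the resulting
 * default pool size via standard procfs files:
 */
#include <stdio.h>

static void cat(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	while (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	cat("/proc/cmdline");			/* e.g. "... hugepagesz=1G hugepages=4" */
	cat("/proc/sys/vm/nr_hugepages");	/* default hstate pool size */
	return 0;
}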
2901 static unsigned int cpuset_mems_nr(unsigned int *array)
2903 int node;
2904 unsigned int nr = 0;
2906 for_each_node_mask(node, cpuset_current_mems_allowed)
2907 nr += array[node];
2909 return nr;
2912 #ifdef CONFIG_SYSCTL
2913 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2914 struct ctl_table *table, int write,
2915 void __user *buffer, size_t *length, loff_t *ppos)
2917 struct hstate *h = &default_hstate;
2918 unsigned long tmp = h->max_huge_pages;
2919 int ret;
2921 if (!hugepages_supported())
2922 return -EOPNOTSUPP;
2924 table->data = &tmp;
2925 table->maxlen = sizeof(unsigned long);
2926 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2927 if (ret)
2928 goto out;
2930 if (write)
2931 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2932 NUMA_NO_NODE, tmp, *length);
2933 out:
2934 return ret;
2937 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2938 void __user *buffer, size_t *length, loff_t *ppos)
2941 return hugetlb_sysctl_handler_common(false, table, write,
2942 buffer, length, ppos);
2945 #ifdef CONFIG_NUMA
2946 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2947 void __user *buffer, size_t *length, loff_t *ppos)
2949 return hugetlb_sysctl_handler_common(true, table, write,
2950 buffer, length, ppos);
2952 #endif /* CONFIG_NUMA */
2954 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2955 void __user *buffer,
2956 size_t *length, loff_t *ppos)
2958 struct hstate *h = &default_hstate;
2959 unsigned long tmp;
2960 int ret;
2962 if (!hugepages_supported())
2963 return -EOPNOTSUPP;
2965 tmp = h->nr_overcommit_huge_pages;
2967 if (write && hstate_is_gigantic(h))
2968 return -EINVAL;
2970 table->data = &tmp;
2971 table->maxlen = sizeof(unsigned long);
2972 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2973 if (ret)
2974 goto out;
2976 if (write) {
2977 spin_lock(&hugetlb_lock);
2978 h->nr_overcommit_huge_pages = tmp;
2979 spin_unlock(&hugetlb_lock);
2981 out:
2982 return ret;
2985 #endif /* CONFIG_SYSCTL */
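/*
 * Illustrative userspace sketch, not part of this file:
 * hugetlb_sysctl_handler() and hugetlb_overcommit_handler() above back the
 * /proc/sys/vm/nr_hugepages and /proc/sys/vm/nr_overcommit_hugepages files.
 * A minimal (root-only) writer; the values 64 and 16 are arbitrary examples.
 * The kernel may grow the pool by fewer pages than requested, so read the
 * file back to see the result.
 */
#include <stdio.h>

static int write_ul(const char *path, unsigned long val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%lu\n", val);
	return fclose(f);
}

int main(void)
{
	write_ul("/proc/sys/vm/nr_hugepages", 64);
	write_ul("/proc/sys/vm/nr_overcommit_hugepages", 16);
	return 0;
}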
2987 void hugetlb_report_meminfo(struct seq_file *m)
2989 struct hstate *h;
2990 unsigned long total = 0;
2992 if (!hugepages_supported())
2993 return;
2995 for_each_hstate(h) {
2996 unsigned long count = h->nr_huge_pages;
2998 total += (PAGE_SIZE << huge_page_order(h)) * count;
3000 if (h == &default_hstate)
3001 seq_printf(m,
3002 "HugePages_Total: %5lu\n"
3003 "HugePages_Free: %5lu\n"
3004 "HugePages_Rsvd: %5lu\n"
3005 "HugePages_Surp: %5lu\n"
3006 "Hugepagesize: %8lu kB\n",
3007 count,
3008 h->free_huge_pages,
3009 h->resv_huge_pages,
3010 h->surplus_huge_pages,
3011 (PAGE_SIZE << huge_page_order(h)) / 1024);
3014 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
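/*
 * Illustrative userspace sketch, not part of this file:
 * hugetlb_report_meminfo() above emits the HugePages_*, Hugepagesize and
 * Hugetlb lines in /proc/meminfo.  A small parser that prints just those
 * fields:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("/proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "HugePages_", 10) ||
		    !strncmp(line, "Hugepagesize", 12) ||
		    !strncmp(line, "Hugetlb:", 8))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}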
3017 int hugetlb_report_node_meminfo(int nid, char *buf)
3019 struct hstate *h = &default_hstate;
3020 if (!hugepages_supported())
3021 return 0;
3022 return sprintf(buf,
3023 "Node %d HugePages_Total: %5u\n"
3024 "Node %d HugePages_Free: %5u\n"
3025 "Node %d HugePages_Surp: %5u\n",
3026 nid, h->nr_huge_pages_node[nid],
3027 nid, h->free_huge_pages_node[nid],
3028 nid, h->surplus_huge_pages_node[nid]);
3031 void hugetlb_show_meminfo(void)
3033 struct hstate *h;
3034 int nid;
3036 if (!hugepages_supported())
3037 return;
3039 for_each_node_state(nid, N_MEMORY)
3040 for_each_hstate(h)
3041 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3042 nid,
3043 h->nr_huge_pages_node[nid],
3044 h->free_huge_pages_node[nid],
3045 h->surplus_huge_pages_node[nid],
3046 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3049 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3051 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3052 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3055 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3056 unsigned long hugetlb_total_pages(void)
3058 struct hstate *h;
3059 unsigned long nr_total_pages = 0;
3061 for_each_hstate(h)
3062 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3063 return nr_total_pages;
3066 static int hugetlb_acct_memory(struct hstate *h, long delta)
3068 int ret = -ENOMEM;
3070 spin_lock(&hugetlb_lock);
3072 * When cpuset is configured, it breaks the strict hugetlb page
3073 * reservation as the accounting is done on a global variable. Such
3074 * reservation is completely rubbish in the presence of cpuset because
3075 * the reservation is not checked against page availability for the
3076 * current cpuset. Applications can still potentially be OOM'ed by the
3077 * kernel for lack of free hugetlb pages in the cpuset that the task is in.
3078 * Attempting to enforce strict accounting with cpuset is almost
3079 * impossible (or too ugly) because cpusets are too fluid: tasks and
3080 * memory nodes can be dynamically moved between cpusets.
3082 * The change of semantics for shared hugetlb mapping with cpuset is
3083 * undesirable. However, in order to preserve some of the semantics,
3084 * we fall back to check against current free page availability as
3085 * a best attempt and hopefully to minimize the impact of changing
3086 * semantics that cpuset has.
3088 if (delta > 0) {
3089 if (gather_surplus_pages(h, delta) < 0)
3090 goto out;
3092 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3093 return_unused_surplus_pages(h, delta);
3094 goto out;
3098 ret = 0;
3099 if (delta < 0)
3100 return_unused_surplus_pages(h, (unsigned long) -delta);
3102 out:
3103 spin_unlock(&hugetlb_lock);
3104 return ret;
3107 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3109 struct resv_map *resv = vma_resv_map(vma);
3112 * This new VMA should share its sibling's reservation map if present.
3113 * The VMA will only ever have a valid reservation map pointer where
3114 * it is being copied for another still existing VMA. As that VMA
3115 * has a reference to the reservation map it cannot disappear until
3116 * after this open call completes. It is therefore safe to take a
3117 * new reference here without additional locking.
3119 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3120 kref_get(&resv->refs);
3123 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3125 struct hstate *h = hstate_vma(vma);
3126 struct resv_map *resv = vma_resv_map(vma);
3127 struct hugepage_subpool *spool = subpool_vma(vma);
3128 unsigned long reserve, start, end;
3129 long gbl_reserve;
3131 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3132 return;
3134 start = vma_hugecache_offset(h, vma, vma->vm_start);
3135 end = vma_hugecache_offset(h, vma, vma->vm_end);
3137 reserve = (end - start) - region_count(resv, start, end);
3139 kref_put(&resv->refs, resv_map_release);
3141 if (reserve) {
3143 * Decrement reserve counts. The global reserve count may be
3144 * adjusted if the subpool has a minimum size.
3146 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3147 hugetlb_acct_memory(h, -gbl_reserve);
3151 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3153 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3154 return -EINVAL;
3155 return 0;
3158 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3160 struct hstate *hstate = hstate_vma(vma);
3162 return 1UL << huge_page_shift(hstate);
3166 * We cannot handle pagefaults against hugetlb pages at all. They cause
3167 * handle_mm_fault() to try to instantiate regular-sized pages in the
3168 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3169 * this far.
3171 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3173 BUG();
3174 return 0;
3178 * When a new function is introduced to vm_operations_struct and added
3179 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3180 * This is because under System V memory model, mappings created via
3181 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3182 * their original vm_ops are overwritten with shm_vm_ops.
3184 const struct vm_operations_struct hugetlb_vm_ops = {
3185 .fault = hugetlb_vm_op_fault,
3186 .open = hugetlb_vm_op_open,
3187 .close = hugetlb_vm_op_close,
3188 .split = hugetlb_vm_op_split,
3189 .pagesize = hugetlb_vm_op_pagesize,
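/*
 * Illustrative userspace sketch, not part of this file: VMAs backed by
 * hugetlb_vm_ops above are typically created from userspace via hugetlbfs
 * or mmap(MAP_HUGETLB).  A minimal anonymous mapping of one default-sized
 * huge page (assumes a 2 MB default size and a non-empty pool, see
 * nr_hugepages):
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)

int main(void)
{
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, LEN);	/* fault the huge page in */
	munmap(p, LEN);
	return 0;
}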
3192 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3193 int writable)
3195 pte_t entry;
3197 if (writable) {
3198 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3199 vma->vm_page_prot)));
3200 } else {
3201 entry = huge_pte_wrprotect(mk_huge_pte(page,
3202 vma->vm_page_prot));
3204 entry = pte_mkyoung(entry);
3205 entry = pte_mkhuge(entry);
3206 entry = arch_make_huge_pte(entry, vma, page, writable);
3208 return entry;
3211 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3212 unsigned long address, pte_t *ptep)
3214 pte_t entry;
3216 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3217 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3218 update_mmu_cache(vma, address, ptep);
3221 bool is_hugetlb_entry_migration(pte_t pte)
3223 swp_entry_t swp;
3225 if (huge_pte_none(pte) || pte_present(pte))
3226 return false;
3227 swp = pte_to_swp_entry(pte);
3228 if (non_swap_entry(swp) && is_migration_entry(swp))
3229 return true;
3230 else
3231 return false;
3234 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3236 swp_entry_t swp;
3238 if (huge_pte_none(pte) || pte_present(pte))
3239 return 0;
3240 swp = pte_to_swp_entry(pte);
3241 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3242 return 1;
3243 else
3244 return 0;
3247 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3248 struct vm_area_struct *vma)
3250 pte_t *src_pte, *dst_pte, entry, dst_entry;
3251 struct page *ptepage;
3252 unsigned long addr;
3253 int cow;
3254 struct hstate *h = hstate_vma(vma);
3255 unsigned long sz = huge_page_size(h);
3256 struct mmu_notifier_range range;
3257 int ret = 0;
3259 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3261 if (cow) {
3262 mmu_notifier_range_init(&range, src, vma->vm_start,
3263 vma->vm_end);
3264 mmu_notifier_invalidate_range_start(&range);
3267 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3268 spinlock_t *src_ptl, *dst_ptl;
3269 src_pte = huge_pte_offset(src, addr, sz);
3270 if (!src_pte)
3271 continue;
3272 dst_pte = huge_pte_alloc(dst, addr, sz);
3273 if (!dst_pte) {
3274 ret = -ENOMEM;
3275 break;
3279 * If the pagetables are shared don't copy or take references.
3280 * dst_pte == src_pte is the common case of src/dest sharing.
3282 * However, src could have 'unshared' and dst shares with
3283 * another vma. If dst_pte !none, this implies sharing.
3284 * Check here before taking page table lock, and once again
3285 * after taking the lock below.
3287 dst_entry = huge_ptep_get(dst_pte);
3288 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3289 continue;
3291 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3292 src_ptl = huge_pte_lockptr(h, src, src_pte);
3293 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3294 entry = huge_ptep_get(src_pte);
3295 dst_entry = huge_ptep_get(dst_pte);
3296 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3298 * Skip if src entry none. Also, skip in the
3299 * unlikely case dst entry !none as this implies
3300 * sharing with another vma.
3303 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3304 is_hugetlb_entry_hwpoisoned(entry))) {
3305 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3307 if (is_write_migration_entry(swp_entry) && cow) {
3309 * COW mappings require pages in both
3310 * parent and child to be set to read.
3312 make_migration_entry_read(&swp_entry);
3313 entry = swp_entry_to_pte(swp_entry);
3314 set_huge_swap_pte_at(src, addr, src_pte,
3315 entry, sz);
3317 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3318 } else {
3319 if (cow) {
3321 * No need to notify as we are downgrading page
3322 * table protection not changing it to point
3323 * to a new page.
3325 * See Documentation/vm/mmu_notifier.rst
3327 huge_ptep_set_wrprotect(src, addr, src_pte);
3329 entry = huge_ptep_get(src_pte);
3330 ptepage = pte_page(entry);
3331 get_page(ptepage);
3332 page_dup_rmap(ptepage, true);
3333 set_huge_pte_at(dst, addr, dst_pte, entry);
3334 hugetlb_count_add(pages_per_huge_page(h), dst);
3336 spin_unlock(src_ptl);
3337 spin_unlock(dst_ptl);
3340 if (cow)
3341 mmu_notifier_invalidate_range_end(&range);
3343 return ret;
3346 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3347 unsigned long start, unsigned long end,
3348 struct page *ref_page)
3350 struct mm_struct *mm = vma->vm_mm;
3351 unsigned long address;
3352 pte_t *ptep;
3353 pte_t pte;
3354 spinlock_t *ptl;
3355 struct page *page;
3356 struct hstate *h = hstate_vma(vma);
3357 unsigned long sz = huge_page_size(h);
3358 struct mmu_notifier_range range;
3360 WARN_ON(!is_vm_hugetlb_page(vma));
3361 BUG_ON(start & ~huge_page_mask(h));
3362 BUG_ON(end & ~huge_page_mask(h));
3365 * This is a hugetlb vma, all the pte entries should point
3366 * to huge page.
3368 tlb_remove_check_page_size_change(tlb, sz);
3369 tlb_start_vma(tlb, vma);
3372 * If sharing possible, alert mmu notifiers of worst case.
3374 mmu_notifier_range_init(&range, mm, start, end);
3375 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3376 mmu_notifier_invalidate_range_start(&range);
3377 address = start;
3378 for (; address < end; address += sz) {
3379 ptep = huge_pte_offset(mm, address, sz);
3380 if (!ptep)
3381 continue;
3383 ptl = huge_pte_lock(h, mm, ptep);
3384 if (huge_pmd_unshare(mm, &address, ptep)) {
3385 spin_unlock(ptl);
3387 * We just unmapped a page of PMDs by clearing a PUD.
3388 * The caller's TLB flush range should cover this area.
3390 continue;
3393 pte = huge_ptep_get(ptep);
3394 if (huge_pte_none(pte)) {
3395 spin_unlock(ptl);
3396 continue;
3400 * Migrating hugepage or HWPoisoned hugepage is already
3401 * unmapped and its refcount is dropped, so just clear pte here.
3403 if (unlikely(!pte_present(pte))) {
3404 huge_pte_clear(mm, address, ptep, sz);
3405 spin_unlock(ptl);
3406 continue;
3409 page = pte_page(pte);
3411 * If a reference page is supplied, it is because a specific
3412 * page is being unmapped, not a range. Ensure the page we
3413 * are about to unmap is the actual page of interest.
3415 if (ref_page) {
3416 if (page != ref_page) {
3417 spin_unlock(ptl);
3418 continue;
3421 * Mark the VMA as having unmapped its page so that
3422 * future faults in this VMA will fail rather than
3423 * looking like data was lost
3425 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3428 pte = huge_ptep_get_and_clear(mm, address, ptep);
3429 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3430 if (huge_pte_dirty(pte))
3431 set_page_dirty(page);
3433 hugetlb_count_sub(pages_per_huge_page(h), mm);
3434 page_remove_rmap(page, true);
3436 spin_unlock(ptl);
3437 tlb_remove_page_size(tlb, page, huge_page_size(h));
3439 * Bail out after unmapping reference page if supplied
3441 if (ref_page)
3442 break;
3444 mmu_notifier_invalidate_range_end(&range);
3445 tlb_end_vma(tlb, vma);
3448 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3449 struct vm_area_struct *vma, unsigned long start,
3450 unsigned long end, struct page *ref_page)
3452 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3455 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3456 * test will fail on a vma being torn down, and not grab a page table
3457 * on its way out. We're lucky that the flag has such an appropriate
3458 * name, and can in fact be safely cleared here. We could clear it
3459 * before the __unmap_hugepage_range above, but all that's necessary
3460 * is to clear it before releasing the i_mmap_rwsem. This works
3461 * because in the context this is called, the VMA is about to be
3462 * destroyed and the i_mmap_rwsem is held.
3464 vma->vm_flags &= ~VM_MAYSHARE;
3467 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3468 unsigned long end, struct page *ref_page)
3470 struct mm_struct *mm;
3471 struct mmu_gather tlb;
3472 unsigned long tlb_start = start;
3473 unsigned long tlb_end = end;
3476 * If shared PMDs were possibly used within this vma range, adjust
3477 * start/end for worst case tlb flushing.
3478 * Note that we can not be sure if PMDs are shared until we try to
3479 * unmap pages. However, we want to make sure TLB flushing covers
3480 * the largest possible range.
3482 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3484 mm = vma->vm_mm;
3486 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3487 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3488 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3492 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3493 * mapping it owns the reserve page for. The intention is to unmap the page
3494 * from other VMAs and let the children be SIGKILLed if they are faulting the
3495 * same region.
3497 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3498 struct page *page, unsigned long address)
3500 struct hstate *h = hstate_vma(vma);
3501 struct vm_area_struct *iter_vma;
3502 struct address_space *mapping;
3503 pgoff_t pgoff;
3506 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3507 * from page cache lookup which is in HPAGE_SIZE units.
3509 address = address & huge_page_mask(h);
3510 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3511 vma->vm_pgoff;
3512 mapping = vma->vm_file->f_mapping;
3515 * Take the mapping lock for the duration of the table walk. As
3516 * this mapping should be shared between all the VMAs,
3517 * __unmap_hugepage_range() is called as the lock is already held
3519 i_mmap_lock_write(mapping);
3520 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3521 /* Do not unmap the current VMA */
3522 if (iter_vma == vma)
3523 continue;
3526 * Shared VMAs have their own reserves and do not affect
3527 * MAP_PRIVATE accounting but it is possible that a shared
3528 * VMA is using the same page so check and skip such VMAs.
3530 if (iter_vma->vm_flags & VM_MAYSHARE)
3531 continue;
3534 * Unmap the page from other VMAs without their own reserves.
3535 * They get marked to be SIGKILLed if they fault in these
3536 * areas. This is because a future no-page fault on this VMA
3537 * could insert a zeroed page instead of the data existing
3538 * from the time of fork. This would look like data corruption
3540 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3541 unmap_hugepage_range(iter_vma, address,
3542 address + huge_page_size(h), page);
3544 i_mmap_unlock_write(mapping);
3548 * Hugetlb_cow() should be called with page lock of the original hugepage held.
3549 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3550 * cannot race with other handlers or page migration.
3551 * Keep the pte_same checks anyway to make transition from the mutex easier.
3553 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3554 unsigned long address, pte_t *ptep,
3555 struct page *pagecache_page, spinlock_t *ptl)
3557 pte_t pte;
3558 struct hstate *h = hstate_vma(vma);
3559 struct page *old_page, *new_page;
3560 int outside_reserve = 0;
3561 vm_fault_t ret = 0;
3562 unsigned long haddr = address & huge_page_mask(h);
3563 struct mmu_notifier_range range;
3565 pte = huge_ptep_get(ptep);
3566 old_page = pte_page(pte);
3568 retry_avoidcopy:
3569 /* If no-one else is actually using this page, avoid the copy
3570 * and just make the page writable */
3571 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3572 page_move_anon_rmap(old_page, vma);
3573 set_huge_ptep_writable(vma, haddr, ptep);
3574 return 0;
3578 * If the process that created a MAP_PRIVATE mapping is about to
3579 * perform a COW due to a shared page count, attempt to satisfy
3580 * the allocation without using the existing reserves. The pagecache
3581 * page is used to determine if the reserve at this address was
3582 * consumed or not. If reserves were used, a partial faulted mapping
3583 * at the time of fork() could consume its reserves on COW instead
3584 * of the full address range.
3586 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3587 old_page != pagecache_page)
3588 outside_reserve = 1;
3590 get_page(old_page);
3593 * Drop page table lock as buddy allocator may be called. It will
3594 * be acquired again before returning to the caller, as expected.
3596 spin_unlock(ptl);
3597 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3599 if (IS_ERR(new_page)) {
3601 * If a process owning a MAP_PRIVATE mapping fails to COW,
3602 * it is due to references held by a child and an insufficient
3603 * huge page pool. To guarantee the original mapper's
3604 * reliability, unmap the page from child processes. The child
3605 * may get SIGKILLed if it later faults.
3607 if (outside_reserve) {
3608 put_page(old_page);
3609 BUG_ON(huge_pte_none(pte));
3610 unmap_ref_private(mm, vma, old_page, haddr);
3611 BUG_ON(huge_pte_none(pte));
3612 spin_lock(ptl);
3613 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3614 if (likely(ptep &&
3615 pte_same(huge_ptep_get(ptep), pte)))
3616 goto retry_avoidcopy;
3618 * race occurs while re-acquiring page table
3619 * lock, and our job is done.
3621 return 0;
3624 ret = vmf_error(PTR_ERR(new_page));
3625 goto out_release_old;
3629 * When the original hugepage is a shared one, it does not have
3630 * an anon_vma prepared.
3632 if (unlikely(anon_vma_prepare(vma))) {
3633 ret = VM_FAULT_OOM;
3634 goto out_release_all;
3637 copy_user_huge_page(new_page, old_page, address, vma,
3638 pages_per_huge_page(h));
3639 __SetPageUptodate(new_page);
3641 mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
3642 mmu_notifier_invalidate_range_start(&range);
3645 * Retake the page table lock to check for racing updates
3646 * before the page tables are altered
3648 spin_lock(ptl);
3649 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3650 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3651 ClearPagePrivate(new_page);
3653 /* Break COW */
3654 huge_ptep_clear_flush(vma, haddr, ptep);
3655 mmu_notifier_invalidate_range(mm, range.start, range.end);
3656 set_huge_pte_at(mm, haddr, ptep,
3657 make_huge_pte(vma, new_page, 1));
3658 page_remove_rmap(old_page, true);
3659 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3660 set_page_huge_active(new_page);
3661 /* Make the old page be freed below */
3662 new_page = old_page;
3664 spin_unlock(ptl);
3665 mmu_notifier_invalidate_range_end(&range);
3666 out_release_all:
3667 restore_reserve_on_error(h, vma, haddr, new_page);
3668 put_page(new_page);
3669 out_release_old:
3670 put_page(old_page);
3672 spin_lock(ptl); /* Caller expects lock to be held */
3673 return ret;
3676 /* Return the pagecache page at a given address within a VMA */
3677 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3678 struct vm_area_struct *vma, unsigned long address)
3680 struct address_space *mapping;
3681 pgoff_t idx;
3683 mapping = vma->vm_file->f_mapping;
3684 idx = vma_hugecache_offset(h, vma, address);
3686 return find_lock_page(mapping, idx);
3690 * Return whether there is a pagecache page to back given address within VMA.
3691 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3693 static bool hugetlbfs_pagecache_present(struct hstate *h,
3694 struct vm_area_struct *vma, unsigned long address)
3696 struct address_space *mapping;
3697 pgoff_t idx;
3698 struct page *page;
3700 mapping = vma->vm_file->f_mapping;
3701 idx = vma_hugecache_offset(h, vma, address);
3703 page = find_get_page(mapping, idx);
3704 if (page)
3705 put_page(page);
3706 return page != NULL;
3709 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3710 pgoff_t idx)
3712 struct inode *inode = mapping->host;
3713 struct hstate *h = hstate_inode(inode);
3714 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3716 if (err)
3717 return err;
3718 ClearPagePrivate(page);
3721 * set page dirty so that it will not be removed from cache/file
3722 * by non-hugetlbfs specific code paths.
3724 set_page_dirty(page);
3726 spin_lock(&inode->i_lock);
3727 inode->i_blocks += blocks_per_huge_page(h);
3728 spin_unlock(&inode->i_lock);
3729 return 0;
3732 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3733 struct vm_area_struct *vma,
3734 struct address_space *mapping, pgoff_t idx,
3735 unsigned long address, pte_t *ptep, unsigned int flags)
3737 struct hstate *h = hstate_vma(vma);
3738 vm_fault_t ret = VM_FAULT_SIGBUS;
3739 int anon_rmap = 0;
3740 unsigned long size;
3741 struct page *page;
3742 pte_t new_pte;
3743 spinlock_t *ptl;
3744 unsigned long haddr = address & huge_page_mask(h);
3745 bool new_page = false;
3748 * Currently, we are forced to kill the process in the event the
3749 * original mapper has unmapped pages from the child due to a failed
3750 * COW. Warn that such a situation has occurred as it may not be obvious
3752 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3753 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3754 current->pid);
3755 return ret;
3759 * Use page lock to guard against racing truncation
3760 * before we get page_table_lock.
3762 retry:
3763 page = find_lock_page(mapping, idx);
3764 if (!page) {
3765 size = i_size_read(mapping->host) >> huge_page_shift(h);
3766 if (idx >= size)
3767 goto out;
3770 * Check for page in userfault range
3772 if (userfaultfd_missing(vma)) {
3773 u32 hash;
3774 struct vm_fault vmf = {
3775 .vma = vma,
3776 .address = haddr,
3777 .flags = flags,
3779 * Hard to debug if it ends up being
3780 * used by a callee that assumes
3781 * something about the other
3782 * uninitialized fields... same as in
3783 * memory.c
3788 * hugetlb_fault_mutex must be dropped before
3789 * handling userfault. Reacquire after handling
3790 * fault to make calling code simpler.
3792 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3793 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3794 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3795 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3796 goto out;
3799 page = alloc_huge_page(vma, haddr, 0);
3800 if (IS_ERR(page)) {
3801 ret = vmf_error(PTR_ERR(page));
3802 goto out;
3804 clear_huge_page(page, address, pages_per_huge_page(h));
3805 __SetPageUptodate(page);
3806 new_page = true;
3808 if (vma->vm_flags & VM_MAYSHARE) {
3809 int err = huge_add_to_page_cache(page, mapping, idx);
3810 if (err) {
3811 put_page(page);
3812 if (err == -EEXIST)
3813 goto retry;
3814 goto out;
3816 } else {
3817 lock_page(page);
3818 if (unlikely(anon_vma_prepare(vma))) {
3819 ret = VM_FAULT_OOM;
3820 goto backout_unlocked;
3822 anon_rmap = 1;
3824 } else {
3826 * If a memory error occurs between mmap() and fault, some processes
3827 * don't have a hwpoisoned swap entry for the errored virtual address.
3828 * So we need to block the hugepage fault by checking the PG_hwpoison bit.
3830 if (unlikely(PageHWPoison(page))) {
3831 ret = VM_FAULT_HWPOISON |
3832 VM_FAULT_SET_HINDEX(hstate_index(h));
3833 goto backout_unlocked;
3838 * If we are going to COW a private mapping later, we examine the
3839 * pending reservations for this page now. This will ensure that
3840 * any allocations necessary to record that reservation occur outside
3841 * the spinlock.
3843 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3844 if (vma_needs_reservation(h, vma, haddr) < 0) {
3845 ret = VM_FAULT_OOM;
3846 goto backout_unlocked;
3848 /* Just decrements count, does not deallocate */
3849 vma_end_reservation(h, vma, haddr);
3852 ptl = huge_pte_lock(h, mm, ptep);
3853 size = i_size_read(mapping->host) >> huge_page_shift(h);
3854 if (idx >= size)
3855 goto backout;
3857 ret = 0;
3858 if (!huge_pte_none(huge_ptep_get(ptep)))
3859 goto backout;
3861 if (anon_rmap) {
3862 ClearPagePrivate(page);
3863 hugepage_add_new_anon_rmap(page, vma, haddr);
3864 } else
3865 page_dup_rmap(page, true);
3866 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3867 && (vma->vm_flags & VM_SHARED)));
3868 set_huge_pte_at(mm, haddr, ptep, new_pte);
3870 hugetlb_count_add(pages_per_huge_page(h), mm);
3871 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3872 /* Optimization, do the COW without a second fault */
3873 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3876 spin_unlock(ptl);
3879 * Only make newly allocated pages active. Existing pages found
3880 * in the pagecache could be !page_huge_active() if they have been
3881 * isolated for migration.
3883 if (new_page)
3884 set_page_huge_active(page);
3886 unlock_page(page);
3887 out:
3888 return ret;
3890 backout:
3891 spin_unlock(ptl);
3892 backout_unlocked:
3893 unlock_page(page);
3894 restore_reserve_on_error(h, vma, haddr, page);
3895 put_page(page);
3896 goto out;
3899 #ifdef CONFIG_SMP
3900 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3901 pgoff_t idx, unsigned long address)
3903 unsigned long key[2];
3904 u32 hash;
3906 key[0] = (unsigned long) mapping;
3907 key[1] = idx;
3909 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3911 return hash & (num_fault_mutexes - 1);
3913 #else
3915 * For uniprocessor systems we always use a single mutex, so just
3916 * return 0 and avoid the hashing overhead.
3918 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3919 pgoff_t idx, unsigned long address)
3921 return 0;
3923 #endif
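/*
 * Illustrative userspace sketch, not part of this file: it mimics the scheme
 * above, i.e. a power-of-two array of mutexes indexed by a hash of the
 * (mapping, index) pair and masked rather than taken modulo.  The
 * multiplicative hash below is only a stand-in for the kernel's jhash2();
 * all names here are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 64	/* must stay a power of two for the mask */

static pthread_mutex_t fault_mutex_table[NUM_FAULT_MUTEXES];

static uint32_t fault_mutex_hash(const void *mapping, unsigned long idx)
{
	uint64_t key = (uint64_t)(uintptr_t)mapping ^
		       (idx * 0x9e3779b97f4a7c15ULL);

	key ^= key >> 33;	/* cheap mixing, stand-in for jhash2() */
	return (uint32_t)key & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
	unsigned long idx;
	int i;

	for (i = 0; i < NUM_FAULT_MUTEXES; i++)
		pthread_mutex_init(&fault_mutex_table[i], NULL);

	for (idx = 0; idx < 8; idx++) {
		uint32_t h = fault_mutex_hash((void *)0x1000, idx);

		pthread_mutex_lock(&fault_mutex_table[h]);
		printf("index %lu -> mutex %u\n", idx, h);
		pthread_mutex_unlock(&fault_mutex_table[h]);
	}
	return 0;
}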
3925 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3926 unsigned long address, unsigned int flags)
3928 pte_t *ptep, entry;
3929 spinlock_t *ptl;
3930 vm_fault_t ret;
3931 u32 hash;
3932 pgoff_t idx;
3933 struct page *page = NULL;
3934 struct page *pagecache_page = NULL;
3935 struct hstate *h = hstate_vma(vma);
3936 struct address_space *mapping;
3937 int need_wait_lock = 0;
3938 unsigned long haddr = address & huge_page_mask(h);
3940 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3941 if (ptep) {
3942 entry = huge_ptep_get(ptep);
3943 if (unlikely(is_hugetlb_entry_migration(entry))) {
3944 migration_entry_wait_huge(vma, mm, ptep);
3945 return 0;
3946 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3947 return VM_FAULT_HWPOISON_LARGE |
3948 VM_FAULT_SET_HINDEX(hstate_index(h));
3949 } else {
3950 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3951 if (!ptep)
3952 return VM_FAULT_OOM;
3955 mapping = vma->vm_file->f_mapping;
3956 idx = vma_hugecache_offset(h, vma, haddr);
3959 * Serialize hugepage allocation and instantiation, so that we don't
3960 * get spurious allocation failures if two CPUs race to instantiate
3961 * the same page in the page cache.
3963 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3964 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3966 entry = huge_ptep_get(ptep);
3967 if (huge_pte_none(entry)) {
3968 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3969 goto out_mutex;
3972 ret = 0;
3975          * entry could be a migration/hwpoison entry at this point, so this
3976          * check prevents the kernel from proceeding below on the assumption
3977          * that we have an active hugepage in the pagecache. This goto defers to
3978          * the second page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3979          * check will handle it properly.
3981 if (!pte_present(entry))
3982 goto out_mutex;
3985 * If we are going to COW the mapping later, we examine the pending
3986 * reservations for this page now. This will ensure that any
3987 * allocations necessary to record that reservation occur outside the
3988 * spinlock. For private mappings, we also lookup the pagecache
3989 * page now as it is used to determine if a reservation has been
3990 * consumed.
3992 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3993 if (vma_needs_reservation(h, vma, haddr) < 0) {
3994 ret = VM_FAULT_OOM;
3995 goto out_mutex;
3997 /* Just decrements count, does not deallocate */
3998 vma_end_reservation(h, vma, haddr);
4000 if (!(vma->vm_flags & VM_MAYSHARE))
4001 pagecache_page = hugetlbfs_pagecache_page(h,
4002 vma, haddr);
4005 ptl = huge_pte_lock(h, mm, ptep);
4007 /* Check for a racing update before calling hugetlb_cow */
4008 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4009 goto out_ptl;
4012          * hugetlb_cow() requires the page locks of pte_page(entry) and
4013          * pagecache_page, so here we need to take the former one
4014          * when page != pagecache_page or !pagecache_page.
4016 page = pte_page(entry);
4017 if (page != pagecache_page)
4018 if (!trylock_page(page)) {
4019 need_wait_lock = 1;
4020 goto out_ptl;
4023 get_page(page);
4025 if (flags & FAULT_FLAG_WRITE) {
4026 if (!huge_pte_write(entry)) {
4027 ret = hugetlb_cow(mm, vma, address, ptep,
4028 pagecache_page, ptl);
4029 goto out_put_page;
4031 entry = huge_pte_mkdirty(entry);
4033 entry = pte_mkyoung(entry);
4034 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4035 flags & FAULT_FLAG_WRITE))
4036 update_mmu_cache(vma, haddr, ptep);
4037 out_put_page:
4038 if (page != pagecache_page)
4039 unlock_page(page);
4040 put_page(page);
4041 out_ptl:
4042 spin_unlock(ptl);
4044 if (pagecache_page) {
4045 unlock_page(pagecache_page);
4046 put_page(pagecache_page);
4048 out_mutex:
4049 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4051          * Generally it's safe to hold a refcount while waiting for the page lock.
4052          * But here we only wait in order to defer the next page fault and avoid a
4053          * busy loop; the page is not touched after it is unlocked and before we
4054          * return from the current page fault. So we are safe from accessing a
4055          * freed page even though we wait here without taking a refcount.
4057 if (need_wait_lock)
4058 wait_on_page_locked(page);
4059 return ret;
4063 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4064 * modifications for huge pages.
4066 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4067 pte_t *dst_pte,
4068 struct vm_area_struct *dst_vma,
4069 unsigned long dst_addr,
4070 unsigned long src_addr,
4071 struct page **pagep)
4073 struct address_space *mapping;
4074 pgoff_t idx;
4075 unsigned long size;
4076 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4077 struct hstate *h = hstate_vma(dst_vma);
4078 pte_t _dst_pte;
4079 spinlock_t *ptl;
4080 int ret;
4081 struct page *page;
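	/*
	 * On the first pass *pagep is NULL: allocate a huge page and copy the
	 * data from user space while mmap_sem is held. If that copy faults,
	 * hand the page back through *pagep and return -ENOENT so the caller
	 * can redo the copy outside mmap_sem and call in again with the page.
	 */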
4083 if (!*pagep) {
4084 ret = -ENOMEM;
4085 page = alloc_huge_page(dst_vma, dst_addr, 0);
4086 if (IS_ERR(page))
4087 goto out;
4089 ret = copy_huge_page_from_user(page,
4090 (const void __user *) src_addr,
4091 pages_per_huge_page(h), false);
4093 /* fallback to copy_from_user outside mmap_sem */
4094 if (unlikely(ret)) {
4095 ret = -ENOENT;
4096 *pagep = page;
4097 /* don't free the page */
4098 goto out;
4100 } else {
4101 page = *pagep;
4102 *pagep = NULL;
4106 * The memory barrier inside __SetPageUptodate makes sure that
4107 * preceding stores to the page contents become visible before
4108 * the set_pte_at() write.
4110 __SetPageUptodate(page);
4112 mapping = dst_vma->vm_file->f_mapping;
4113 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4116 * If shared, add to page cache
4118 if (vm_shared) {
4119 size = i_size_read(mapping->host) >> huge_page_shift(h);
4120 ret = -EFAULT;
4121 if (idx >= size)
4122 goto out_release_nounlock;
4125 * Serialization between remove_inode_hugepages() and
4126 * huge_add_to_page_cache() below happens through the
4127          * hugetlb_fault_mutex_table, which must be held by
4128          * the caller here.
4130 ret = huge_add_to_page_cache(page, mapping, idx);
4131 if (ret)
4132 goto out_release_nounlock;
4135 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4136 spin_lock(ptl);
4139 * Recheck the i_size after holding PT lock to make sure not
4140 * to leave any page mapped (as page_mapped()) beyond the end
4141 * of the i_size (remove_inode_hugepages() is strict about
4142 * enforcing that). If we bail out here, we'll also leave a
4143 * page in the radix tree in the vm_shared case beyond the end
4144 * of the i_size, but remove_inode_hugepages() will take care
4145 * of it as soon as we drop the hugetlb_fault_mutex_table.
4147 size = i_size_read(mapping->host) >> huge_page_shift(h);
4148 ret = -EFAULT;
4149 if (idx >= size)
4150 goto out_release_unlock;
4152 ret = -EEXIST;
4153 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4154 goto out_release_unlock;
4156 if (vm_shared) {
4157 page_dup_rmap(page, true);
4158 } else {
4159 ClearPagePrivate(page);
4160 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4163 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4164 if (dst_vma->vm_flags & VM_WRITE)
4165 _dst_pte = huge_pte_mkdirty(_dst_pte);
4166 _dst_pte = pte_mkyoung(_dst_pte);
4168 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4170 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4171 dst_vma->vm_flags & VM_WRITE);
4172 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4174 /* No need to invalidate - it was non-present before */
4175 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4177 spin_unlock(ptl);
4178 set_page_huge_active(page);
4179 if (vm_shared)
4180 unlock_page(page);
4181 ret = 0;
4182 out:
4183 return ret;
4184 out_release_unlock:
4185 spin_unlock(ptl);
4186 if (vm_shared)
4187 unlock_page(page);
4188 out_release_nounlock:
4189 put_page(page);
4190 goto out;
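/*
 * Core of get_user_pages() for hugetlb VMAs: walk the range starting at
 * *position for up to *nr_pages pages, faulting pages in as needed and
 * filling pages[] and vmas[]. Returns the number of pages processed, or
 * an error if none were.
 */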
4193 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4194 struct page **pages, struct vm_area_struct **vmas,
4195 unsigned long *position, unsigned long *nr_pages,
4196 long i, unsigned int flags, int *nonblocking)
4198 unsigned long pfn_offset;
4199 unsigned long vaddr = *position;
4200 unsigned long remainder = *nr_pages;
4201 struct hstate *h = hstate_vma(vma);
4202 int err = -EFAULT;
4204 while (vaddr < vma->vm_end && remainder) {
4205 pte_t *pte;
4206 spinlock_t *ptl = NULL;
4207 int absent;
4208 struct page *page;
4211 * If we have a pending SIGKILL, don't keep faulting pages and
4212 * potentially allocating memory.
4214 if (fatal_signal_pending(current)) {
4215 remainder = 0;
4216 break;
4220                  * Some archs (sparc64, sh*) have multiple pte_ts per
4221                  * hugepage. We have to make sure we get the first one,
4222                  * so that the page indexing below works.
4224                  * Note that the page table lock is not held when pte is NULL.
4226 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4227 huge_page_size(h));
4228 if (pte)
4229 ptl = huge_pte_lock(h, mm, pte);
4230 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4233 * When coredumping, it suits get_dump_page if we just return
4234 * an error where there's an empty slot with no huge pagecache
4235 * to back it. This way, we avoid allocating a hugepage, and
4236 * the sparse dumpfile avoids allocating disk blocks, but its
4237 * huge holes still show up with zeroes where they need to be.
4239 if (absent && (flags & FOLL_DUMP) &&
4240 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4241 if (pte)
4242 spin_unlock(ptl);
4243 remainder = 0;
4244 break;
4248                  * We need to call hugetlb_fault both for hugepages under migration
4249                  * (in which case hugetlb_fault waits for the migration) and for
4250                  * hwpoisoned hugepages (in which case we need to prevent the
4251                  * caller from accessing them). To do this, we use is_swap_pte
4252                  * here instead of is_hugetlb_entry_migration and
4253                  * is_hugetlb_entry_hwpoisoned, because it simply covers
4254                  * both cases, and because we can't follow a correct page
4255                  * directly from any kind of swap entry.
4257 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4258 ((flags & FOLL_WRITE) &&
4259 !huge_pte_write(huge_ptep_get(pte)))) {
4260 vm_fault_t ret;
4261 unsigned int fault_flags = 0;
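			/* Translate the gup FOLL_* flags into FAULT_FLAG_* bits for hugetlb_fault() */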
4263 if (pte)
4264 spin_unlock(ptl);
4265 if (flags & FOLL_WRITE)
4266 fault_flags |= FAULT_FLAG_WRITE;
4267 if (nonblocking)
4268 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4269 if (flags & FOLL_NOWAIT)
4270 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4271 FAULT_FLAG_RETRY_NOWAIT;
4272 if (flags & FOLL_TRIED) {
4273 VM_WARN_ON_ONCE(fault_flags &
4274 FAULT_FLAG_ALLOW_RETRY);
4275 fault_flags |= FAULT_FLAG_TRIED;
4277 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4278 if (ret & VM_FAULT_ERROR) {
4279 err = vm_fault_to_errno(ret, flags);
4280 remainder = 0;
4281 break;
4283 if (ret & VM_FAULT_RETRY) {
4284 if (nonblocking &&
4285 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4286 *nonblocking = 0;
4287 *nr_pages = 0;
4289                                  * VM_FAULT_RETRY must not return an
4290                                  * error; zero will be returned
4291                                  * instead.
4293 * No need to update "position" as the
4294 * caller will not check it after
4295 * *nr_pages is set to 0.
4297 return i;
4299 continue;
4302 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4303 page = pte_page(huge_ptep_get(pte));
4306 * Instead of doing 'try_get_page()' below in the same_page
4307 * loop, just check the count once here.
4309 if (unlikely(page_count(page) <= 0)) {
4310 if (pages) {
4311 spin_unlock(ptl);
4312 remainder = 0;
4313 err = -ENOMEM;
4314 break;
4317 same_page:
4318 if (pages) {
4319 pages[i] = mem_map_offset(page, pfn_offset);
4320 get_page(pages[i]);
4323 if (vmas)
4324 vmas[i] = vma;
4326 vaddr += PAGE_SIZE;
4327 ++pfn_offset;
4328 --remainder;
4329 ++i;
4330 if (vaddr < vma->vm_end && remainder &&
4331 pfn_offset < pages_per_huge_page(h)) {
4333 * We use pfn_offset to avoid touching the pageframes
4334 * of this compound page.
4336 goto same_page;
4338 spin_unlock(ptl);
4340 *nr_pages = remainder;
4342          * Setting position is actually required only if remainder is
4343          * not zero, but it's faster not to add an "if (remainder)"
4344          * branch.
4346 *position = vaddr;
4348 return i ? i : err;
4351 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4353 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4354 * implement this.
4356 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4357 #endif
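/*
 * Apply @newprot to every huge pte in [address, end). Shared pmds are
 * unshared rather than modified. Returns the number of base pages whose
 * protection was effectively changed.
 */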
4359 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4360 unsigned long address, unsigned long end, pgprot_t newprot)
4362 struct mm_struct *mm = vma->vm_mm;
4363 unsigned long start = address;
4364 pte_t *ptep;
4365 pte_t pte;
4366 struct hstate *h = hstate_vma(vma);
4367 unsigned long pages = 0;
4368 bool shared_pmd = false;
4369 struct mmu_notifier_range range;
4372 * In the case of shared PMDs, the area to flush could be beyond
4373 * start/end. Set range.start/range.end to cover the maximum possible
4374 * range if PMD sharing is possible.
4376 mmu_notifier_range_init(&range, mm, start, end);
4377 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4379 BUG_ON(address >= end);
4380 flush_cache_range(vma, range.start, range.end);
4382 mmu_notifier_invalidate_range_start(&range);
4383 i_mmap_lock_write(vma->vm_file->f_mapping);
4384 for (; address < end; address += huge_page_size(h)) {
4385 spinlock_t *ptl;
4386 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4387 if (!ptep)
4388 continue;
4389 ptl = huge_pte_lock(h, mm, ptep);
4390 if (huge_pmd_unshare(mm, &address, ptep)) {
4391 pages++;
4392 spin_unlock(ptl);
4393 shared_pmd = true;
4394 continue;
4396 pte = huge_ptep_get(ptep);
4397 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4398 spin_unlock(ptl);
4399 continue;
4401 if (unlikely(is_hugetlb_entry_migration(pte))) {
4402 swp_entry_t entry = pte_to_swp_entry(pte);
4404 if (is_write_migration_entry(entry)) {
4405 pte_t newpte;
4407 make_migration_entry_read(&entry);
4408 newpte = swp_entry_to_pte(entry);
4409 set_huge_swap_pte_at(mm, address, ptep,
4410 newpte, huge_page_size(h));
4411 pages++;
4413 spin_unlock(ptl);
4414 continue;
4416 if (!huge_pte_none(pte)) {
4417 pte_t old_pte;
4419 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4420 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4421 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4422 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4423 pages++;
4425 spin_unlock(ptl);
4428 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4429 * may have cleared our pud entry and done put_page on the page table:
4430 * once we release i_mmap_rwsem, another task can do the final put_page
4431          * and that page table can then be reused and filled with junk. If we actually
4432 * did unshare a page of pmds, flush the range corresponding to the pud.
4434 if (shared_pmd)
4435 flush_hugetlb_tlb_range(vma, range.start, range.end);
4436 else
4437 flush_hugetlb_tlb_range(vma, start, end);
4439          * No need to call mmu_notifier_invalidate_range(): we are downgrading
4440          * page table protection, not changing it to point to a new page.
4442 * See Documentation/vm/mmu_notifier.rst
4444 i_mmap_unlock_write(vma->vm_file->f_mapping);
4445 mmu_notifier_invalidate_range_end(&range);
4447 return pages << h->order;
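/*
 * Reserve huge pages for the file range [from, to). Shared mappings track
 * the reservation in the inode's resv_map so it is visible to all VMAs;
 * private mappings get a resv_map of their own, owned by this VMA. The
 * charge is applied to the subpool first and then to the global pool.
 */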
4450 int hugetlb_reserve_pages(struct inode *inode,
4451 long from, long to,
4452 struct vm_area_struct *vma,
4453 vm_flags_t vm_flags)
4455 long ret, chg;
4456 struct hstate *h = hstate_inode(inode);
4457 struct hugepage_subpool *spool = subpool_inode(inode);
4458 struct resv_map *resv_map;
4459 long gbl_reserve;
4461 /* This should never happen */
4462 if (from > to) {
4463 VM_WARN(1, "%s called with a negative range\n", __func__);
4464 return -EINVAL;
4468 * Only apply hugepage reservation if asked. At fault time, an
4469 * attempt will be made for VM_NORESERVE to allocate a page
4470          * without using reserves.
4472 if (vm_flags & VM_NORESERVE)
4473 return 0;
4476 * Shared mappings base their reservation on the number of pages that
4477 * are already allocated on behalf of the file. Private mappings need
4478 * to reserve the full area even if read-only as mprotect() may be
4479          * called to make the mapping read-write. Assume !vma is a shm mapping.
4481 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4482 resv_map = inode_resv_map(inode);
4484 chg = region_chg(resv_map, from, to);
4486 } else {
4487 resv_map = resv_map_alloc();
4488 if (!resv_map)
4489 return -ENOMEM;
4491 chg = to - from;
4493 set_vma_resv_map(vma, resv_map);
4494 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4497 if (chg < 0) {
4498 ret = chg;
4499 goto out_err;
4503 * There must be enough pages in the subpool for the mapping. If
4504 * the subpool has a minimum size, there may be some global
4505 * reservations already in place (gbl_reserve).
4507 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4508 if (gbl_reserve < 0) {
4509 ret = -ENOSPC;
4510 goto out_err;
4514          * Check that enough hugepages are available for the reservation.
4515          * Hand the pages back to the subpool if there are not.
4517 ret = hugetlb_acct_memory(h, gbl_reserve);
4518 if (ret < 0) {
4519 /* put back original number of pages, chg */
4520 (void)hugepage_subpool_put_pages(spool, chg);
4521 goto out_err;
4525 * Account for the reservations made. Shared mappings record regions
4526 * that have reservations as they are shared by multiple VMAs.
4527 * When the last VMA disappears, the region map says how much
4528 * the reservation was and the page cache tells how much of
4529 * the reservation was consumed. Private mappings are per-VMA and
4530 * only the consumed reservations are tracked. When the VMA
4531 * disappears, the original reservation is the VMA size and the
4532 * consumed reservations are stored in the map. Hence, nothing
4533          * else has to be done for private mappings here.
4535 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4536 long add = region_add(resv_map, from, to);
4538 if (unlikely(chg > add)) {
4540 * pages in this range were added to the reserve
4541 * map between region_chg and region_add. This
4542 * indicates a race with alloc_huge_page. Adjust
4543 * the subpool and reserve counts modified above
4544 * based on the difference.
4546 long rsv_adjust;
4548 rsv_adjust = hugepage_subpool_put_pages(spool,
4549 chg - add);
4550 hugetlb_acct_memory(h, -rsv_adjust);
4553 return 0;
4554 out_err:
4555 if (!vma || vma->vm_flags & VM_MAYSHARE)
4556 /* Don't call region_abort if region_chg failed */
4557 if (chg >= 0)
4558 region_abort(resv_map, from, to);
4559 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4560 kref_put(&resv_map->refs, resv_map_release);
4561 return ret;
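/*
 * Undo reservations for the file range [start, end) after @freed pages have
 * actually been freed: trim the inode's reserve map, reduce i_blocks, and
 * hand any remaining reservation back to the subpool and the global pool.
 */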
4564 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4565 long freed)
4567 struct hstate *h = hstate_inode(inode);
4568 struct resv_map *resv_map = inode_resv_map(inode);
4569 long chg = 0;
4570 struct hugepage_subpool *spool = subpool_inode(inode);
4571 long gbl_reserve;
4573 if (resv_map) {
4574 chg = region_del(resv_map, start, end);
4576 * region_del() can fail in the rare case where a region
4577                  * must be split and another region descriptor cannot be
4578 * allocated. If end == LONG_MAX, it will not fail.
4580 if (chg < 0)
4581 return chg;
4584 spin_lock(&inode->i_lock);
4585 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4586 spin_unlock(&inode->i_lock);
4589 * If the subpool has a minimum size, the number of global
4590 * reservations to be released may be adjusted.
4592 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4593 hugetlb_acct_memory(h, -gbl_reserve);
4595 return 0;
4598 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
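/*
 * Return the address in @svma that maps the same PUD-sized region as @addr
 * does in @vma if the two VMAs could share a page of pmds there, or 0 if
 * sharing is not possible.
 */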
4599 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4600 struct vm_area_struct *vma,
4601 unsigned long addr, pgoff_t idx)
4603 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4604 svma->vm_start;
4605 unsigned long sbase = saddr & PUD_MASK;
4606 unsigned long s_end = sbase + PUD_SIZE;
4608 /* Allow segments to share if only one is marked locked */
4609 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4610 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4613          * The virtual addresses, the permissions and the alignment of the
4614          * page table page must all match.
4616 if (pmd_index(addr) != pmd_index(saddr) ||
4617 vm_flags != svm_flags ||
4618 sbase < svma->vm_start || svma->vm_end < s_end)
4619 return 0;
4621 return saddr;
4624 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4626 unsigned long base = addr & PUD_MASK;
4627 unsigned long end = base + PUD_SIZE;
4630 * check on proper vm_flags and page table alignment
4632 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4633 return true;
4634 return false;
4638 * Determine if start,end range within vma could be mapped by shared pmd.
4639 * If yes, adjust start and end to cover range associated with possible
4640 * shared pmd mappings.
4642 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4643 unsigned long *start, unsigned long *end)
4645 unsigned long check_addr = *start;
4647 if (!(vma->vm_flags & VM_MAYSHARE))
4648 return;
4650 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4651 unsigned long a_start = check_addr & PUD_MASK;
4652 unsigned long a_end = a_start + PUD_SIZE;
4655 * If sharing is possible, adjust start/end if necessary.
4657 if (range_in_vma(vma, a_start, a_end)) {
4658 if (a_start < *start)
4659 *start = a_start;
4660 if (a_end > *end)
4661 *end = a_end;
4667 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4668 * and returns the corresponding pte. While this is not necessary for the
4669 * !shared pmd case because we can allocate the pmd later as well, it makes the
4670 * code much cleaner. pmd allocation is essential for the shared case because
4671 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4672 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4673 * bad pmd for sharing.
4675 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4677 struct vm_area_struct *vma = find_vma(mm, addr);
4678 struct address_space *mapping = vma->vm_file->f_mapping;
4679 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4680 vma->vm_pgoff;
4681 struct vm_area_struct *svma;
4682 unsigned long saddr;
4683 pte_t *spte = NULL;
4684 pte_t *pte;
4685 spinlock_t *ptl;
4687 if (!vma_shareable(vma, addr))
4688 return (pte_t *)pmd_alloc(mm, pud, addr);
4690 i_mmap_lock_write(mapping);
4691 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4692 if (svma == vma)
4693 continue;
4695 saddr = page_table_shareable(svma, vma, addr, idx);
4696 if (saddr) {
4697 spte = huge_pte_offset(svma->vm_mm, saddr,
4698 vma_mmu_pagesize(svma));
4699 if (spte) {
4700 get_page(virt_to_page(spte));
4701 break;
4706 if (!spte)
4707 goto out;
4709 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4710 if (pud_none(*pud)) {
4711 pud_populate(mm, pud,
4712 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4713 mm_inc_nr_pmds(mm);
4714 } else {
4715 put_page(virt_to_page(spte));
4717 spin_unlock(ptl);
4718 out:
4719 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4720 i_mmap_unlock_write(mapping);
4721 return pte;
4725  * Unmap a huge page backed by a shared pte.
4727  * The hugetlb pte page is ref counted at the time of mapping. If the pte is
4728  * shared, as indicated by page_count > 1, the unmap is achieved by clearing
4729  * the pud and decrementing the ref count. If the count == 1, the pte page is not shared.
4731  * Called with the page table lock held.
4733  * Returns: 1 successfully unmapped a shared pte page
4734  *          0 the underlying pte page is not shared, or it is the last user
4736 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4738 pgd_t *pgd = pgd_offset(mm, *addr);
4739 p4d_t *p4d = p4d_offset(pgd, *addr);
4740 pud_t *pud = pud_offset(p4d, *addr);
4742 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4743 if (page_count(virt_to_page(ptep)) == 1)
4744 return 0;
4746 pud_clear(pud);
4747 put_page(virt_to_page(ptep));
4748 mm_dec_nr_pmds(mm);
4749 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4750 return 1;
4752 #define want_pmd_share() (1)
4753 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4754 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4756 return NULL;
4759 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4761 return 0;
4764 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4765 unsigned long *start, unsigned long *end)
4768 #define want_pmd_share() (0)
4769 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4771 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
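/*
 * Allocate the page table entry used to map a huge page of size @sz at
 * @addr. For PUD_SIZE pages the pud itself acts as the huge pte; for
 * PMD_SIZE pages pmd sharing is attempted first when allowed, otherwise a
 * pmd page is allocated.
 */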
4772 pte_t *huge_pte_alloc(struct mm_struct *mm,
4773 unsigned long addr, unsigned long sz)
4775 pgd_t *pgd;
4776 p4d_t *p4d;
4777 pud_t *pud;
4778 pte_t *pte = NULL;
4780 pgd = pgd_offset(mm, addr);
4781 p4d = p4d_alloc(mm, pgd, addr);
4782 if (!p4d)
4783 return NULL;
4784 pud = pud_alloc(mm, p4d, addr);
4785 if (pud) {
4786 if (sz == PUD_SIZE) {
4787 pte = (pte_t *)pud;
4788 } else {
4789 BUG_ON(sz != PMD_SIZE);
4790 if (want_pmd_share() && pud_none(*pud))
4791 pte = huge_pmd_share(mm, addr, pud);
4792 else
4793 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4796 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4798 return pte;
4802 * huge_pte_offset() - Walk the page table to resolve the hugepage
4803 * entry at address @addr
4805 * Return: Pointer to page table or swap entry (PUD or PMD) for
4806 * address @addr, or NULL if a p*d_none() entry is encountered and the
4807 * size @sz doesn't match the hugepage size at this level of the page
4808 * table.
4810 pte_t *huge_pte_offset(struct mm_struct *mm,
4811 unsigned long addr, unsigned long sz)
4813 pgd_t *pgd;
4814 p4d_t *p4d;
4815 pud_t *pud;
4816 pmd_t *pmd;
4818 pgd = pgd_offset(mm, addr);
4819 if (!pgd_present(*pgd))
4820 return NULL;
4821 p4d = p4d_offset(pgd, addr);
4822 if (!p4d_present(*p4d))
4823 return NULL;
4825 pud = pud_offset(p4d, addr);
4826 if (sz != PUD_SIZE && pud_none(*pud))
4827 return NULL;
4828 /* hugepage or swap? */
4829 if (pud_huge(*pud) || !pud_present(*pud))
4830 return (pte_t *)pud;
4832 pmd = pmd_offset(pud, addr);
4833 if (sz != PMD_SIZE && pmd_none(*pmd))
4834 return NULL;
4835 /* hugepage or swap? */
4836 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4837 return (pte_t *)pmd;
4839 return NULL;
4842 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4845  * These functions are declared __weak and can be overridden if your
4846  * architecture needs its own behavior.
4848 struct page * __weak
4849 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4850 int write)
4852 return ERR_PTR(-EINVAL);
4855 struct page * __weak
4856 follow_huge_pd(struct vm_area_struct *vma,
4857 unsigned long address, hugepd_t hpd, int flags, int pdshift)
4859 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4860 return NULL;
4863 struct page * __weak
4864 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4865 pmd_t *pmd, int flags)
4867 struct page *page = NULL;
4868 spinlock_t *ptl;
4869 pte_t pte;
4870 retry:
4871 ptl = pmd_lockptr(mm, pmd);
4872 spin_lock(ptl);
4874          * Make sure that the address range covered by this pmd is not
4875          * unmapped by another thread.
4877 if (!pmd_huge(*pmd))
4878 goto out;
4879 pte = huge_ptep_get((pte_t *)pmd);
4880 if (pte_present(pte)) {
4881 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4882 if (flags & FOLL_GET)
4883 get_page(page);
4884 } else {
4885 if (is_hugetlb_entry_migration(pte)) {
4886 spin_unlock(ptl);
4887 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4888 goto retry;
4891 * hwpoisoned entry is treated as no_page_table in
4892 * follow_page_mask().
4895 out:
4896 spin_unlock(ptl);
4897 return page;
4900 struct page * __weak
4901 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4902 pud_t *pud, int flags)
4904 if (flags & FOLL_GET)
4905 return NULL;
4907 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4910 struct page * __weak
4911 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4913 if (flags & FOLL_GET)
4914 return NULL;
4916 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
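/*
 * Isolate an active huge page for migration: take a reference, clear its
 * "active" flag and move it to @list. Fails if the page is not active or
 * its refcount is already zero.
 */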
4919 bool isolate_huge_page(struct page *page, struct list_head *list)
4921 bool ret = true;
4923 VM_BUG_ON_PAGE(!PageHead(page), page);
4924 spin_lock(&hugetlb_lock);
4925 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4926 ret = false;
4927 goto unlock;
4929 clear_page_huge_active(page);
4930 list_move_tail(&page->lru, list);
4931 unlock:
4932 spin_unlock(&hugetlb_lock);
4933 return ret;
4936 void putback_active_hugepage(struct page *page)
4938 VM_BUG_ON_PAGE(!PageHead(page), page);
4939 spin_lock(&hugetlb_lock);
4940 set_page_huge_active(page);
4941 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4942 spin_unlock(&hugetlb_lock);
4943 put_page(page);
4946 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
4948 struct hstate *h = page_hstate(oldpage);
4950 hugetlb_cgroup_migrate(oldpage, newpage);
4951 set_page_owner_migrate_reason(newpage, reason);
4954          * Transfer the temporary state of the new huge page. This is the
4955          * reverse of other transitions because the new page is going to
4956          * be final while the old one will be freed, so it takes over
4957          * the temporary status.
4959          * Also note that we have to transfer the per-node surplus state
4960          * here as well, otherwise the global surplus count will not match
4961          * the per-node counts.
4963 if (PageHugeTemporary(newpage)) {
4964 int old_nid = page_to_nid(oldpage);
4965 int new_nid = page_to_nid(newpage);
4967 SetPageHugeTemporary(oldpage);
4968 ClearPageHugeTemporary(newpage);
4970 spin_lock(&hugetlb_lock);
4971 if (h->surplus_huge_pages_node[old_nid]) {
4972 h->surplus_huge_pages_node[old_nid]--;
4973 h->surplus_huge_pages_node[new_nid]++;
4975 spin_unlock(&hugetlb_lock);