fs/hugetlbfs/inode.c
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
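
/*
 * Illustrative mount command exercising these parameters (the mount
 * point and values are examples, not defaults):
 *
 *	mount -t hugetlbfs \
 *		-o pagesize=2M,size=1G,min_size=512M,nr_inodes=64 \
 *		none /dev/hugepages
 */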

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
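
/*
 * Worked example (assuming a 64-bit arch with 4K pages): PAGE_SHIFT + 1
 * is 13, so PGOFF_LOFFT_MAX is 0x1fff << 51, i.e. the top 13 bits of an
 * unsigned long.  A vm_pgoff with none of those bits set is below 2^51,
 * so vm_pgoff << PAGE_SHIFT stays below 2^63 and fits in a positive
 * loff_t.
 */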

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif
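
/*
 * Example of the alignment math above (assuming x86-64 with 2 MB huge
 * pages): huge_page_mask(h) clears the low 21 bits, so
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000 and vm_unmapped_area()
 * only returns gaps whose start is 2 MB aligned.
 */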

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
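
/*
 * hugetlbfs does not implement ->write_iter, so write(2) is not
 * supported; file contents are populated through mmap() or fallocate().
 * ->write_begin below therefore fails the generic buffered-write
 * protocol outright, which should make ->write_end unreachable (hence
 * the BUG).
 */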
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
								NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * page_put due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
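
/*
 * Illustrative userspace use of the preallocation path above (the path
 * and sizes are assumptions; a 2 MB default hstate is presumed for the
 * shift):
 *
 *	int fd = open("/dev/hugepages/f", O_CREAT | O_RDWR, 0600);
 *	if (fallocate(fd, 0, 0, 8UL << 21))	// preallocate 8 huge pages
 *		perror("fallocate");
 */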

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry); /* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct inode *dir,
			struct dentry *dentry, umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and can not be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
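
/*
 * Inode-count accounting helpers.  When the "nr_inodes" option is not
 * given, free_inodes stays at -1, the "free_inodes >= 0" checks below
 * never pass, and no accounting is done (the fs is unlimited).
 */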
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.free_inode	= hugetlbfs_free_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
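
/*
 * Worked example (values assumed): with 2 MB huge pages and a pool of
 * 1024 pages, "size=50%" arrives here as size_opt == 50 and
 * SIZE_PERCENT.  50 << 21 is scaled by 1024, divided by 100, then
 * shifted back down by 21, yielding 512 huge pages.
 */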

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one less hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
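
/*
 * One illustrative caller of the helper above is the SysV shared memory
 * path (size assumed, 2 MB default hstate presumed for the shift):
 *
 *	int id = shmget(IPC_PRIVATE, 8UL << 21,
 *			SHM_HUGETLB | IPC_CREAT | 0600);
 */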

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)