/* drivers/dax/dax.c */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are page backed or not
 */
struct dax_region {
        int id;
        struct ida ida;
        void *base;
        struct kref kref;
        struct device *dev;
        unsigned int align;
        struct resource res;
        unsigned long pfn_flags;
};
/**
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @inode - inode from the dax pseudo filesystem backing this device
 * @dev - device backing the character device
 * @cdev - core chardev data
 * @alive - !alive + srcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
 */
struct dax_dev {
        struct dax_region *region;
        struct inode *inode;
        struct device dev;
        struct cdev cdev;
        bool alive;
        int id;
        int num_resources;
        struct resource res[0];
};
static struct inode *dax_alloc_inode(struct super_block *sb)
{
        return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

static void dax_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .drop_inode = generic_delete_inode,
};
static struct dentry *dax_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
        .name = "dax",
        .mount = dax_mount,
        .kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
        return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
        inode->i_cdev = data;
        return 0;
}
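
/*
 * Look up (or create) the pseudo-filesystem inode that backs this
 * character device.  The dev_t is hashed so lookups for an existing
 * cdev are cheap; a freshly allocated inode is marked S_IFCHR/S_DAX
 * and tied to @devt before being unlocked.
 */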
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, cdev);

        if (!inode)
                return NULL;

        if (inode->i_state & I_NEW) {
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                inode->i_rdev = devt;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }
        return inode;
}
static void init_once(void *inode)
{
        inode_init_once(inode);
}

static int dax_inode_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        rc = register_filesystem(&dax_type);
        if (rc)
                goto err_register_fs;

        dax_mnt = kern_mount(&dax_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        unregister_filesystem(&dax_type);
 err_register_fs:
        kmem_cache_destroy(dax_cache);

        return rc;
}

static void dax_inode_exit(void)
{
        kern_unmount(dax_mnt);
        unregister_filesystem(&dax_type);
        kmem_cache_destroy(dax_cache);
}
static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
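
/*
 * Allocate a dax_region describing a physical address range that child
 * device-dax instances can be carved out of.  Both the range start and
 * its size must be aligned to @align, otherwise no region is created.
 */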
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, unsigned int align, void *addr,
                unsigned long pfn_flags)
{
        struct dax_region *dax_region;

        if (!IS_ALIGNED(res->start, align)
                        || !IS_ALIGNED(resource_size(res), align))
                return NULL;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        memcpy(&dax_region->res, res, sizeof(*res));
        dax_region->pfn_flags = pfn_flags;
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        ida_init(&dax_region->ida);
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->base = addr;

        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static struct dax_dev *to_dax_dev(struct device *dev)
{
        return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        unsigned long long size = 0;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++)
                size += resource_size(&dax_dev->res[i]);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static struct attribute *dax_device_attributes[] = {
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group dax_device_attribute_group = {
        .attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dax_device_attribute_group,
        NULL,
};
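
/*
 * Validate that a vma is suitable for device-dax: the device must still
 * be alive, the mapping must be shared (no private/COW mappings), the
 * vma must honor the region alignment, and PFN_DEV-only regions
 * additionally require VM_DONTCOPY (MADV_DONTFORK) so the mapping is
 * not inherited across fork.
 */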
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                const char *func)
{
        struct dax_region *dax_region = dax_dev->region;
        struct device *dev = &dax_dev->dev;
        unsigned long mask;

        if (!dax_dev->alive)
                return -ENXIO;

        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
                dev_info(dev, "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
                dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}
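
/*
 * Translate a file page offset into a physical address by walking the
 * device's resource array.  Returns -1 if the offset (plus @size) does
 * not fall entirely within one of the backing resources.
 */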
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
                unsigned long size)
{
        struct resource *res;
        phys_addr_t phys;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++) {
                res = &dax_dev->res[i];
                phys = pgoff * PAGE_SIZE + res->start;
                if (phys >= res->start && phys <= res->end)
                        break;
                pgoff -= PHYS_PFN(resource_size(res));
        }

        if (i < dax_dev->num_resources) {
                res = &dax_dev->res[i];
                if (phys + size - 1 <= res->end)
                        return phys;
        }

        return -1;
}
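
/* PTE (PAGE_SIZE) fault handler: map a single page of device memory */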
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long) vmf->virtual_address;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;
        unsigned int fault_size = PAGE_SIZE;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PAGE_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size != dax_region->align)
                return VM_FAULT_SIGBUS;

        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        rc = vm_insert_mixed(vma, vaddr, pfn);

        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}
static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int rc, id;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (vmf->flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);

        /*
         * Take the srcu read lock (not plain rcu) so that the
         * synchronize_srcu() in kill_dax_dev() is guaranteed to wait
         * for this handler, matching the pmd fault path below.
         */
        id = srcu_read_lock(&dax_srcu);
        rc = __dax_dev_fault(dax_dev, vma, vmf);
        srcu_read_unlock(&dax_srcu, id);

        return rc;
}
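
/*
 * PMD (huge page) fault handler: regions with an alignment smaller than
 * PMD_SIZE fall back to PTE faults, and PMD mappings additionally
 * require a devmap-capable (PFN_DEV|PFN_MAP) region.
 */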
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
                struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
                unsigned int flags)
{
        unsigned long pmd_addr = addr & PMD_MASK;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;
        unsigned int fault_size = PMD_SIZE;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PMD_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        /* dax pmd mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        if (fault_size < dax_region->align)
                return VM_FAULT_SIGBUS;
        else if (fault_size > dax_region->align)
                return VM_FAULT_FALLBACK;

        /* if we are outside of the VMA */
        if (pmd_addr < vma->vm_start ||
                        (pmd_addr + PMD_SIZE) > vma->vm_end)
                return VM_FAULT_SIGBUS;

        pgoff = linear_page_index(vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
                        flags & FAULT_FLAG_WRITE);
}
static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, unsigned int flags)
{
        int rc, id;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);

        id = srcu_read_lock(&dax_srcu);
        rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
        srcu_read_unlock(&dax_srcu, id);

        return rc;
}
static int dax_dev_split(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;
        struct dax_region *dax_region = dax_dev->region;

        if (!IS_ALIGNED(addr, dax_region->align))
                return -EINVAL;
        return 0;
}
static const struct vm_operations_struct dax_dev_vm_ops = {
        .fault = dax_dev_fault,
        .pmd_fault = dax_dev_pmd_fault,
        .split = dax_dev_split,
};
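
/*
 * mmap() entry point: revalidate the vma and install dax_dev_vm_ops so
 * faults are satisfied directly from device memory (VM_MIXEDMAP), with
 * huge page faults hinted via VM_HUGEPAGE.
 */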
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dax_dev *dax_dev = filp->private_data;
        int rc;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);

        rc = check_vma(dax_dev, vma, __func__);
        if (rc)
                return rc;

        vma->vm_ops = &dax_dev_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
        struct dax_region *dax_region;

        if (!dax_dev || addr)
                goto out;

        dax_region = dax_dev->region;
        align = dax_region->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
 out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
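
/*
 * open() redirects the chardev inode's address_space to the dax_dev's
 * private inode so that kill_dax_dev() can revoke every established
 * mapping with a single unmap_mapping_range() call.
 */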
static int dax_open(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev;

        dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        inode->i_mapping = dax_dev->inode->i_mapping;
        inode->i_mapping->host = dax_dev->inode;
        filp->f_mapping = inode->i_mapping;
        filp->private_data = dax_dev;
        inode->i_flags = S_DAX;

        return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        return 0;
}
static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_open,
        .release = dax_release,
        .get_unmapped_area = dax_get_unmapped_area,
        .mmap = dax_mmap,
};
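
/*
 * Final teardown once the last reference to the device is dropped:
 * release the region and minor ids, drop the region reference, and free
 * the backing inode and the dax_dev allocation.
 */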
static void dax_dev_release(struct device *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        struct dax_region *dax_region = dax_dev->region;

        if (dax_dev->id >= 0)
                ida_simple_remove(&dax_region->ida, dax_dev->id);
        ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
        dax_region_put(dax_region);
        iput(dax_dev->inode);
        kfree(dax_dev);
}
static void kill_dax_dev(struct dax_dev *dax_dev)
{
        struct cdev *cdev = &dax_dev->cdev;

        /*
         * Note, rcu is not protecting the liveness of dax_dev, rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true, have completed.  Any fault handlers
         * that start after synchronize_srcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
        synchronize_srcu(&dax_srcu);
        unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
        cdev_del(cdev);
}
static void unregister_dax_dev(void *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);

        dev_dbg(dev, "%s\n", __func__);

        kill_dax_dev(dax_dev);
        device_unregister(dev);
}
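
/*
 * Create a device-dax instance spanning @count physical address ranges.
 * Every range must honor the region alignment.  Teardown is devm
 * managed: once device_add() succeeds, unregister_dax_dev() runs
 * automatically when the parent region device goes away.
 */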
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
                int id, struct resource *res, int count)
{
        struct device *parent = dax_region->dev;
        struct dax_dev *dax_dev;
        int rc = 0, minor, i;
        struct device *dev;
        struct cdev *cdev;
        dev_t dev_t;

        dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
        if (!dax_dev)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < count; i++) {
                if (!IS_ALIGNED(res[i].start, dax_region->align)
                                || !IS_ALIGNED(resource_size(&res[i]),
                                        dax_region->align)) {
                        rc = -EINVAL;
                        break;
                }
                dax_dev->res[i].start = res[i].start;
                dax_dev->res[i].end = res[i].end;
        }

        if (i < count)
                goto err_id;

        if (id < 0) {
                id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
                dax_dev->id = id;
                if (id < 0) {
                        rc = id;
                        goto err_id;
                }
        } else {
                /* region provider owns @id lifetime */
                dax_dev->id = -1;
        }

        minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_minor;
        }

        dev_t = MKDEV(MAJOR(dax_devt), minor);
        dev = &dax_dev->dev;
        dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
        if (!dax_dev->inode) {
                rc = -ENOMEM;
                goto err_inode;
        }

        /* device_initialize() so cdev can reference kobj parent */
        device_initialize(dev);

        cdev = &dax_dev->cdev;
        cdev_init(cdev, &dax_fops);
        cdev->owner = parent->driver->owner;
        cdev->kobj.parent = &dev->kobj;
        rc = cdev_add(&dax_dev->cdev, dev_t, 1);
        if (rc)
                goto err_cdev;

        /* from here on we're committed to teardown via dax_dev_release() */
        dax_dev->num_resources = count;
        dax_dev->alive = true;
        dax_dev->region = dax_region;
        kref_get(&dax_region->kref);

        dev->devt = dev_t;
        dev->class = dax_class;
        dev->parent = parent;
        dev->groups = dax_attribute_groups;
        dev->release = dax_dev_release;
        dev_set_name(dev, "dax%d.%d", dax_region->id, id);
        rc = device_add(dev);
        if (rc) {
                kill_dax_dev(dax_dev);
                put_device(dev);
                return ERR_PTR(rc);
        }

        rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
        if (rc)
                return ERR_PTR(rc);

        return dax_dev;

 err_cdev:
        iput(dax_dev->inode);
 err_inode:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        if (dax_dev->id >= 0)
                ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
        kfree(dax_dev);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
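
/*
 * Module init: set up the dax pseudo filesystem, reserve a char-device
 * major with room for nr_dax minors (at least 256), and register the
 * "dax" device class.
 */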
static int __init dax_init(void)
{
        int rc;

        rc = dax_inode_init();
        if (rc)
                return rc;

        nr_dax = max(nr_dax, 256);
        rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
        if (rc)
                goto err_chrdev;

        dax_class = class_create(THIS_MODULE, "dax");
        if (IS_ERR(dax_class)) {
                rc = PTR_ERR(dax_class);
                goto err_class;
        }

        return 0;

 err_class:
        unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
        dax_inode_exit();
        return rc;
}
static void __exit dax_exit(void)
{
        class_destroy(dax_class);
        unregister_chrdev_region(dax_devt, nr_dax);
        ida_destroy(&dax_minor_ida);
        dax_inode_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);