/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
	int id;
	struct ida ida;
	void *base;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct resource res;
	unsigned long pfn_flags;
};
/**
 * struct dax_dev - subdivision of a dax region
 * @region: parent region
 * @inode: core vfs inode for the character device
 * @dev: device backing the character device
 * @cdev: core chardev data
 * @alive: !alive + srcu grace period == no new mappings can be established
 * @id: child id in the region
 * @num_resources: number of physical address extents in this device
 * @res: array of physical address ranges
 */
struct dax_dev {
	struct dax_region *region;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};
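
/*
 * Sizing note (editorial illustration, not from the original source): because
 * @res is a flexible array member, a dax_dev carrying @num_resources ranges
 * is allocated in a single shot, along the lines of:
 *
 *	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(struct resource) * count,
 *			GFP_KERNEL);
 *
 * which is the allocation devm_create_dax_dev() performs before copying the
 * caller-supplied ranges into @res.
 */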
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}
static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}
static void dax_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dax_i_callback);
}
static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};
static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}
static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}
static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);
	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return inode;
}
static void init_once(void *inode)
{
	inode_init_once(inode);
}
static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}
static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}
static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}
void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;

	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++)
		size += resource_size(&dax_dev->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static struct attribute *dax_device_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dax_device_attribute_group = {
	.attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dax_device_attribute_group,
	NULL,
};
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}
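
/*
 * Hedged userspace sketch (illustrative, not part of this driver): the
 * constraints enforced by check_vma() correspond to an mmap() call like the
 * following, assuming a hypothetical /dev/dax0.0 backed by a region with a
 * 2MB alignment:
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	size_t len = 1UL << 21;		(a multiple of dax_region->align)
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * A MAP_PRIVATE request, or a vma whose start or end is not aligned to
 * dax_region->align, fails the checks above.
 */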
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++) {
		res = &dax_dev->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dax_dev->num_resources) {
		res = &dax_dev->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}
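
/*
 * Worked example (editorial, with made-up numbers): given res[0] spanning
 * 0x100000000-0x13fffffff and res[1] spanning 0x200000000-0x23fffffff (1GB
 * each), a request for pgoff 0x40000 (1GB into the device) first computes
 * phys = 0x140000000, which falls past res[0]->end, so pgoff is reduced by
 * PHYS_PFN(1GB) = 0x40000.  The next iteration yields phys = 0x200000000
 * inside res[1], which is returned provided phys + size - 1 still falls
 * within res[1]->end.
 */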
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long) vmf->virtual_address;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vma, vaddr, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int rc, id;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	id = srcu_read_lock(&dax_srcu);
	rc = __dax_dev_fault(dax_dev, vma, vmf);
	srcu_read_unlock(&dax_srcu, id);

	return rc;
}
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
		struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
		unsigned int flags)
{
	unsigned long pmd_addr = addr & PMD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
			flags & FAULT_FLAG_WRITE);
}
static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	int rc, id;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	id = srcu_read_lock(&dax_srcu);
	rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
	srcu_read_unlock(&dax_srcu, id);

	return rc;
}
static int dax_dev_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;
	struct dax_region *dax_region = dax_dev->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}
static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.pmd_fault = dax_dev_pmd_fault,
	.split = dax_dev_split,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
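
/*
 * Worked example (editorial, with made-up numbers): for align = 2MB,
 * pgoff = 0 and a 4MB request, len_align = 6MB.  If the core mm hands back
 * addr_align = 0x7f0000100000, then (off - addr_align) & (align - 1) adds
 * 0x100000, producing 0x7f0000200000: a 2MB-aligned start whose 4MB mapping
 * still fits inside the 6MB window that was reserved.
 */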
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}
static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};
static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	if (dax_dev->id >= 0)
		ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}
static void kill_dax_dev(struct dax_dev *dax_dev)
{
	struct cdev *cdev = &dax_dev->cdev;

	/*
	 * Note, srcu is not protecting the liveness of dax_dev; it is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true have completed.  Any fault handlers
	 * that start after synchronize_srcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_srcu(&dax_srcu);
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
}
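
/*
 * Ordering sketch (editorial): a racing fault handler either observed
 * alive == true before kill_dax_dev() cleared it, in which case it holds the
 * srcu read lock and synchronize_srcu() waits for its read section, so any
 * mapping it inserted is torn down by unmap_mapping_range(); or it starts
 * afterwards and check_vma() fails the !alive test before touching the
 * device.
 */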
static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);

	dev_dbg(dev, "%s\n", __func__);

	kill_dax_dev(dax_dev);
	device_unregister(dev);
}
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	int rc = 0, minor, i;
	struct device *dev;
	struct cdev *cdev;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dax_dev->res[i].start = res[i].start;
		dax_dev->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dax_dev->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dax_dev->id = -1;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dev = &dax_dev->dev;
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);
	rc = device_add(dev);
	if (rc) {
		kill_dax_dev(dax_dev);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return ERR_PTR(rc);

	return dax_dev;

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	if (dax_dev->id >= 0)
		ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
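
/*
 * Hedged provider-side sketch (modeled on how a region provider such as
 * dax_pmem is expected to drive this interface; any given provider may
 * differ):
 *
 *	struct dax_region *dax_region;
 *	struct dax_dev *dax_dev;
 *
 *	dax_region = alloc_dax_region(dev, region_id, &res, align, addr,
 *			PFN_DEV | PFN_MAP);
 *	if (!dax_region)
 *		return -ENOMEM;
 *
 *	dax_dev = devm_create_dax_dev(dax_region, -1, &res, 1);
 *
 *	dax_region_put(dax_region);
 *	return PTR_ERR_OR_ZERO(dax_dev);
 *
 * Passing id < 0 asks devm_create_dax_dev() to allocate the child id from the
 * region ida; devm_create_dax_dev() takes its own reference on the region, so
 * the provider's dax_region_put() is safe once the device is created.
 */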
static int __init dax_init(void)
{
	int rc;

	rc = dax_inode_init();
	if (rc)
		return rc;

	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}
static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);