// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);
int dax_read_lock(void)
{
        return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
        srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
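/*
 * Example (an illustrative sketch, not part of the original file):
 * callers bracket dax operations with the SRCU read lock so that
 * kill_dax() can wait for in-flight operations via synchronize_srcu().
 * do_dax_work() is a hypothetical stand-in for any operation that
 * checks dax_alive().
 *
 *      int id = dax_read_lock();
 *
 *      if (dax_alive(dax_dev))
 *              do_dax_work(dax_dev);
 *      dax_read_unlock(id);
 */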
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
                pgoff_t *pgoff)
{
        phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

        if (pgoff)
                *pgoff = PHYS_PFN(phys_off);
        if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
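/*
 * Usage sketch (illustrative, not part of the original file): translate
 * a partition-relative sector into a page offset within the whole dax
 * device; the helper fails for ranges that are not page-aligned.
 *
 *      pgoff_t pgoff;
 *
 *      if (bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff))
 *              return -EINVAL;
 */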
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
        if (!blk_queue_dax(bdev->bd_disk->queue))
                return NULL;
        return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
bool __generic_fsdax_supported(struct dax_device *dax_dev,
                struct block_device *bdev, int blocksize, sector_t start,
                sector_t sectors)
{
        bool dax_enabled = false;
        pgoff_t pgoff, pgoff_end;
        char buf[BDEVNAME_SIZE];
        void *kaddr, *end_kaddr;
        pfn_t pfn, end_pfn;
        sector_t last_page;
        long len, len2;
        int err, id;

        if (blocksize != PAGE_SIZE) {
                pr_info("%s: error: unsupported blocksize for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        if (!dax_dev) {
                pr_debug("%s: error: dax unsupported by block device\n",
                                bdevname(bdev, buf));
                return false;
        }

        err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
        err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
        if (err) {
                pr_info("%s: error: unaligned partition for dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

        if (len < 1 || len2 < 1) {
                pr_info("%s: error: dax access failed (%ld)\n",
                                bdevname(bdev, buf), len < 1 ? len : len2);
                dax_read_unlock(id);
                return false;
        }

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
                /*
                 * An arch that has enabled the pmem api should also
                 * have its drivers support pfn_t_devmap()
                 *
                 * This is a developer warning and should not trigger in
                 * production. dax_flush() will crash since it depends
                 * on being able to do (page_address(pfn_to_page())).
                 */
                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
                dax_enabled = true;
        } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
                struct dev_pagemap *pgmap, *end_pgmap;

                pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
                end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
                if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
                                && pfn_t_to_page(pfn)->pgmap == pgmap
                                && pfn_t_to_page(end_pfn)->pgmap == pgmap
                                && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
                                && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
                        dax_enabled = true;
                put_dev_pagemap(pgmap);
                put_dev_pagemap(end_pgmap);
        }
        dax_read_unlock(id);

        if (!dax_enabled) {
                pr_info("%s: error: dax support not enabled\n",
                                bdevname(bdev, buf));
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
        struct dax_device *dax_dev;
        struct request_queue *q;
        char buf[BDEVNAME_SIZE];
        bool ret;
        int id;

        q = bdev_get_queue(bdev);
        if (!q || !blk_queue_dax(q)) {
                pr_debug("%s: error: request queue doesn't support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev) {
                pr_debug("%s: error: device does not support dax\n",
                                bdevname(bdev, buf));
                return false;
        }

        id = dax_read_lock();
        ret = dax_supported(dax_dev, bdev, blocksize, 0,
                        i_size_read(bdev->bd_inode) / 512);
        dax_read_unlock(id);

        put_dax(dax_dev);

        return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif /* CONFIG_BLOCK */
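/*
 * Mount-time sketch (illustrative, not part of the original file): a
 * filesystem handling a "dax" mount option would typically call the
 * bdev_dax_supported() wrapper before enabling dax semantics.
 * dax_requested is a hypothetical per-filesystem option flag.
 *
 *      if (dax_requested && !bdev_dax_supported(sb->s_bdev, PAGE_SIZE)) {
 *              pr_err("dax option not supported by this device\n");
 *              return -EINVAL;
 *      }
 */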
enum dax_device_flags {
        /* !alive + rcu grace period == no new operations / mappings */
        DAXDEV_ALIVE,
        /* gate whether dax_flush() calls the low level flush routine */
        DAXDEV_WRITE_CACHE,
        /* flag to check if device supports synchronous flush */
        DAXDEV_SYNC,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
        struct hlist_node list;
        struct inode inode;
        struct cdev cdev;
        const char *host;
        void *private;
        unsigned long flags;
        const struct dax_operations *ops;
};
static ssize_t write_cache_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
        ssize_t rc;

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
        put_dax(dax_dev);
        return rc;
}

static ssize_t write_cache_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool write_cache;
        int rc = strtobool(buf, &write_cache);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        if (rc)
                len = rc;
        else
                dax_write_cache(dax_dev, write_cache);

        put_dax(dax_dev);
        return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return 0;
        put_dax(dax_dev);

#ifndef CONFIG_ARCH_HAS_PMEM_API
        if (a == &dev_attr_write_cache.attr)
                return 0;
#endif
        return a->mode;
}

static struct attribute *dax_attributes[] = {
        &dev_attr_write_cache.attr,
        NULL,
};

struct attribute_group dax_attribute_group = {
        .name = "dax",
        .attrs = dax_attributes,
        .is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn)
{
        long avail;

        if (!dax_dev)
                return -EOPNOTSUPP;

        if (!dax_alive(dax_dev))
                return -ENXIO;

        if (nr_pages < 0)
                return nr_pages;

        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
        if (!avail)
                return -ERANGE;
        return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
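/*
 * Caller sketch (illustrative, not part of the original file): map the
 * first page of the device and copy it out through the returned kernel
 * address.  Assumes the caller holds dax_read_lock() and that dst is a
 * hypothetical PAGE_SIZE buffer.
 *
 *      void *kaddr;
 *      pfn_t pfn;
 *      long nr;
 *
 *      nr = dax_direct_access(dax_dev, 0, 1, &kaddr, &pfn);
 *      if (nr < 1)
 *              return nr < 0 ? nr : -EIO;
 *      memcpy(dst, kaddr, PAGE_SIZE);
 */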
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
                int blocksize, sector_t start, sector_t len)
{
        if (!dax_dev)
                return false;

        if (!dax_alive(dax_dev))
                return false;

        return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
EXPORT_SYMBOL_GPL(dax_supported);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                        size_t nr_pages)
{
        if (!dax_alive(dax_dev))
                return -ENXIO;
        /*
         * There are no callers that want to zero more than one page as of now.
         * Once users are there, this check can be removed after the
         * device mapper code has been updated to split ranges across targets.
         */
        if (nr_pages != 1)
                return -EIO;

        return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
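/*
 * Usage sketch (illustrative, not part of the original file): zero one
 * page at a page-aligned byte offset; the single-page restriction above
 * means nr_pages must currently be 1.
 *
 *      int rc = dax_zero_page_range(dax_dev, offset >> PAGE_SHIFT, 1);
 *
 *      if (rc)
 *              return rc;
 */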
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
        if (unlikely(!dax_write_cache_enabled(dax_dev)))
                return;

        arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
        if (wc)
                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
        else
                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);
bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);

bool __dax_synchronous(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);

void __set_dax_synchronous(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);

bool dax_alive(struct dax_device *dax_dev)
{
        lockdep_assert_held(&dax_srcu);
        return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
static int dax_host_hash(const char *host)
{
        return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;

        clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

        synchronize_srcu(&dax_srcu);

        spin_lock(&dax_host_lock);
        hlist_del_init(&dax_dev->list);
        spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
void run_dax(struct dax_device *dax_dev)
{
        set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);
static struct inode *dax_alloc_inode(struct super_block *sb)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
        if (!dax_dev)
                return NULL;

        inode = &dax_dev->inode;
        inode->i_rdev = 0;
        return inode;
}
static struct dax_device *to_dax_dev(struct inode *inode)
{
        return container_of(inode, struct dax_device, inode);
}
static void dax_free_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);

        kfree(dax_dev->host);
        dax_dev->host = NULL;
        if (inode->i_rdev)
                ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
        kmem_cache_free(dax_cache, dax_dev);
}
static void dax_destroy_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);

        WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
                        "kill_dax() must be called before final iput()\n");
}
static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .free_inode = dax_free_inode,
        .drop_inode = generic_delete_inode,
};
static int dax_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

        if (!ctx)
                return -ENOMEM;
        ctx->ops = &dax_sops;
        return 0;
}
static struct file_system_type dax_fs_type = {
        .name           = "dax",
        .init_fs_context = dax_init_fs_context,
        .kill_sb        = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        inode->i_rdev = devt;
        return 0;
}
static struct dax_device *dax_dev_get(dev_t devt)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, &devt);
        if (!inode)
                return NULL;

        dax_dev = to_dax_dev(inode);
        if (inode->i_state & I_NEW) {
                set_bit(DAXDEV_ALIVE, &dax_dev->flags);
                inode->i_cdev = &dax_dev->cdev;
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }

        return dax_dev;
}
static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
        int hash;

        /*
         * Unconditionally init dax_dev since it's coming from a
         * non-zeroed slab cache
         */
        INIT_HLIST_NODE(&dax_dev->list);
        dax_dev->host = host;
        if (!host)
                return;

        hash = dax_host_hash(host);
        spin_lock(&dax_host_lock);
        hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
        spin_unlock(&dax_host_lock);
}
struct dax_device *alloc_dax(void *private, const char *__host,
                const struct dax_operations *ops, unsigned long flags)
{
        struct dax_device *dax_dev;
        const char *host;
        dev_t devt;
        int minor;

        if (ops && !ops->zero_page_range) {
                pr_debug("%s: error: device does not provide dax"
                         " operation zero_page_range()\n",
                         __host ? __host : "Unknown");
                return ERR_PTR(-EINVAL);
        }

        host = kstrdup(__host, GFP_KERNEL);
        if (__host && !host)
                return ERR_PTR(-ENOMEM);

        minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
        if (minor < 0)
                goto err_minor;

        devt = MKDEV(MAJOR(dax_devt), minor);
        dax_dev = dax_dev_get(devt);
        if (!dax_dev)
                goto err_dev;

        dax_add_host(dax_dev, host);
        dax_dev->ops = ops;
        dax_dev->private = private;
        if (flags & DAXDEV_F_SYNC)
                set_dax_synchronous(dax_dev);

        return dax_dev;

 err_dev:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        kfree(host);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
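/*
 * Driver-side sketch (illustrative, not part of the original file): a
 * provider such as pmem registers its dax_operations and passes
 * DAXDEV_F_SYNC when no extra flushing is needed for persistence.
 * my_private and my_dax_ops are hypothetical; the ops must implement
 * zero_page_range().
 *
 *      dax_dev = alloc_dax(my_private, disk->disk_name, &my_dax_ops,
 *                      DAXDEV_F_SYNC);
 *      if (IS_ERR(dax_dev))
 *              return PTR_ERR(dax_dev);
 */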
void put_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;
        iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
        struct dax_device *dax_dev, *found = NULL;
        int hash, id;

        if (!host)
                return NULL;

        hash = dax_host_hash(host);

        id = dax_read_lock();
        spin_lock(&dax_host_lock);
        hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
                if (!dax_alive(dax_dev)
                                || strcmp(host, dax_dev->host) != 0)
                        continue;

                if (igrab(&dax_dev->inode))
                        found = dax_dev;
                break;
        }
        spin_unlock(&dax_host_lock);
        dax_read_unlock(id);

        return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
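/*
 * Lookup sketch (illustrative, not part of the original file): a
 * successful dax_get_by_host() takes an inode reference that the caller
 * must drop with put_dax() when done.
 *
 *      struct dax_device *dax_dev = dax_get_by_host(disk->disk_name);
 *
 *      if (!dax_dev)
 *              return -ENODEV;
 *      ...
 *      put_dax(dax_dev);
 */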
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
        struct cdev *cdev = inode->i_cdev;

        return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);
struct inode *dax_inode(struct dax_device *dax_dev)
{
        return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);
void *dax_get_private(struct dax_device *dax_dev)
{
        if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
                return NULL;
        return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
static void init_once(void *_dax_dev)
{
        struct dax_device *dax_dev = _dax_dev;
        struct inode *inode = &dax_dev->inode;

        memset(dax_dev, 0, sizeof(*dax_dev));
        inode_init_once(inode);
}
static int dax_fs_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        dax_mnt = kern_mount(&dax_fs_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        kmem_cache_destroy(dax_cache);

        return rc;
}
static void dax_fs_exit(void)
{
        kern_unmount(dax_mnt);
        kmem_cache_destroy(dax_cache);
}
static int __init dax_core_init(void)
{
        int rc;

        rc = dax_fs_init();
        if (rc)
                return rc;

        rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
        if (rc)
                goto err_chrdev;

        rc = dax_bus_init();
        if (rc)
                goto err_bus;
        return 0;

 err_bus:
        unregister_chrdev_region(dax_devt, MINORMASK+1);
 err_chrdev:
        dax_fs_exit();
        return rc;
}

static void __exit dax_core_exit(void)
{
        unregister_chrdev_region(dax_devt, MINORMASK+1);
        ida_destroy(&dax_minor_ida);
        dax_fs_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);