// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
        /*
         * nvdimm bus services need a 'dev' parameter, and we record the device
         * at init in bb.dev
         */
        return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
        return to_nd_region(to_dev(pmem)->parent);
}

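/*
 * Clear the memory-failure ("HWPoison") state for the pages backing a
 * pmem range that has just been successfully scrubbed, so the kernel
 * stops treating those pfns as poisoned.
 */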
static void hwpoison_clear(struct pmem_device *pmem,
                phys_addr_t phys, unsigned int len)
{
        unsigned long pfn_start, pfn_end, pfn;

        /* only pmem in the linear map supports HWPoison */
        if (is_vmalloc_addr(pmem->virt_addr))
                return;

        pfn_start = PHYS_PFN(phys);
        pfn_end = pfn_start + PHYS_PFN(len);
        for (pfn = pfn_start; pfn < pfn_end; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * Note, no need to hold a get_dev_pagemap() reference
                 * here since we're in the driver I/O path and
                 * outstanding I/O requests pin the dev_pagemap.
                 */
                if (test_and_clear_pmem_poison(page))
                        clear_mce_nospec(pfn);
        }
}

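/*
 * Ask the nvdimm bus to clear poison for a byte range, then reconcile
 * driver state: drop HWPoison page flags, update the badblocks list,
 * notify sysfs 'badblocks' watchers, and invalidate any stale CPU cache
 * lines over the range.
 */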
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
{
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
        blk_status_t rc = BLK_STS_OK;

        sector = (offset - pmem->data_offset) / 512;

        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
                rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
                cleared /= 512;
                dev_dbg(dev, "%#llx clear %ld sector%s\n",
                                (unsigned long long) sector, cleared,
                                cleared > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared);
                if (pmem->bb_state)
                        sysfs_notify_dirent(pmem->bb_state);
        }

        arch_invalidate_pmem(pmem->virt_addr + offset, len);

        return rc;
}

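/*
 * Copy from a (possibly highmem) page into pmem with memcpy_flushcache()
 * so the stores are pushed out of the CPU cache and the data is durable
 * without an explicit flush of each cache line afterwards.
 */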
static void write_pmem(void *pmem_addr, struct page *page,
                unsigned int off, unsigned int len)
{
        unsigned int chunk;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
}

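/*
 * Copy from pmem into a page with a machine-check safe copy so that
 * consumed poison is reported as an I/O error instead of crashing the
 * kernel.
 */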
static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
{
        unsigned int chunk;
        unsigned long rem;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
                        return BLK_STS_IOERR;
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
        return BLK_STS_OK;
}

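/* Read one bio segment worth of data, failing fast on known bad blocks. */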
static blk_status_t pmem_do_read(struct pmem_device *pmem,
                        struct page *page, unsigned int page_off,
                        sector_t sector, unsigned int len)
{
        blk_status_t rc;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                return BLK_STS_IOERR;

        rc = read_pmem(page, page_off, pmem_addr, len);
        flush_dcache_page(page);
        return rc;
}

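/*
 * Write one bio segment worth of data; if the target range is on the
 * badblocks list, also attempt to clear the poison (see the comment in
 * the function body for the write-before-and-after-clear ordering).
 */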
static blk_status_t pmem_do_write(struct pmem_device *pmem,
                        struct page *page, unsigned int page_off,
                        sector_t sector, unsigned int len)
{
        blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;

        /*
         * Note that we write the data both before and after
         * clearing poison.  The write before clear poison
         * handles situations where the latest written data is
         * preserved and the clear poison operation simply marks
         * the address range as valid without changing the data.
         * In this case application software can assume that an
         * interrupted write will either return the new good
         * data or an error.
         *
         * However, if pmem_clear_poison() leaves the data in an
         * indeterminate state we need to perform the write
         * after clear poison.
         */
        flush_dcache_page(page);
        write_pmem(pmem_addr, page, page_off, len);
        if (unlikely(bad_pmem)) {
                rc = pmem_clear_poison(pmem, pmem_off, len);
                write_pmem(pmem_addr, page, page_off, len);
        }

        return rc;
}

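/*
 * bio submission entry point: honor REQ_PREFLUSH/REQ_FUA via
 * nvdimm_flush(), account the I/O, and copy each segment synchronously
 * since pmem I/O is just a memory copy with no completion interrupt.
 */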
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
        int ret = 0;
        blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct pmem_device *pmem = bio->bi_disk->private_data;
        struct nd_region *nd_region = to_region(pmem);

        if (bio->bi_opf & REQ_PREFLUSH)
                ret = nvdimm_flush(nd_region, bio);

        do_acct = blk_queue_io_stat(bio->bi_disk->queue);
        if (do_acct)
                start = bio_start_io_acct(bio);
        bio_for_each_segment(bvec, bio, iter) {
                if (op_is_write(bio_op(bio)))
                        rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
                                iter.bi_sector, bvec.bv_len);
                else
                        rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
                                iter.bi_sector, bvec.bv_len);
                if (rc) {
                        bio->bi_status = rc;
                        break;
                }
        }
        if (do_acct)
                bio_end_io_acct(bio, start);

        if (bio->bi_opf & REQ_FUA)
                ret = nvdimm_flush(nd_region, bio);

        if (ret)
                bio->bi_status = errno_to_blk_status(ret);

        bio_endio(bio);
        return BLK_QC_T_NONE;
}

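/* Synchronous single-page read/write path used by the ->rw_page hook. */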
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        struct pmem_device *pmem = bdev->bd_disk->private_data;
        blk_status_t rc;

        if (op_is_write(op))
                rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
        else
                rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
         * the successful completion case.  Otherwise, we'll see crashes
         * caused by double completion.
         */
        if (rc == 0)
                page_endio(page, op_is_write(op), 0);

        return blk_status_to_errno(rc);
}

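/*
 * Translate a page offset into a kernel virtual address and pfn for DAX.
 * Returns the number of contiguous pages that may be accessed at that
 * offset, or -EIO if the requested range overlaps known poison.
 */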
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

        if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
                                        PFN_PHYS(nr_pages))))
                return -EIO;

        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

        /*
         * If badblocks are present, limit known good range to the
         * requested range.
         */
        if (unlikely(pmem->bb.count))
                return nr_pages;
        return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
        .owner =                THIS_MODULE,
        .submit_bio =           pmem_submit_bio,
        .rw_page =              pmem_rw_page,
        .revalidate_disk =      nvdimm_revalidate_disk,
};

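/*
 * DAX zero_page_range operation: route zeroing through pmem_do_write()
 * so that known-poisoned blocks are cleared as a side effect.
 */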
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
                                    size_t nr_pages)
{
        struct pmem_device *pmem = dax_get_private(dax_dev);

        return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
                                   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
                                   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct pmem_device *pmem = dax_get_private(dax_dev);

        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
        .dax_supported = generic_fsdax_supported,
        .copy_from_iter = pmem_copy_from_iter,
        .copy_to_iter = pmem_copy_to_iter,
        .zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
        &dax_attribute_group,
        NULL,
};

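/*
 * dev_pagemap lifetime hooks: the pagemap's reference count is the
 * request queue's q_usage_counter, so killing and cleaning up the
 * pagemap map onto freezing and tearing down the queue.
 */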
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        struct request_queue *q =
                container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
        pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
        struct request_queue *q =
                container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
        struct pmem_device *pmem = __pmem;

        kill_dax(pmem->dax_dev);
        put_dax(pmem->dax_dev);
        del_gendisk(pmem->disk);
        put_disk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
        .kill = pmem_pagemap_kill,
        .cleanup = pmem_pagemap_cleanup,
};

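/*
 * Set up a pmem namespace as a block device (and DAX device): map the
 * namespace (optionally with struct pages for fsdax), configure the
 * request queue, allocate the gendisk and dax_device, and populate the
 * badblocks list from the region's poison records.
 */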
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
        struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;
        unsigned long flags = 0UL;

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return -ENOMEM;

        rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
        if (rc)
                return rc;

        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
                rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
                if (rc)
                        return rc;
        }

        /* we're attaching a block device, disable raw namespace access */
        devm_namespace_disable(dev, ndns);

        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        fua = nvdimm_has_flush(nd_region);
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }

        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        q = blk_alloc_queue(dev_to_node(dev));
        if (!q)
                return -ENOMEM;

        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        if (is_nd_pfn(dev)) {
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) -
                        resource_size(&pmem->pgmap.res);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
                bb_res.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else {
                if (devm_add_action_or_reset(dev, pmem_release_queue,
                                        &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
        }

        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = addr;

        blk_queue_write_cache(q, true, fua);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        if (pmem->pfn_flags & PFN_MAP)
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);

        disk = alloc_disk_node(0, nid);
        if (!disk)
                return -ENOMEM;
        pmem->disk = disk;

        disk->fops = &pmem_fops;
        disk->queue = q;
        disk->flags = GENHD_FL_EXT_DEVT;
        disk->private_data = pmem;
        disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
        disk->bb = &pmem->bb;

        if (is_nvdimm_sync(nd_region))
                flags = DAXDEV_F_SYNC;
        dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
        if (IS_ERR(dax_dev)) {
                put_disk(disk);
                return PTR_ERR(dax_dev);
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
        gendev = disk_to_dev(disk);
        gendev->groups = pmem_attribute_groups;

        device_add_disk(dev, disk, NULL);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;

        revalidate_disk(disk);

        pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
                                          "badblocks");
        if (!pmem->bb_state)
                dev_warn(dev, "'badblocks' notification disabled\n");

        return 0;
}

static int nd_pmem_probe(struct device *dev)
{
        int ret;
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return pmem_attach_disk(dev, ndns);

        ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
        if (ret)
                return ret;

        ret = nd_btt_probe(dev, ndns);
        if (ret == 0)
                return -ENXIO;

        /*
         * We have two failure conditions here, there is no
         * info reserve block or we found a valid info reserve block
         * but failed to initialize the pfn superblock.
         *
         * For the first case consider namespace as a raw pmem namespace
         * and attach a disk.
         *
         * For the latter, consider this a success and advance the namespace
         * seed.
         */
        ret = nd_pfn_probe(dev, ndns);
        if (ret == 0)
                return -ENXIO;
        else if (ret == -EOPNOTSUPP)
                return ret;

        ret = nd_dax_probe(dev, ndns);
        if (ret == 0)
                return -ENXIO;
        else if (ret == -EOPNOTSUPP)
                return ret;

        /* probe complete, attach handles namespace enabling */
        devm_namespace_disable(dev, ndns);

        return pmem_attach_disk(dev, ndns);
}

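/* Driver remove: detach BTT if present and flush the region on the way out. */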
static int nd_pmem_remove(struct device *dev)
{
        struct pmem_device *pmem = dev_get_drvdata(dev);

        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        else {
                /*
                 * Note, this assumes nd_device_lock() context to not
                 * race nd_pmem_notify()
                 */
                sysfs_put(pmem->bb_state);
                pmem->bb_state = NULL;
        }
        nvdimm_flush(to_nd_region(dev->parent), NULL);

        return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
        nvdimm_flush(to_nd_region(dev->parent), NULL);
}

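/*
 * NVDIMM_REVALIDATE_POISON notification: recompute the resource range
 * that backs this device (accounting for pfn metadata padding) and
 * repopulate its badblocks list.
 */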
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct resource res;
        struct badblocks *bb;
        struct kernfs_node *bb_state;

        if (event != NVDIMM_REVALIDATE_POISON)
                return;

        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                ndns = nd_btt->ndns;
                nd_region = to_nd_region(ndns->dev.parent);
                nsio = to_nd_namespace_io(&ndns->dev);
                bb = &nsio->bb;
                bb_state = NULL;
        } else {
                struct pmem_device *pmem = dev_get_drvdata(dev);

                nd_region = to_region(pmem);
                bb = &pmem->bb;
                bb_state = pmem->bb_state;

                if (is_nd_pfn(dev)) {
                        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

                        ndns = nd_pfn->ndns;
                        offset = pmem->data_offset +
                                        __le32_to_cpu(pfn_sb->start_pad);
                        end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                } else {
                        ndns = to_ndns(dev);
                }

                nsio = to_nd_namespace_io(&ndns->dev);
        }

        res.start = nsio->res.start + offset;
        res.end = nsio->res.end - end_trunc;
        nvdimm_badblocks_populate(nd_region, bb, &res);
        if (bb_state)
                sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

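/* nd bus driver registration for raw and pfn/dax-backed pmem namespaces. */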
static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .notify = nd_pmem_notify,
        .shutdown = nd_pmem_shutdown,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");