/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

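/*
 * The pmem driver exposes persistent-memory namespaces as block devices
 * and DAX devices: pmem_make_request() and pmem_rw_page() service block
 * I/O with direct loads and stores, while pmem_dax_ops gives filesystem
 * DAX page-granular access to the same media.
 */
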
static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

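/*
 * Ask the nvdimm bus to clear poison (media errors) in the given byte
 * range, then drop any fully-cleared sectors from the badblocks list and
 * notify the gendisk's 'badblocks' sysfs node so userspace can re-read it.
 */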
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

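/*
 * Helpers that copy between a (possibly highmem) page and pmem in
 * PAGE_SIZE chunks via kmap_atomic().  Writes use memcpy_flushcache() so
 * the stores are pushed toward the persistence domain; reads use
 * memcpy_mcsafe() so that consuming poison is reported as BLK_STS_IOERR
 * instead of taking an unrecoverable machine check.
 */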
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
}

static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	int rc;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rc)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
	return BLK_STS_OK;
}

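/*
 * Service a single bio_vec.  Reads from a known-bad range fail fast;
 * writes always hit the media and, when the range was poisoned, are
 * followed by a poison clear and a second write (see the comment in the
 * write path below).
 */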
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

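/*
 * Bio submission entry point, registered with blk_queue_make_request() in
 * pmem_attach_disk().  I/O completes synchronously, one segment at a time,
 * with nvdimm_flush() of the parent region before the data for REQ_FLUSH
 * and after it for REQ_FUA.
 */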
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

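/*
 * ->rw_page() entry point: synchronous single-page I/O used by the core
 * for paths that avoid allocating a bio (for example swap to a pmem
 * device).
 */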
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

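/*
 * Translate a page offset within the namespace into a kernel virtual
 * address and pfn for DAX.  Ranges overlapping known badblocks return
 * -EIO rather than handing out a mapping to poisoned media.
 */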
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

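/*
 * DAX operations registered through alloc_dax() in pmem_attach_disk().
 * A DAX-aware filesystem reaches pmem_dax_direct_access() via the generic
 * dax_direct_access() helper, roughly (illustrative sketch only, not code
 * from this driver):
 *
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			&kaddr, &pfn);
 *
 * and then operates on the returned kernel mapping directly.
 */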
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

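/*
 * devm-managed teardown helpers for pmem_attach_disk(), registered with
 * devm_add_action_or_reset() so the request queue, the memory mapping and
 * the gendisk/dax_device pair are unwound automatically on probe failure
 * or device removal.
 */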
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

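/*
 * Core setup path: map the namespace (optionally through a pfn info
 * block), configure a bio-based request queue, allocate the gendisk and
 * dax_device, populate badblocks from the region, and hook everything up
 * to devm-managed unwind actions.
 */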
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

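	/*
	 * Three mapping modes: a pfn namespace reuses the pgmap prepared by
	 * nvdimm_setup_pfn() and takes its data offset/padding from the info
	 * block; otherwise pmem_should_map_pages() selects a
	 * devm_memremap_pages() mapping with struct page backing; the
	 * fallback is a plain devm_memremap() of the range (pfn_flags keeps
	 * PFN_DEV without PFN_MAP).
	 */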
	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

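/*
 * NVDIMM_REVALIDATE_POISON handler: recompute the namespace's data range
 * (accounting for pfn info block start_pad/end_trunc when present),
 * repopulate the badblocks list from the region, and notify the
 * 'badblocks' sysfs node so userspace sees the update.
 */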
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");