/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};
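
/*
 * Layout note: phys_addr/virt_addr map the entire namespace,
 * data_offset skips any 'pfn' metadata reserved at its start, and
 * pfn_pad is capacity lost to section alignment.  The usable
 * block-device capacity is therefore size - pfn_pad - data_offset.
 */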
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}
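
/*
 * Note: the 'offset' handed to pmem_clear_poison() is relative to
 * virt_addr (it already includes data_offset), so the badblocks sector
 * is recovered by subtracting data_offset before dividing by 512.
 */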
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}
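
/*
 * Example of the poisoned-write path above: the payload is copied to
 * pmem first (covering the case where clearing poison preserves the
 * media contents), then the poison is cleared and the copy repeated in
 * case the clear left the range indeterminate.  Reads of a poisoned
 * range return -EIO without touching the media.
 */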
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}
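
/*
 * The value returned by ->direct_access() is the number of bytes that
 * may be accessed contiguously starting at *kaddr, i.e. everything from
 * 'offset' to the end of the usable (non-padded) capacity.
 */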
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}
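
/*
 * pmem_alloc() chooses between two mappings: devm_memremap_pages()
 * provides struct page backing and sets PFN_MAP in pfn_flags, while
 * the plain devm_memremap() path maps the range without struct pages
 * and leaves pfn_flags as PFN_DEV only.
 */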
static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int nid = dev_to_node(dev);
	struct resource bb_res;
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	bb_res.start = nsio->res.start + pmem->data_offset;
	bb_res.end = nsio->res.end;
	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
			&bb_res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}
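
/*
 * pmem_rw_bytes() is installed as ndns->rw_bytes in nd_pmem_probe() and
 * is the path by which btt/pfn metadata (for example the info block
 * written by nd_pfn_init() below) reaches the media through the claimed
 * namespace.
 */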
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {

		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
			- start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		goto err;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		goto err;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}
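
/*
 * Summary of the info block written above: it lives at byte offset 4K
 * of the namespace and records the mode, data offset, pfn count,
 * start_pad/end_trunc adjustments, uuids and a checksum, all in
 * little-endian on-media format.
 */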
static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}
/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}
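
/*
 * Worked example (assuming 128MB memory sections, as on x86_64): for a
 * namespace that starts 16MB into a section, init_altmap_base() returns
 * the pfn of the section start and init_altmap_reserve() returns the
 * two info-block pages (SZ_8K) plus the 16MB of padding back to that
 * section boundary, expressed in pages.
 */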
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = & __altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}
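
/*
 * In PFN_MODE_PMEM the remap above is given an altmap so that the
 * struct page array itself is carved out of the reserved head of the
 * namespace (the altmap->free pages past the 8K info block) instead of
 * regular RAM.
 */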
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}
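
/*
 * Probe flow recap: a namespace already claimed by a btt or pfn device
 * is attached in that mode (the raw queue is dropped or remapped as
 * needed); otherwise nd_btt_probe()/nd_pfn_probe() get a chance to
 * claim it, in which case we return -ENXIO and come back as btt-pmem or
 * pfn-pmem; only a completely unclaimed namespace becomes a raw pmem
 * disk here.
 */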
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_namespace_common *ndns = pmem->ndns;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct resource res = {
		.start = nsio->res.start + pmem->data_offset,
		.end = nsio->res.end,
	};

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		res.start += __le32_to_cpu(pfn_sb->start_pad);
		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}

	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);
static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");