/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
struct pmem_device {
        /* One contiguous memory region per device */
        phys_addr_t             phys_addr;
        /* when non-zero this device is hosting a 'pfn' instance */
        phys_addr_t             data_offset;
        u64                     pfn_flags;
        void __pmem             *virt_addr;
        /* immutable base size of the namespace */
        size_t                  size;
        /* trim size when namespace capacity has been section aligned */
        u32                     pfn_pad;
        struct badblocks        bb;
};
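/*
 * Ask the nvdimm core to clear a poisoned media range, then retire the
 * corresponding sectors from the badblocks list and invalidate any
 * cached view of that range.
 */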
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
                unsigned int len)
{
        struct device *dev = pmem->bb.dev;
        sector_t sector;
        long cleared;

        sector = (offset - pmem->data_offset) / 512;
        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

        if (cleared > 0 && cleared / 512) {
                dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
                                __func__, (unsigned long long) sector,
                                cleared / 512, cleared / 512 > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared / 512);
        }
        invalidate_pmem(pmem->virt_addr + offset, len);
}
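/*
 * Transfer one bio_vec worth of data between a page and the pmem
 * mapping.  Reads from known-poisoned sectors fail; writes attempt to
 * clear the poison, see the comment in the write path below.
 */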
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, int rw,
                        sector_t sector)
{
        int rc = 0;
        bool bad_pmem = false;
        void *mem = kmap_atomic(page);
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;

        if (rw == READ) {
                if (unlikely(bad_pmem))
                        rc = -EIO;
                else {
                        rc = memcpy_from_pmem(mem + off, pmem_addr, len);
                        flush_dcache_page(page);
                }
        } else {
                /*
                 * Note that we write the data both before and after
                 * clearing poison.  The write before clear poison
                 * handles situations where the latest written data is
                 * preserved and the clear poison operation simply marks
                 * the address range as valid without changing the data.
                 * In this case application software can assume that an
                 * interrupted write will either return the new good
                 * data or an error.
                 *
                 * However, if pmem_clear_poison() leaves the data in an
                 * indeterminate state we need to perform the write
                 * after clear poison.
                 */
                flush_dcache_page(page);
                memcpy_to_pmem(pmem_addr, mem + off, len);
                if (unlikely(bad_pmem)) {
                        pmem_clear_poison(pmem, pmem_off, len);
                        memcpy_to_pmem(pmem_addr, mem + off, len);
                }
        }

        kunmap_atomic(mem);
        return rc;
}
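/*
 * Bio-based make_request entry point: process the bio one segment at a
 * time, with nd_iostat accounting around the loop, and complete it
 * synchronously.
 */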
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
        int rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct pmem_device *pmem = q->queuedata;

        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset, bio_data_dir(bio),
                                iter.bi_sector);
                if (rc) {
                        bio->bi_error = rc;
                        break;
                }
        }
        if (do_acct)
                nd_iostat_end(bio, start);

        if (bio_data_dir(bio))
                wmb_pmem();

        bio_endio(bio);
        return BLK_QC_T_NONE;
}
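/*
 * ->rw_page handler: synchronous, single-page read or write.  Note the
 * completion rules spelled out below; only successful I/O may call
 * page_endio() here.
 */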
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, int rw)
{
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        int rc;

        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
        if (rw & WRITE)
                wmb_pmem();

        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
         * the successful completion case.  Otherwise, we'll see crashes
         * caused by double completion.
         */
        if (rc == 0)
                page_endio(page, rw & WRITE, 0);

        return rc;
}
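/*
 * ->direct_access handler for DAX: translate a sector to a kernel
 * virtual address and pfn, refusing known-poisoned ranges, and return
 * how many bytes past the translated offset remain usable.
 */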
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
                      void __pmem **kaddr, pfn_t *pfn, long size)
{
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        resource_size_t offset = sector * 512 + pmem->data_offset;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
                return -EIO;
        *kaddr = pmem->virt_addr + offset;
        *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

        /*
         * If badblocks are present, limit known good range to the
         * requested range.
         */
        if (unlikely(pmem->bb.count))
                return size;
        return pmem->size - pmem->pfn_pad - offset;
}
static const struct block_device_operations pmem_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              pmem_rw_page,
        .direct_access =        pmem_direct_access,
        .revalidate_disk =      nvdimm_revalidate_disk,
};
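/* devm release actions: tear down the request queue and gendisk on unbind */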
static void pmem_release_queue(void *q)
{
        blk_cleanup_queue(q);
}
static void pmem_release_disk(void *disk)
{
        del_gendisk(disk);
        put_disk(disk);
}
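/*
 * Common attach path for raw, 'pfn'-hosted, and page-mapped namespaces:
 * map the region, set up the request queue and gendisk, and seed the
 * badblocks list from the parent region's poison records.
 */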
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct vmem_altmap __altmap, *altmap = NULL;
        struct resource *res = &nsio->res;
        struct nd_pfn *nd_pfn = NULL;
        int nid = dev_to_node(dev);
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct resource pfn_res;
        struct request_queue *q;
        struct gendisk *disk;
        void *addr;

        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
                altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
                if (IS_ERR(altmap))
                        return PTR_ERR(altmap);
        }

        /* we're attaching a block device, disable raw namespace access */
        devm_nsio_disable(dev, nsio);

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return -ENOMEM;

        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        if (!arch_has_wmb_pmem())
                dev_warn(dev, "unable to guarantee persistence of writes\n");

        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
        if (!q)
                return -ENOMEM;

        pmem->pfn_flags = PFN_DEV;
        if (is_nd_pfn(dev)) {
                addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
                                altmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
                pmem->pfn_flags |= PFN_MAP;
                res = &pfn_res; /* for badblocks populate */
                res->start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
                addr = devm_memremap_pages(dev, &nsio->res,
                                &q->q_usage_counter, NULL);
                pmem->pfn_flags |= PFN_MAP;
        } else
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);

        /*
         * At release time the queue must be dead before
         * devm_memremap_pages is unwound
         */
        if (devm_add_action(dev, pmem_release_queue, q)) {
                blk_cleanup_queue(q);
                return -ENOMEM;
        }

        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = (void __pmem *) addr;

        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        q->queuedata = pmem;

        disk = alloc_disk_node(0, nid);
        if (!disk)
                return -ENOMEM;
        if (devm_add_action(dev, pmem_release_disk, disk)) {
                put_disk(disk);
                return -ENOMEM;
        }

        disk->fops = &pmem_fops;
        disk->queue = q;
        disk->flags = GENHD_FL_EXT_DEVT;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        disk->driverfs_dev = dev;
        set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
        disk->bb = &pmem->bb;
        add_disk(disk);
        revalidate_disk(disk);

        return 0;
}
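/*
 * Probe: resolve the namespace, attach BTT or pfn personalities that
 * have already claimed it, otherwise look for a valid BTT/PFN/DAX info
 * block before falling back to a raw pmem disk.
 */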
static int nd_pmem_probe(struct device *dev)
{
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
                return -ENXIO;

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return pmem_attach_disk(dev, ndns);

        /* if we find a valid info-block we'll come back as that personality */
        if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
                        || nd_dax_probe(dev, ndns) == 0)
                return -ENXIO;

        /* ...otherwise we're just a raw pmem device */
        return pmem_attach_disk(dev, ndns);
}
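/* Unbind: only a BTT personality needs an explicit detach; the rest is devm-managed */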
static int nd_pmem_remove(struct device *dev)
{
        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        return 0;
}
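/*
 * NVDIMM_REVALIDATE_POISON notification: recompute the namespace's data
 * range, accounting for BTT/PFN metadata offsets, and refresh the
 * badblocks list from the region's poison records.
 */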
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct pmem_device *pmem = dev_get_drvdata(dev);
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct resource res;

        if (event != NVDIMM_REVALIDATE_POISON)
                return;

        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                ndns = nd_btt->ndns;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

                ndns = nd_pfn->ndns;
                offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
                end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
        } else
                ndns = to_ndns(dev);

        nsio = to_nd_namespace_io(&ndns->dev);
        res.start = nsio->res.start + offset;
        res.end = nsio->res.end - end_trunc;
        nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
378 MODULE_ALIAS("pmem");
379 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO
);
380 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM
);
static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .notify = nd_pmem_notify,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
static int __init pmem_init(void)
{
        return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);
static void pmem_exit(void)
{
        driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");