/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	void __pmem		*virt_addr;
	size_t			size;
};
static int pmem_major;
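
/*
 * Copy one bio_vec worth of data between a kmap'd page and persistent
 * memory. Reads flush the destination page's dcache after the copy;
 * writes flush it before copying into pmem.
 */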
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		memcpy_from_pmem(mem + off, pmem_addr, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
}
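
/*
 * Service a bio one segment at a time, with optional iostat
 * accounting. Writes are made durable with a single wmb_pmem() once
 * all segments have been copied.
 */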
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter)
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
				bio_data_dir(bio), iter.bi_sector);
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();
	page_endio(page, rw & WRITE, 0);

	return 0;
}
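
/*
 * DAX entry point: translate a sector to a kernel virtual address and
 * pfn, and return how many bytes remain valid at that offset.
 */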
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, unsigned long *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

	return pmem->size - offset;
}
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
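
/*
 * Reserve and map the namespace's physical range. When the device is
 * configured for page-backed access, devm_memremap_pages() provides a
 * struct page backed mapping; otherwise a plain devm_memremap()
 * suffices.
 */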
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	if (pmem_should_map_pages(dev))
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
	else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr))
		return (void __force *) pmem->virt_addr;

	return pmem;
}
static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}
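
/*
 * Allocate the request queue and gendisk, size the disk to the
 * namespace capacity minus any pfn metadata offset, and publish it.
 */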
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
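
/*
 * Raw byte access to the namespace, used by claiming personalities
 * (BTT, PFN) to read and write their metadata.
 */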
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ)
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}
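
/*
 * Initialize (or validate an existing) pfn superblock: compute a data
 * offset that leaves room for the memmap, then persist the signature,
 * uuid, mode, npfns, and checksum at a 4K offset into the namespace.
 */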
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == 0 || rc == -EBUSY)
		return rc;

	/* section alignment for simple hotplug */
	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
			|| pmem->phys_addr & ND_PFN_MASK)
		return -ENODEV;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = SZ_8K;
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}
static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}
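
/*
 * Bring up a pfn-mode pmem disk: validate the superblock and
 * alignment constraints, trade the linear mapping for a page-backed
 * one via devm_memremap_pages(), then attach the disk shifted by the
 * superblock's data offset.
 */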
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct vmem_altmap *altmap;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	phys_addr_t offset;
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	if (PAGE_SIZE != SZ_4K) {
		dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
		return -ENXIO;
	}
	if (nsio->res.start & ND_PFN_MASK) {
		dev_err(dev, "%s not memory hotplug section aligned\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset != SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}
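
/*
 * Probe order: attach directly when called back as a btt or pfn
 * device; otherwise offer the namespace to the btt and pfn
 * personalities before falling back to a raw pmem disk.
 */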
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0) {
		/* we'll come back as btt-pmem */
		return -ENXIO;
	}

	if (nd_pfn_probe(ndns, pmem) == 0) {
		/* we'll come back as pfn-pmem */
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
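
/*
 * Module init: grab a dynamic block major for the pmem disks and
 * register the libnvdimm bus driver, unwinding the major on failure.
 */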
static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);
static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");