// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/kstrtox.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"
static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}
static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
	return pmem->phys_addr + offset;
}

static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset)
{
	return (offset - pmem->data_offset) >> SECTOR_SHIFT;
}

static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
{
	return (sector << SECTOR_SHIFT) + pmem->data_offset;
}
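
/*
 * Re-enable access to pages whose poison has just been cleared: drop the
 * per-page HWPoison state and restore the kernel linear-map entry that the
 * machine-check handler marked not-present.
 */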
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}
static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
{
	if (blks == 0)
		return;
	badblocks_clear(&pmem->bb, sector, blks);
	if (pmem->bb_state)
		sysfs_notify_dirent(pmem->bb_state);
}
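
/*
 * Ask the nvdimm bus to clear poison over the given range, then make the
 * affected pages accessible again and invalidate any stale CPU cachelines
 * covering that range. Returns the number of bytes cleared or a negative
 * error code.
 */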
static long __pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	phys_addr_t phys = pmem_to_phys(pmem, offset);
	long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);

	if (cleared > 0) {
		pmem_mkpage_present(pmem, offset, cleared);
		arch_invalidate_pmem(pmem->virt_addr + offset, len);
	}
	return cleared;
}
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	long cleared = __pmem_clear_poison(pmem, offset, len);

	if (cleared < 0)
		return BLK_STS_IOERR;

	pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT);
	if (cleared < len)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}
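
/*
 * Copy between a (possibly highmem) page and pmem one kernel mapping at a
 * time. Writes use memcpy_flushcache() to push the data out of the CPU
 * cache toward the persistence domain; reads use the machine-check-safe
 * copy so consumed poison is reported as an I/O error instead of crashing
 * the kernel.
 */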
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}
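
/*
 * Sector-based I/O helpers shared by the bio path and the DAX
 * zero_page_range path. Reads fail fast on known badblocks; writes first
 * attempt to clear poison in the target range and only then copy new data.
 */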
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}
static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	phys_addr_t pmem_off = to_offset(pmem, sector);
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) {
		blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len);

		if (rc != BLK_STS_OK)
			return rc;
	}

	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	return BLK_STS_OK;
}
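
/*
 * bio-based I/O path: honor REQ_PREFLUSH before touching data and REQ_FUA
 * afterwards, walk the bio segment by segment, and record the first
 * failure in bio->bi_status.
 */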
static void pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
}
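
/*
 * Translate a page offset within the namespace into a kernel virtual
 * address and pfn for DAX, honoring the badblocks list: if the requested
 * range contains badblocks, normal access fails with -EHWPOISON, while a
 * DAX_RECOVERY_WRITE caller gets a page count (at least one) reaching up
 * to the first poisoned page so recovery can proceed in page-sized strides.
 */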
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
	sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT;
	unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
	struct badblocks *bb = &pmem->bb;
	sector_t first_bad;
	int num_bad;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	if (bb->count &&
	    badblocks_check(bb, sector, num, &first_bad, &num_bad)) {
		long actual_nr;

		if (mode != DAX_RECOVERY_WRITE)
			return -EHWPOISON;

		/*
		 * The recovery stride is set to the kernel page size
		 * because the underlying driver and firmware clear-poison
		 * functions don't appear to handle large chunks (such as
		 * 2MiB) reliably.
		 */
		actual_nr = PHYS_PFN(
			PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT));
		dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n",
				sector, nr_pages, first_bad, actual_nr);
		if (actual_nr)
			return actual_nr;
		return 1;
	}

	/*
	 * If badblocks are present but not in the range, limit known good range
	 * to the requested range.
	 */
	if (bb->count)
		return nr_pages;

	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
};
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
		void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}
/*
 * The recovery write thread started out as a normal pwrite thread. When
 * the filesystem was told about a potential media error in the range, it
 * turned the normal pwrite into a dax_recovery_write.
 *
 * The recovery write consists of clearing media poison, clearing the page
 * HWPoison bit, re-enabling page-wide read-write permission, flushing the
 * caches and finally writing. A competing pread thread needs to be held
 * off during the recovery process since the data read back might not be
 * valid; this is achieved by clearing the badblock records only after the
 * recovery write completes. Competing recovery write threads are already
 * serialized by the writer lock held by dax_iomap_rw().
 */
static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);
	size_t olen, len, off;
	phys_addr_t pmem_off;
	struct device *dev = pmem->bb.dev;
	long cleared;

	off = offset_in_page(addr);
	len = PFN_PHYS(PFN_UP(off + bytes));
	if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len))
		return _copy_from_iter_flushcache(addr, bytes, i);

	/*
	 * A range that is not page aligned cannot be recovered. This should
	 * not happen unless something else went wrong.
	 */
	if (off || !PAGE_ALIGNED(bytes)) {
		dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n",
				addr, bytes);
		return 0;
	}

	pmem_off = PFN_PHYS(pgoff) + pmem->data_offset;
	cleared = __pmem_clear_poison(pmem, pmem_off, len);
	if (cleared > 0 && cleared < len) {
		dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n",
				cleared, len);
		return 0;
	}
	if (cleared < 0) {
		dev_dbg(dev, "poison clear failed: %ld\n", cleared);
		return 0;
	}

	olen = _copy_from_iter_flushcache(addr, bytes, i);
	pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT);

	return olen;
}
static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.zero_page_range = pmem_dax_zero_page_range,
	.recovery_write = pmem_recovery_write,
};
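
/*
 * Expose a "write_cache" attribute so userspace can query or toggle
 * whether the DAX path flushes CPU caches on write.
 */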
static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;

	return sprintf(buf, "%d\n", !!dax_write_cache_enabled(pmem->dax_dev));
}

static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct pmem_device *pmem = dev_to_disk(dev)->private_data;
	bool write_cache;
	int rc;

	rc = kstrtobool(buf, &write_cache);
	if (rc)
		return rc;
	dax_write_cache(pmem->dax_dev, write_cache);
	return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}

static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

static const struct attribute_group dax_attribute_group = {
	.name		= "dax",
	.attrs		= dax_attributes,
	.is_visible	= dax_visible,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};
static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	dax_remove_host(pmem->disk);
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);

	put_disk(pmem->disk);
}
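
/*
 * Memory-failure (poison) notification from the dev_pagemap: convert the
 * failing pfn into a byte offset relative to the namespace data area and
 * forward it to the dax holder (typically the filesystem).
 */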
static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
		unsigned long pfn, unsigned long nr_pages, int mf_flags)
{
	struct pmem_device *pmem =
			container_of(pgmap, struct pmem_device, pgmap);
	u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
	u64 len = nr_pages << PAGE_SHIFT;

	return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.memory_failure		= pmem_pagemap_memory_failure,
};
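
/*
 * Bring up the block device and DAX device for a pmem namespace: map the
 * namespace (optionally with a struct page memmap via devm_memremap_pages),
 * size the gendisk, populate badblocks and register the dax_device.
 */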
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct queue_limits lim = {
		.logical_block_size	= pmem_sector_size(ndns),
		.physical_block_size	= PAGE_SIZE,
		.max_hw_sectors		= UINT_MAX,
		.features		= BLK_FEAT_WRITE_CACHE |
					  BLK_FEAT_SYNCHRONOUS,
	};
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;
	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	if (fua)
		lim.features |= BLK_FEAT_FUA;
	if (is_nd_pfn(dev) || pmem_should_map_pages(dev))
		lim.features |= BLK_FEAT_DAX;

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(&lim, nid);
	if (IS_ERR(disk))
		return PTR_ERR(disk);
	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr)) {
		rc = PTR_ERR(addr);
		goto out;
	}
	pmem->virt_addr = addr;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			>> SECTOR_SHIFT);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, &pmem_dax_ops);
	if (IS_ERR(dax_dev)) {
		rc = PTR_ERR(dax_dev);
		if (rc != -EOPNOTSUPP)
			goto out;
	} else {
		set_dax_nocache(dax_dev);
		set_dax_nomc(dax_dev);
		if (is_nvdimm_sync(nd_region))
			set_dax_synchronous(dax_dev);
		pmem->dax_dev = dax_dev;
		rc = dax_add_host(dax_dev, disk);
		if (rc)
			goto out_cleanup_dax;
		dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	}
	rc = device_add_disk(dev, disk, pmem_attribute_groups);
	if (rc)
		goto out_remove_host;
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	return 0;
out_remove_host:
	dax_remove_host(pmem->disk);
out_cleanup_dax:
	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
out:
	put_disk(pmem->disk);
	return rc;
}
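
/*
 * Driver probe: give the BTT, PFN and DAX personalities a chance to claim
 * the namespace first; if one does, the corresponding claimed device will
 * be probed separately, otherwise attach a raw pmem disk.
 */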
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: there is no info reserve
	 * block, or we found a valid info reserve block but failed to
	 * initialize the pfn superblock.
	 *
	 * For the first case consider the namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}
static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}
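
/*
 * NVDIMM_REVALIDATE_POISON handler: re-read the region's poison list and
 * refresh this device's badblocks, accounting for any BTT/PFN metadata
 * offsets, then notify the sysfs 'badblocks' node so userspace re-reads it.
 */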
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}
static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Persistent Memory Driver");
MODULE_LICENSE("GPL v2");