// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
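
/*
 * See the 'struct page' size handling in nd_pfn_init(): debug kernel
 * configurations (e.g. KMSAN) can grow 'struct page' beyond
 * MAX_STRUCT_PAGE_SIZE, and this override sizes the page map for the
 * larger layout at the cost of capacity that is wasted when booting
 * back into a production configuration.
 */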
static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);

static void nd_pfn_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        dev_dbg(dev, "trace\n");
        nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
        ida_free(&nd_region->pfn_ida, nd_pfn->id);
        kfree(nd_pfn->uuid);
        kfree(nd_pfn);
}

struct nd_pfn *to_nd_pfn(struct device *dev)
{
        struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

        WARN_ON(!is_nd_pfn(dev));
        return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        switch (nd_pfn->mode) {
        case PFN_MODE_RAM:
                return sprintf(buf, "ram\n");
        case PFN_MODE_PMEM:
                return sprintf(buf, "pmem\n");
        default:
                return sprintf(buf, "none\n");
        }
}

static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (dev->driver)
                rc = -EBUSY;
        else {
                size_t n = len - 1;

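                /* accept the mode keyword with or without a trailing newline */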
                if (strncmp(buf, "pmem\n", n) == 0
                                || strncmp(buf, "pmem", n) == 0) {
                        nd_pfn->mode = PFN_MODE_PMEM;
                } else if (strncmp(buf, "ram\n", n) == 0
                                || strncmp(buf, "ram", n) == 0)
                        nd_pfn->mode = PFN_MODE_RAM;
                else if (strncmp(buf, "none\n", n) == 0
                                || strncmp(buf, "none", n) == 0)
                        nd_pfn->mode = PFN_MODE_NONE;
                else
                        rc = -EINVAL;
        }
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        return sprintf(buf, "%ld\n", nd_pfn->align);
}

static unsigned long *nd_pfn_supported_alignments(unsigned long *alignments)
{
        alignments[0] = PAGE_SIZE;

        if (has_transparent_hugepage()) {
                alignments[1] = HPAGE_PMD_SIZE;
                if (has_transparent_pud_hugepage())
                        alignments[2] = HPAGE_PUD_SIZE;
        }

        return alignments;
}

/*
 * Use the PMD mapping size as the default alignment when the platform
 * supports transparent hugepages; otherwise fall back to PAGE_SIZE.
 */
static unsigned long nd_pfn_default_alignment(void)
{
        if (has_transparent_hugepage())
                return HPAGE_PMD_SIZE;
        return PAGE_SIZE;
}

static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &nd_pfn->align,
                        nd_pfn_supported_alignments(aligns));
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}

static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t namespace_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = sprintf(buf, "%s\n", nd_pfn->ndns
                        ? dev_name(&nd_pfn->ndns->dev) : "");
        nvdimm_bus_unlock(dev);

        return rc;
}

static ssize_t namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(namespace);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
                                + start_pad + offset);
        } else {
                /* no address to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%llu\n", (unsigned long long)
                                resource_size(&nsio->res) - start_pad
                                - end_trunc - offset);
        } else {
                /* no size to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(size);

static ssize_t supported_alignments_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        return nd_size_select_show(0,
                        nd_pfn_supported_alignments(aligns), buf);
}
static DEVICE_ATTR_RO(supported_alignments);

static struct attribute *nd_pfn_attributes[] = {
        &dev_attr_mode.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
        &dev_attr_align.attr,
        &dev_attr_resource.attr,
        &dev_attr_size.attr,
        &dev_attr_supported_alignments.attr,
        NULL,
};

static struct attribute_group nd_pfn_attribute_group = {
        .attrs = nd_pfn_attributes,
};

const struct attribute_group *nd_pfn_attribute_groups[] = {
        &nd_pfn_attribute_group,
        &nd_device_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct device_type nd_pfn_device_type = {
        .name = "nd_pfn",
        .release = nd_pfn_release,
        .groups = nd_pfn_attribute_groups,
};

bool is_nd_pfn(struct device *dev)
{
        return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);

static struct lock_class_key nvdimm_pfn_key;

struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                struct nd_namespace_common *ndns)
{
        struct device *dev;

        if (!nd_pfn)
                return NULL;

        nd_pfn->mode = PFN_MODE_NONE;
        nd_pfn->align = nd_pfn_default_alignment();
        dev = &nd_pfn->dev;
        device_initialize(&nd_pfn->dev);
        lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
                dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
                                dev_name(ndns->claim));
                put_device(dev);
                return NULL;
        }
        return dev;
}

static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
        if (!nd_pfn)
                return NULL;

        nd_pfn->id = ida_alloc(&nd_region->pfn_ida, GFP_KERNEL);
        if (nd_pfn->id < 0) {
                kfree(nd_pfn);
                return NULL;
        }

        dev = &nd_pfn->dev;
        dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
        dev->type = &nd_pfn_device_type;
        dev->parent = &nd_region->dev;

        return nd_pfn;
}

struct device *nd_pfn_create(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        if (!is_memory(&nd_region->dev))
                return NULL;

        nd_pfn = nd_pfn_alloc(nd_region);
        dev = nd_pfn_devinit(nd_pfn, NULL);

        nd_device_register(dev);

        return dev;
}

/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        void *zero_page = page_address(ZERO_PAGE(0));
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        int num_bad, meta_num, rc, bb_present;
        sector_t first_bad, meta_start;
        struct nd_namespace_io *nsio;

        if (nd_pfn->mode != PFN_MODE_PMEM)
                return 0;

        nsio = to_nd_namespace_io(&ndns->dev);
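        /*
         * badblocks are tracked in 512-byte sectors, hence the '>> 9'
         * conversions: the memmap metadata area spans from the end of the
         * info block (the 4K offset plus the superblock) up to the data
         * offset recorded in the superblock.
         */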
        meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
        meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

        /*
         * re-enable the namespace with correct size so that we can access
         * the device memmap area.
         */
        devm_namespace_disable(&nd_pfn->dev, ndns);
        rc = devm_namespace_enable(&nd_pfn->dev, ndns, le64_to_cpu(pfn_sb->dataoff));
        if (rc)
                return rc;

        do {
                unsigned long zero_len;
                u64 nsoff;

                bb_present = badblocks_check(&nd_region->bb, meta_start,
                                meta_num, &first_bad, &num_bad);
                if (bb_present) {
                        dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
                                        num_bad, first_bad);
                        nsoff = ALIGN_DOWN((nd_region->ndr_start
                                        + (first_bad << 9)) - nsio->res.start,
                                        PAGE_SIZE);
                        zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
                        while (zero_len) {
                                unsigned long chunk = min(zero_len, PAGE_SIZE);

                                rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
                                                        chunk, 0);
                                if (rc)
                                        break;

                                zero_len -= chunk;
                                nsoff += chunk;
                        }
                        if (rc) {
                                dev_err(&nd_pfn->dev,
                                        "error clearing %x badblocks at %llx\n",
                                        num_bad, first_bad);
                                return rc;
                        }
                }
        } while (bb_present);

        return 0;
}

static bool nd_supported_alignment(unsigned long align)
{
        int i;
        unsigned long supported[MAX_NVDIMM_ALIGN] = { [0] = 0, };

        if (align == 0)
                return false;

        nd_pfn_supported_alignments(supported);
        for (i = 0; supported[i]; i++)
                if (align == supported[i])
                        return true;

        return false;
}

/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
        u64 checksum, offset;
        struct resource *res;
        enum nd_pfn_mode mode;
        resource_size_t res_size;
        struct nd_namespace_io *nsio;
        unsigned long align, start_pad, end_trunc;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const uuid_t *parent_uuid = nd_dev_to_uuid(&ndns->dev);

        if (!pfn_sb || !ndns)
                return -ENODEV;

        if (!is_memory(nd_pfn->dev.parent))
                return -ENODEV;

        if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
                return -ENXIO;

        if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
                return -ENODEV;

        checksum = le64_to_cpu(pfn_sb->checksum);
        pfn_sb->checksum = 0;
        if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
                return -ENODEV;
        pfn_sb->checksum = cpu_to_le64(checksum);

        if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
                return -ENODEV;
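
        /* backfill fields that older info-block minor versions did not define */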
        if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
                pfn_sb->start_pad = 0;
                pfn_sb->end_trunc = 0;
        }

        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
                pfn_sb->align = 0;

        if (__le16_to_cpu(pfn_sb->version_minor) < 4) {
                pfn_sb->page_struct_size = cpu_to_le16(64);
                pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        }

        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
        case PFN_MODE_PMEM:
                break;
        default:
                return -ENXIO;
        }

        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
        start_pad = le32_to_cpu(pfn_sb->start_pad);
        end_trunc = le32_to_cpu(pfn_sb->end_trunc);
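        /* pre-v1.2 info blocks recorded no alignment; infer one from the data offset */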
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);

        if ((le32_to_cpu(pfn_sb->page_size) > PAGE_SIZE) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, page size mismatch %d\n",
                                le32_to_cpu(pfn_sb->page_size));
                return -EOPNOTSUPP;
        }

        if ((le16_to_cpu(pfn_sb->page_struct_size) < sizeof(struct page)) &&
                        (mode == PFN_MODE_PMEM)) {
                dev_err(&nd_pfn->dev,
                                "init failed, struct page size mismatch %d\n",
                                le16_to_cpu(pfn_sb->page_struct_size));
                return -EOPNOTSUPP;
        }

        /*
         * Check whether we support the alignment. For DAX, if the
         * superblock alignment is not a supported value, we won't
         * initialize the device.
         */
        if (!nd_supported_alignment(align) &&
                        !memcmp(pfn_sb->signature, DAX_SIG, PFN_SIG_LEN)) {
                dev_err(&nd_pfn->dev, "init failed, alignment mismatch: "
                                "%ld:%ld\n", nd_pfn->align, align);
                return -EOPNOTSUPP;
        }

        if (!nd_pfn->uuid) {
                /*
                 * When probing a namespace via nd_pfn_probe() the uuid
                 * is NULL (see: nd_pfn_devinit()), so initialize the
                 * settings from the pfn_sb.
                 */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
                nd_pfn->align = align;
                nd_pfn->mode = mode;
        } else {
                /*
                 * When probing a pfn / dax instance we validate the
                 * live settings against the pfn_sb.
                 */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;

                /*
                 * If the uuid validates, but other settings mismatch,
                 * return EINVAL because userspace has managed to change
                 * the configuration without specifying new
                 * identification.
                 */
                if (nd_pfn->align != align || nd_pfn->mode != mode) {
                        dev_err(&nd_pfn->dev,
                                        "init failed, settings mismatch\n");
                        dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
                                        nd_pfn->align, align, nd_pfn->mode,
                                        mode);
                        return -EINVAL;
                }
        }

        if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
                                align, nvdimm_namespace_capacity(ndns));
                return -EINVAL;
        }

        /*
         * These warnings are verbose because they can only trigger in
         * the case where the physical address alignment of the
         * namespace has changed since the pfn superblock was
         * established.
         */
        nsio = to_nd_namespace_io(&ndns->dev);
        res = &nsio->res;
        res_size = resource_size(res);
        if (offset >= res_size) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
                                dev_name(&ndns->dev));
                return -EOPNOTSUPP;
        }

        if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
                                offset, align);
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->start + start_pad, memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource start misaligned\n");
                return -EOPNOTSUPP;
        }

        if (!IS_ALIGNED(res->end + 1 - end_trunc, memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "resource end misaligned\n");
                return -EOPNOTSUPP;
        }

        if (offset >= (res_size - start_pad - end_trunc)) {
                dev_err(&nd_pfn->dev, "bad offset with small namespace\n");
                return -EOPNOTSUPP;
        }

        return 0;
}
EXPORT_SYMBOL(nd_pfn_validate);

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
        int rc;
        struct nd_pfn *nd_pfn;
        struct device *pfn_dev;
        struct nd_pfn_sb *pfn_sb;
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

        if (ndns->force_raw)
                return -ENODEV;

        switch (ndns->claim_class) {
        case NVDIMM_CCLASS_NONE:
        case NVDIMM_CCLASS_PFN:
                break;
        default:
                return -ENODEV;
        }

        nvdimm_bus_lock(&ndns->dev);
        nd_pfn = nd_pfn_alloc(nd_region);
        pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
        nvdimm_bus_unlock(&ndns->dev);
        if (!pfn_dev)
                return -ENOMEM;
        pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(pfn_dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, PFN_SIG);
        dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
        if (rc < 0) {
                nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
                put_device(pfn_dev);
        } else
                nd_device_register(pfn_dev);

        return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);

/*
 * Memory is hotplugged at sub-section granularity, so pad the reserved
 * area from the preceding sub-section boundary up to the namespace base
 * address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
        unsigned long base_pfn = PHYS_PFN(base);

        return SUBSECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
        unsigned long reserve = nd_info_block_reserve() >> PAGE_SHIFT;
        unsigned long base_pfn = PHYS_PFN(base);
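
        /*
         * Reserve the info-block pages plus the pad from the preceding
         * sub-section boundary up to the namespace base. For example,
         * assuming x86 with 4K pages and 2M sub-sections, a namespace
         * starting 1M into a sub-section adds 256 pages of pad.
         */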
        reserve += base_pfn - SUBSECTION_ALIGN_DOWN(base_pfn);
        return reserve;
}

static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        struct range *range = &pgmap->range;
        struct vmem_altmap *altmap = &pgmap->altmap;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        u64 offset = le64_to_cpu(pfn_sb->dataoff);
        u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
        u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
        u32 reserve = nd_info_block_reserve();
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t base = nsio->res.start + start_pad;
        resource_size_t end = nsio->res.end - end_trunc;
        struct vmem_altmap __altmap = {
                .base_pfn = init_altmap_base(base),
                .reserve = init_altmap_reserve(base),
                .end_pfn = PHYS_PFN(end),
        };

        *range = (struct range) {
                .start = nsio->res.start + start_pad,
                .end = nsio->res.end - end_trunc,
        };
        pgmap->nr_range = 1;
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < reserve)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
                nd_pfn->npfns = PHYS_PFN((range_len(range) - offset));
                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                        dev_info(&nd_pfn->dev,
                                        "number of pfns truncated from %lld to %ld\n",
                                        le64_to_cpu(nd_pfn->pfn_sb->npfns),
                                        nd_pfn->npfns);
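                /*
                 * The page map itself lives in pmem: seed the altmap and
                 * treat the pages between the info-block reserve and the
                 * data offset as free vmemmap capacity.
                 */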
                memcpy(altmap, &__altmap, sizeof(*altmap));
                altmap->free = PHYS_PFN(offset - reserve);
                altmap->alloc = 0;
                pgmap->flags |= PGMAP_ALTMAP_VALID;
        } else
                return -ENXIO;

        return 0;
}

static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t start, size;
        struct nd_region *nd_region;
        unsigned long npfns, align;
        u32 end_trunc;
        struct nd_pfn_sb *pfn_sb;
        phys_addr_t offset;
        const char *sig;
        u64 checksum;
        int rc;

        pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        if (is_nd_dax(&nd_pfn->dev))
                sig = DAX_SIG;
        else
                sig = PFN_SIG;

        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc == 0)
                return nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc != -ENODEV)
                return rc;

        /* no info block, do init */
        memset(pfn_sb, 0, sizeof(*pfn_sb));

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                return -ENXIO;
        }

        start = nsio->res.start;
        size = resource_size(&nsio->res);
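        /* the first SZ_8K of the namespace is reserved for the info block */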
        npfns = PHYS_PFN(size - SZ_8K);
        align = max(nd_pfn->align, memremap_compat_align());

        /*
         * When @start is misaligned, fail namespace creation. See
         * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
         * an option.
         */
        if (!IS_ALIGNED(start, memremap_compat_align())) {
                dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
                                dev_name(&ndns->dev), &start,
                                memremap_compat_align());
                return -EINVAL;
        }
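
        /* trim the end so that usable capacity ends on an @align boundary */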
        end_trunc = start + size - ALIGN_DOWN(start + size, align);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;

                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 *
                 * Also make sure the size of struct page is less than
                 * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
                 * face of production kernel configurations that reduce the
                 * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
                 * kernel configurations that increase the 'struct page' size
                 * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
                 * for continuing with the capacity that will be wasted when
                 * reverting to a production kernel configuration. Otherwise,
                 * those configurations are blocked by default.
                 */
                if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
                        if (page_struct_override)
                                page_map_size = sizeof(struct page) * npfns;
                        else {
                                dev_err(&nd_pfn->dev,
                                        "Memory debug options prevent using pmem for the page map\n");
                                return -EINVAL;
                        }
                }
                offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, align) - start;
        else
                return -ENXIO;

        if (offset >= (size - end_trunc)) {
                /* This results in zero size devices */
                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

        npfns = PHYS_PFN(size - offset - end_trunc);
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(4);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
                pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
        else
                pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
        pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        rc = nd_pfn_clear_memmap_errors(nd_pfn);
        if (rc)
                return rc;

        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}

/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        int rc;

        if (!nd_pfn->uuid || !nd_pfn->ndns)
                return -ENODEV;

        rc = nd_pfn_init(nd_pfn);
        if (rc)
                return rc;

        /* we need a valid pfn_sb before we can init a dev_pagemap */
        return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);