/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

#ifdef ARCH_MEMREMAP_PMEM
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
	return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input[len] = '\0';
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);

	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
{
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
		return true;
	if (!res || strcmp(res->name, label_id->id) == 0)
		return true;
	return false;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;
	int first;

 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_MID;
			}
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_AFTER;
			}
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)
				return n;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;

	res->start = nd_region->ndr_start;
	res->end = nd_region->ndr_start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	u8 *uuid = NULL;
	int rc, i;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
		return -ENXIO;
	}

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_size(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
	}

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
	} else
		return -ENXIO;

	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (nd_mapping->labels)
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

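/*
 * Added commentary: the list pairs power-of-two sector sizes with their
 * metadata-extended variants, e.g. 520 = 512 + 8 and 4104 = 4096 + 8;
 * the trailing 0 terminates the list for nd_sector_size_show/store().
 */
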
static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))
		return -ENXIO;

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc = 0;

	if (!is_namespace_blk(dev))
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
				ns_lbasize_supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
		mode = "memory";
	else if (claim && is_nd_btt(claim))
		mode = "safe";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_namespace_common *ndns;
	resource_size_t size;

	if (nd_btt || nd_pfn) {
		struct device *host = NULL;

		if (nd_btt) {
			host = &nd_btt->dev;
			ndns = nd_btt->ndns;
		} else if (nd_pfn) {
			host = &nd_pfn->dev;
			ndns = nd_pfn->ndns;
		}

		if (!ndns || !host)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(host));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(host),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "%s: sector size not set\n",
					__func__);
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		bool found_uuid = false;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels) {
			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
			u16 position = __le16_to_cpu(nd_label->position);
			u16 nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (found_uuid) {
				dev_dbg(to_ndd(nd_mapping)->dev,
						"%s duplicate entry for uuid\n",
						__func__);
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
		}
		if (found)
			break;
	}
	return found != NULL;
}

static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	struct nd_namespace_label *select = NULL;
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels)
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		select = nd_label;
		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(select->dpa);
		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
		if (pmem_start == hw_start && pmem_end <= hw_end)
			/* pass */;
		else
			return -EINVAL;

		nd_mapping->labels[0] = select;
		nd_mapping->labels[1] = NULL;
	}
	return 0;
}

/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region);
	struct nd_namespace_label *nd_label;
	u8 select_id[NSLABEL_UUID_LEN];
	resource_size_t size = 0;
	u8 *pmem_id = NULL;
	int rc = -ENODEV, l;
	u16 i;

	if (cookie == 0)
		return -ENXIO;

	/*
	 * Find a complete set of labels by uuid.  By definition we can start
	 * with any mapping as the reference label
	 */
	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

		if (isetcookie != cookie)
			continue;

		for (i = 0; nd_region->ndr_mappings; i++)
			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
						cookie, i))
				break;
		if (i < nd_region->ndr_mappings) {
			/*
			 * Give up if we don't find an instance of a
			 * uuid at each position (from 0 to
			 * nd_region->ndr_mappings - 1), or if we find a
			 * dimm with two instances of the same uuid.
			 */
			rc = -EINVAL;
			goto err;
		} else if (pmem_id) {
			/*
			 * If there is more than one valid uuid set, we
			 * need userspace to clean this up.
			 */
			rc = -EBUSY;
			goto err;
		}
		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
		pmem_id = select_id;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, pmem_id);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *label0 = nd_mapping->labels[0];

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_size(nd_region, nspm, size);

	return 0;
 err:
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
		break;
	default:
		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
				__func__, rc);
		break;
	}
	return rc;
}

**create_namespace_pmem(struct nd_region
*nd_region
)
1669 struct nd_namespace_pmem
*nspm
;
1670 struct device
*dev
, **devs
;
1671 struct resource
*res
;
1674 nspm
= kzalloc(sizeof(*nspm
), GFP_KERNEL
);
1678 dev
= &nspm
->nsio
.common
.dev
;
1679 dev
->type
= &namespace_pmem_device_type
;
1680 dev
->parent
= &nd_region
->dev
;
1681 res
= &nspm
->nsio
.res
;
1682 res
->name
= dev_name(&nd_region
->dev
);
1683 res
->flags
= IORESOURCE_MEM
;
1684 rc
= find_pmem_label_set(nd_region
, nspm
);
1685 if (rc
== -ENODEV
) {
1688 /* Pass, try to permit namespace creation... */
1689 for (i
= 0; i
< nd_region
->ndr_mappings
; i
++) {
1690 struct nd_mapping
*nd_mapping
= &nd_region
->mapping
[i
];
1692 kfree(nd_mapping
->labels
);
1693 nd_mapping
->labels
= NULL
;
1696 /* Publish a zero-sized namespace for userspace to configure. */
1697 nd_namespace_pmem_set_size(nd_region
, nspm
, 0);
1703 devs
= kcalloc(2, sizeof(struct device
*), GFP_KERNEL
);
1711 namespace_pmem_release(&nspm
->nsio
.common
.dev
);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}

static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
}

void nd_region_create_blk_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create blk namespace\n");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

static struct device **create_namespace_blk(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char *name[NSLABEL_NAME_LEN];
		struct device **__devs;

		if (flags & NSLABEL_FLAG_LOCAL)
			/* pass */;
		else
			continue;

		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
						NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				if (!res)
					goto err;
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
					dev_name(&nsblk->common.dev));
				break;
			}
		}
		if (i < count)
			continue;

		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;
		nsblk->id = -1;
		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		if (!nsblk->uuid)
			goto err;
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
		if (name[0])
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		if (!res)
			goto err;
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	}

	return devs;

 err:
	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
	}
	kfree(devs);
	return NULL;
}

static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
			return -ENOMEM;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
		}
	}

	return 0;
}

int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (i == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}