/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}
static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}
static void namespace_blk_release(struct device *dev)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nsblk->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nsblk->id);
        kfree(nsblk->alt_name);
        kfree(nsblk->uuid);
        kfree(nsblk->res);
        kfree(nsblk);
}
static struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
};
static bool is_namespace_pmem(struct device *dev)
{
        return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
        return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
        return dev ? dev->type == &namespace_io_device_type : false;
}
static int is_uuid_busy(struct device *dev, void *data)
{
        u8 *uuid1 = data, *uuid2 = NULL;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid2 = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid2 = nsblk->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                uuid2 = nd_btt->uuid;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                uuid2 = nd_pfn->uuid;
        }

        if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
                return -EBUSY;

        return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_pmem(dev) || is_nd_blk(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}
/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}
bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

#ifdef ARCH_MEMREMAP_PMEM
        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
        return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim) {
                if (is_nd_btt(ndns->claim))
                        suffix = "s";
                else if (is_nd_pfn(ndns->claim))
                        suffix = "m";
                else
                        dev_WARN_ONCE(&ndns->dev, 1,
                                        "unknown claim type by %s\n",
                                        dev_name(ndns->claim));
        }

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                if (!suffix && pmem_should_map_pages(&ndns->dev))
                        suffix = "m";
                sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
                                suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const u8 *nd_dev_to_uuid(struct device *dev)
{
        static const u8 null_uuid[16];

        if (!dev)
                return null_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        input[len] = '\0';
        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

out:
        kfree(input);
        return rc;
}
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        resource_size_t size = 0;
        struct resource *res;

        if (!nsblk->uuid)
                return 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        size += resource_size(res);
        return size;
}
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;
        int count, i;

        if (!nsblk->uuid || !nsblk->lbasize || !ndd)
                return false;

        count = 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, label_id.id) != 0)
                        continue;
                /*
                 * Resources with unacknowledged adjustments indicate a
                 * failure to update labels
                 */
                if (res->flags & DPA_RESOURCE_ADJUSTED)
                        return false;
                count++;
        }

        /* These values match after a successful label update */
        if (count != nsblk->num_resources)
                return false;

        for (i = 0; i < nsblk->num_resources; i++) {
                struct resource *found = NULL;

                for_each_dpa_resource(ndd, res)
                        if (res == nsblk->res[i]) {
                                found = res;
                                break;
                        }
                /* stale resource */
                if (!found)
                        return false;
        }

        return true;
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        resource_size_t size;

        nvdimm_bus_lock(&nsblk->common.dev);
        size = __nd_namespace_blk_validate(nsblk);
        nvdimm_bus_unlock(&nsblk->common.dev);

        return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
                resource_size_t size = nd_namespace_blk_size(nsblk);

                if (size == 0 && nsblk->uuid)
                        /* delete allocation */;
                else if (!nsblk->uuid || !nsblk->lbasize)
                        return 0;

                return nd_blk_namespace_label_update(nd_region, nsblk, size);
        }

        return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
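/*
 * scan_free() - for one dimm mapping, release up to @n bytes from the
 * tail of the resources tagged with @label_id, deleting whole
 * allocations and shrinking the final one as needed.
 */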
static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;
                resource_size_t new_start;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                /*
                 * Keep BLK allocations relegated to high DPA as much as
                 * possible
                 */
                if (is_blk)
                        new_start = res->start + n;
                else
                        new_start = res->start;

                rc = adjust_resource(res, new_start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}
/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;

        /* allocate blk from highest dpa first */
        if (is_blk)
                first_dpa = nd_mapping->start + nd_mapping->size - n;
        else
                first_dpa = nd_mapping->start;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}
static bool space_valid(bool is_pmem, bool is_reserve,
                struct nd_label_id *label_id, struct resource *res)
{
        /*
         * For BLK-space any space is valid, for PMEM-space, it must be
         * contiguous with an existing allocation unless we are
         * reserving pmem.
         */
        if (is_reserve || !is_pmem)
                return true;
        if (!res || strcmp(res->name, label_id->id) == 0)
                return true;
        return false;
}

enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
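/*
 * scan_allocate() - walk the free dpa ranges in one dimm mapping and
 * grow (or create) the @label_id allocation by up to @n bytes,
 * preferring space adjacent to an existing allocation in the PMEM case.
 */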
static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        const resource_size_t to_allocate = n;
        struct resource *res;
        int first;

 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t allocate, available = 0, free_start, free_end;
                struct resource *next = res->sibling, *new_res = NULL;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        free_start = nd_mapping->start;
                        available = res->start - free_start;
                        if (space_valid(is_pmem, is_reserve, label_id, NULL))
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        free_start = res->start + resource_size(res);
                        free_end = min(mapping_end, next->start - 1);
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_MID;
                        }
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        free_start = res->start + resource_size(res);
                        free_end = mapping_end;
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_AFTER;
                        }
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        /* BLK allocate bottom up */
                        if (!is_pmem)
                                free_start += available - allocate;
                        else if (!is_reserve && free_start != nd_mapping->start)
                                return n;

                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        free_start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        /*
         * If we allocated nothing in the BLK case it may be because we are in
         * an initial "pmem-reserve pass".  Only do an initial BLK allocation
         * when none of the DPA space is reserved.
         */
        if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
}
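/*
 * merge_dpa() - coalesce adjacent dpa resources that carry the same
 * BLK label_id into a single resource; "pmem" label ids are skipped.
 */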
static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}
static int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}
static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
        int rc;

        rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
                        __reserve_free_pmem);
        if (rc)
                release_free_pmem(nvdimm_bus, nd_mapping);
        return rc;
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc, j;

                /*
                 * In the BLK case try once with all unallocated PMEM
                 * reserved, and once without
                 */
                for (j = is_pmem; j < 2; j++) {
                        bool blk_only = j == 0;

                        if (blk_only) {
                                rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
                                if (rc)
                                        return rc;
                        }
                        rem = scan_allocate(nd_region, nd_mapping,
                                        label_id, rem);
                        if (blk_only)
                                release_free_pmem(nvdimm_bus, nd_mapping);

                        /* try again and allow encroachments into PMEM */
                        if (rem == 0)
                                break;
                }

                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}
static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;

        res->start = nd_region->ndr_start;
        res->end = nd_region->ndr_start + size - 1;
}
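/*
 * __size_store() - resize a namespace: the requested size is divided
 * evenly across the region's mappings and each dimm's allocation for
 * the namespace label_id is grown or shrunk to match.
 */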
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        u8 *uuid = NULL;
        int rc, i;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (!uuid || nd_region->ndr_mappings == 0)
                return -ENXIO;

        div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %dK aligned\n", val,
                                (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set, or the base dimm for a blk
                 * region, need to be enabled for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_available_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_size(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                /*
                 * Try to delete the namespace if we deleted all of its
                 * allocation, this is not the seed device for the
                 * region, and it is not actively claimed by a btt
                 * instance.
                 */
                if (val == 0 && nd_region->ns_seed != dev
                                && !nsblk->common.claim)
                        nd_device_unregister(dev, ND_ASYNC);
        }

        return rc;
}
static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        u8 **uuid = NULL;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = &nsblk->uuid;
        }

        if (rc == 0 && val == 0 && uuid) {
                /* setting size zero == 'delete namespace' */
                kfree(*uuid);
                *uuid = NULL;
        }

        dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
                        ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_blk(dev)) {
                return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");
        return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u8 *uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
        } else
                return -ENXIO;

        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can updates all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
        u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (nd_mapping->labels)
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, flags);
        nd_label_gen_id(&new_label_id, new_uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;
        return 0;
}
static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        u8 *uuid = NULL;
        ssize_t rc = 0;
        u8 **ns_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_uuid = &nsblk->uuid;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_uuid_store(dev, &uuid, buf, len);
        if (rc >= 0)
                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        else
                kfree(uuid);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct resource *res;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                res = &nspm->nsio.res;
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                res = &nsio->res;
        } else
                return -ENXIO;

        /* no address to convey if the namespace has no allocation */
        if (resource_size(res) == 0)
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
        4096, 4104, 4160, 4224, 0 };
static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

        if (!is_namespace_blk(dev))
                return -ENXIO;

        return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc = 0;

        if (!is_namespace_blk(dev))
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
                                ns_lbasize_supported);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
                        rc, rc < 0 ? "tried" : "wrote", buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_label_id label_id;
        int count = 0, i;
        u8 *uuid = NULL;
        u32 flags = 0;

        nvdimm_bus_lock(dev);
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        if (!uuid)
                goto out;

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0)
                                count++;
        }
 out:
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static ssize_t holder_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(holder);
static ssize_t force_raw_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool force_raw;
        int rc = strtobool(buf, &force_raw);

        if (rc)
                return rc;

        to_ndns(dev)->force_raw = force_raw;
        return len;
}

static ssize_t force_raw_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
        &dev_attr_nstype.attr,
        &dev_attr_size.attr,
        &dev_attr_uuid.attr,
        &dev_attr_holder.attr,
        &dev_attr_resource.attr,
        &dev_attr_alt_name.attr,
        &dev_attr_force_raw.attr,
        &dev_attr_sector_size.attr,
        &dev_attr_dpa_extents.attr,
        NULL,
};
static umode_t namespace_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (a == &dev_attr_resource.attr) {
                if (is_namespace_blk(dev))
                        return 0;
                return a->mode;
        }

        if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
                if (a == &dev_attr_size.attr)
                        return S_IWUSR | S_IRUGO;

                if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
                        return 0;

                return a->mode;
        }

        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
                        || a == &dev_attr_holder.attr
                        || a == &dev_attr_force_raw.attr)
                return a->mode;

        return 0;
}
static struct attribute_group nd_namespace_attribute_group = {
        .attrs = nd_namespace_attributes,
        .is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_namespace_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_namespace_common *ndns;
        resource_size_t size;

        if (nd_btt || nd_pfn) {
                struct device *host = NULL;

                if (nd_btt) {
                        host = &nd_btt->dev;
                        ndns = nd_btt->ndns;
                } else if (nd_pfn) {
                        host = &nd_pfn->dev;
                        ndns = nd_pfn->ndns;
                }

                if (!ndns)
                        return ERR_PTR(-ENODEV);

                /*
                 * Flush any in-progress probes / removals in the driver
                 * for the raw personality of this namespace.
                 */
                device_lock(&ndns->dev);
                device_unlock(&ndns->dev);
                if (ndns->dev.driver) {
                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
                                        dev_name(host));
                        return ERR_PTR(-EBUSY);
                }
                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
                                        "host (%s) vs claim (%s) mismatch\n",
                                        dev_name(host),
                                        dev_name(ndns->claim)))
                        return ERR_PTR(-ENXIO);
        } else {
                ndns = to_ndns(dev);
                if (ndns->claim) {
                        dev_dbg(dev, "claimed by %s, failing probe\n",
                                        dev_name(ndns->claim));

                        return ERR_PTR(-ENXIO);
                }
        }

        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
                                &size, ND_MIN_NAMESPACE_SIZE);
                return ERR_PTR(-ENODEV);
        }

        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (!nspm->uuid) {
                        dev_dbg(&ndns->dev, "%s: uuid not set\n", __func__);
                        return ERR_PTR(-ENODEV);
                }
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                if (!nd_namespace_blk_validate(nsblk))
                        return ERR_PTR(-ENODEV);
        }

        return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
static struct device **create_namespace_io(struct nd_region *nd_region)
{
        struct nd_namespace_io *nsio;
        struct device *dev, **devs;
        struct resource *res;

        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
        if (!nsio)
                return NULL;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs) {
                kfree(nsio);
                return NULL;
        }

        dev = &nsio->common.dev;
        dev->type = &namespace_io_device_type;
        dev->parent = &nd_region->dev;
        res = &nsio->res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        res->start = nd_region->ndr_start;
        res->end = res->start + nd_region->ndr_size - 1;

        devs[0] = dev;
        return devs;
}
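/*
 * has_uuid_at_pos() - return true if a label with @uuid and a matching
 * interleave-set @cookie claims position @pos, and no dimm in the
 * region carries a duplicate entry for that uuid.
 */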
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
                u64 cookie, u16 pos)
{
        struct nd_namespace_label *found = NULL;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *nd_label;
                bool found_uuid = false;
                int l;

                for_each_label(l, nd_label, nd_mapping->labels) {
                        u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
                        u16 position = __le16_to_cpu(nd_label->position);
                        u16 nlabel = __le16_to_cpu(nd_label->nlabel);

                        if (isetcookie != cookie)
                                continue;

                        if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
                                continue;

                        if (found_uuid) {
                                dev_dbg(to_ndd(nd_mapping)->dev,
                                                "%s duplicate entry for uuid\n",
                                                __func__);
                                return false;
                        }
                        found_uuid = true;
                        if (nlabel != nd_region->ndr_mappings)
                                continue;
                        if (position != pos)
                                continue;
                        found = nd_label;
                        break;
                }
                if (found)
                        break;
        }
        return found != NULL;
}
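/*
 * select_pmem_id() - for each mapping, pin the label matching @pmem_id
 * at labels[0] after checking it against the mapping's dpa range.
 */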
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
        struct nd_namespace_label *select = NULL;
        int i;

        if (!pmem_id)
                return -ENODEV;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *nd_label;
                u64 hw_start, hw_end, pmem_start, pmem_end;
                int l;

                for_each_label(l, nd_label, nd_mapping->labels)
                        if (memcmp(nd_label->uuid, pmem_id,
                                                NSLABEL_UUID_LEN) == 0)
                                break;

                if (!nd_label) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                select = nd_label;
                /*
                 * Check that this label is compliant with the dpa
                 * range published in NFIT
                 */
                hw_start = nd_mapping->start;
                hw_end = hw_start + nd_mapping->size;
                pmem_start = __le64_to_cpu(select->dpa);
                pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
                if (pmem_start == hw_start && pmem_end <= hw_end)
                        /* pass */;
                else
                        return -EINVAL;

                nd_mapping->labels[0] = select;
                nd_mapping->labels[1] = NULL;
        }
        return 0;
}
/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm)
{
        u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
        u64 cookie = nd_region_interleave_set_cookie(nd_region);
        struct nd_namespace_label *nd_label;
        u8 select_id[NSLABEL_UUID_LEN];
        resource_size_t size = 0;
        u8 *pmem_id = NULL;
        int rc = -ENODEV, l;
        u16 i;

        if (cookie == 0) {
                dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
                return -ENXIO;
        }

        /*
         * Find a complete set of labels by uuid.  By definition we can start
         * with any mapping as the reference label
         */
        for_each_label(l, nd_label, nd_region->mapping[0].labels) {
                u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

                if (isetcookie != cookie) {
                        dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
                                        nd_label->uuid);
                        if (isetcookie != altcookie)
                                continue;

                        dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
                                        nd_label->uuid);
                }

                for (i = 0; nd_region->ndr_mappings; i++) {
                        if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
                                continue;
                        if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
                                continue;
                        break;
                }

                if (i < nd_region->ndr_mappings) {
                        /*
                         * Give up if we don't find an instance of a
                         * uuid at each position (from 0 to
                         * nd_region->ndr_mappings - 1), or if we find a
                         * dimm with two instances of the same uuid.
                         */
                        rc = -EINVAL;
                        goto err;
                } else if (pmem_id) {
                        /*
                         * If there is more than one valid uuid set, we
                         * need userspace to clean this up.
                         */
                        rc = -EBUSY;
                        goto err;
                }
                memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
                pmem_id = select_id;
        }

        /*
         * Fix up each mapping's 'labels' to have the validated pmem label for
         * that position at labels[0], and NULL at labels[1].  In the process,
         * check that the namespace aligns with interleave-set.  We know
         * that it does not overlap with any blk namespaces by virtue of
         * the dimm being enabled (i.e. nd_label_reserve_dpa()
         * succeeded).
         */
        rc = select_pmem_id(nd_region, pmem_id);
        if (rc)
                goto err;

        /* Calculate total size and populate namespace properties from label0 */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *label0 = nd_mapping->labels[0];

                size += __le64_to_cpu(label0->rawsize);
                if (__le16_to_cpu(label0->position) != 0)
                        continue;
                WARN_ON(nspm->alt_name || nspm->uuid);
                nspm->alt_name = kmemdup((void __force *) label0->name,
                                NSLABEL_NAME_LEN, GFP_KERNEL);
                nspm->uuid = kmemdup((void __force *) label0->uuid,
                                NSLABEL_UUID_LEN, GFP_KERNEL);
        }

        if (!nspm->alt_name || !nspm->uuid) {
                rc = -ENOMEM;
                goto err;
        }

        nd_namespace_pmem_set_size(nd_region, nspm, size);

        return 0;
 err:
        switch (rc) {
        case -EINVAL:
                dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
                break;
        case -ENODEV:
                dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
                break;
        default:
                dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
                                __func__, rc);
                break;
        }
        return rc;
}
static struct device **create_namespace_pmem(struct nd_region *nd_region)
{
        struct nd_namespace_pmem *nspm;
        struct device *dev, **devs;
        struct resource *res;
        int rc;

        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
        if (!nspm)
                return NULL;

        dev = &nspm->nsio.common.dev;
        dev->type = &namespace_pmem_device_type;
        dev->parent = &nd_region->dev;
        res = &nspm->nsio.res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        rc = find_pmem_label_set(nd_region, nspm);
        if (rc == -ENODEV) {
                int i;

                /* Pass, try to permit namespace creation... */
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                }

                /* Publish a zero-sized namespace for userspace to configure. */
                nd_namespace_pmem_set_size(nd_region, nspm, 0);
        } else if (rc)
                goto err;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs)
                goto err;
        devs[0] = dev;

        return devs;

 err:
        namespace_pmem_release(&nspm->nsio.common.dev);
        return NULL;
}
struct resource *nsblk_add_resource(struct nd_region *nd_region,
                struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
                resource_size_t start)
{
        struct nd_label_id label_id;
        struct resource *res;

        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        res = krealloc(nsblk->res,
                        sizeof(void *) * (nsblk->num_resources + 1),
                        GFP_KERNEL);
        if (!res)
                return NULL;
        nsblk->res = (struct resource **) res;
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0
                                && res->start == start) {
                        nsblk->res[nsblk->num_resources++] = res;
                        return res;
                }
        return NULL;
}
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
        struct nd_namespace_blk *nsblk;
        struct device *dev;

        if (!is_nd_blk(&nd_region->dev))
                return NULL;

        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
        if (!nsblk)
                return NULL;

        dev = &nsblk->common.dev;
        dev->type = &namespace_blk_device_type;
        nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
        if (nsblk->id < 0) {
                kfree(nsblk);
                return NULL;
        }
        dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
        dev->parent = &nd_region->dev;
        dev->groups = nd_namespace_attribute_groups;

        return &nsblk->common.dev;
}
void nd_region_create_blk_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->ns_seed = nd_namespace_blk_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->ns_seed)
                dev_err(&nd_region->dev, "failed to create blk namespace\n");
        else
                nd_device_register(nd_region->ns_seed);
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->btt_seed = nd_btt_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->btt_seed)
                dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nd_namespace_label *nd_label;
        struct device *dev, **devs = NULL;
        struct nd_namespace_blk *nsblk;
        struct nvdimm_drvdata *ndd;
        int i, l, count = 0;
        struct resource *res;

        if (nd_region->ndr_mappings == 0)
                return NULL;

        ndd = to_ndd(nd_mapping);
        for_each_label(l, nd_label, nd_mapping->labels) {
                u32 flags = __le32_to_cpu(nd_label->flags);
                char *name[NSLABEL_NAME_LEN];
                struct device **__devs;

                if (flags & NSLABEL_FLAG_LOCAL)
                        /* pass */;
                else
                        continue;

                for (i = 0; i < count; i++) {
                        nsblk = to_nd_namespace_blk(devs[i]);
                        if (memcmp(nsblk->uuid, nd_label->uuid,
                                                NSLABEL_UUID_LEN) == 0) {
                                res = nsblk_add_resource(nd_region, ndd, nsblk,
                                                __le64_to_cpu(nd_label->dpa));
                                if (!res)
                                        goto err;
                                nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
                                                dev_name(&nsblk->common.dev));
                                break;
                        }
                }
                if (i < count)
                        continue;

                __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
                if (!__devs)
                        goto err;
                memcpy(__devs, devs, sizeof(dev) * count);
                kfree(devs);
                devs = __devs;

                nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
                if (!nsblk)
                        goto err;
                dev = &nsblk->common.dev;
                dev->type = &namespace_blk_device_type;
                dev->parent = &nd_region->dev;
                dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
                devs[count++] = dev;

                nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
                nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
                                GFP_KERNEL);
                if (!nsblk->uuid)
                        goto err;
                memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
                if (name[0])
                        nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                        GFP_KERNEL);
                res = nsblk_add_resource(nd_region, ndd, nsblk,
                                __le64_to_cpu(nd_label->dpa));
                if (!res)
                        goto err;
                nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
                                dev_name(&nsblk->common.dev));
        }

        dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
                        __func__, count, count == 1 ? "" : "s");

        if (count == 0) {
                /* Publish a zero-sized namespace for userspace to configure. */
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                }

                devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
                if (!devs)
                        goto err;
                nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
                if (!nsblk)
                        goto err;
                dev = &nsblk->common.dev;
                dev->type = &namespace_blk_device_type;
                dev->parent = &nd_region->dev;
                devs[count++] = dev;
        }

        return devs;

 err:
        for (i = 0; i < count; i++) {
                nsblk = to_nd_namespace_blk(devs[i]);
                namespace_blk_release(&nsblk->common.dev);
        }
        kfree(devs);
        return NULL;
}
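/*
 * init_active_labels() - pin the enabled dimms backing this region and
 * cache their active namespace labels in each nd_mapping.
 */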
static int init_active_labels(struct nd_region *nd_region)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int count, j;

                /*
                 * If the dimm is disabled then prevent the region from
                 * being activated if it aliases DPA.
                 */
                if (!ndd) {
                        if ((nvdimm->flags & NDD_ALIASING) == 0)
                                return 0;
                        dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
                                        dev_name(&nd_mapping->nvdimm->dev));
                        return -ENXIO;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);
                get_ndd(ndd);

                count = nd_label_active_count(ndd);
                dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
                if (!count)
                        continue;
                nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
                                GFP_KERNEL);
                if (!nd_mapping->labels)
                        return -ENOMEM;
                for (j = 0; j < count; j++) {
                        struct nd_namespace_label *label;

                        label = nd_label_active(ndd, j);
                        nd_mapping->labels[j] = label;
                }
        }

        return 0;
}
1922 int nd_region_register_namespaces(struct nd_region
*nd_region
, int *err
)
1924 struct device
**devs
= NULL
;
1925 int i
, rc
= 0, type
;
1928 nvdimm_bus_lock(&nd_region
->dev
);
1929 rc
= init_active_labels(nd_region
);
1931 nvdimm_bus_unlock(&nd_region
->dev
);
1935 type
= nd_region_to_nstype(nd_region
);
1937 case ND_DEVICE_NAMESPACE_IO
:
1938 devs
= create_namespace_io(nd_region
);
1940 case ND_DEVICE_NAMESPACE_PMEM
:
1941 devs
= create_namespace_pmem(nd_region
);
1943 case ND_DEVICE_NAMESPACE_BLK
:
1944 devs
= create_namespace_blk(nd_region
);
1949 nvdimm_bus_unlock(&nd_region
->dev
);
1954 for (i
= 0; devs
[i
]; i
++) {
1955 struct device
*dev
= devs
[i
];
1958 if (type
== ND_DEVICE_NAMESPACE_BLK
) {
1959 struct nd_namespace_blk
*nsblk
;
1961 nsblk
= to_nd_namespace_blk(dev
);
1962 id
= ida_simple_get(&nd_region
->ns_ida
, 0, 0,
1970 dev_set_name(dev
, "namespace%d.%d", nd_region
->id
, id
);
1971 dev
->groups
= nd_namespace_attribute_groups
;
1972 nd_device_register(dev
);
1975 nd_region
->ns_seed
= devs
[0];
1980 for (j
= i
; devs
[j
]; j
++) {
1981 struct device
*dev
= devs
[j
];
1983 device_initialize(dev
);
1988 * All of the namespaces we tried to register failed, so
1989 * fail region activation.