// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static uuid_t nvdimm_btt_uuid;
static uuid_t nvdimm_btt2_uuid;
static uuid_t nvdimm_pfn_uuid;
static uuid_t nvdimm_dax_uuid;

static uuid_t cxl_region_uuid;
static uuid_t cxl_namespace_uuid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || b == 0)
		return max(a, b);
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
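/*
 * Worked example, assuming the 2-bit sequence cycle implied by
 * NSINDEX_SEQ_MASK: live values rotate 1 -> 2 -> 3 -> 1 with 0 reserved
 * as invalid.  So best_seq(1, 2) == 2 (one increment ahead wins),
 * best_seq(3, 1) == 1 (the cycle wraps 3 -> 1), and best_seq(0, 3) == 3
 * (only one side is valid).
 */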
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
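/*
 * Sizing sketch for the helper below: an index block is the fixed index
 * header plus a free-slot bitmap carrying one bit per label slot
 * (DIV_ROUND_UP(nslot, 8) bytes), with the total rounded up to the next
 * NSINDEX_ALIGN boundary.
 */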
static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes.  The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
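/*
 * Example of that floor: with 256-byte index blocks, v1.2 256-byte
 * labels need a label storage area of at least 2 * 256 + 2 * 256 =
 * 1024 bytes, while pre-v1.2 128-byte labels need 2 * 256 + 2 * 128 =
 * 768 bytes.
 */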
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
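	 *
	 * A sketch of the resulting on-media layout:
	 *
	 *	+------------+
	 *	|  nsindex0  |
	 *	+------------+
	 *	|  nsindex1  |
	 *	+------------+
	 *	|   label0   |
	 *	+------------+
	 *	|     ...    |
	 *	+------------+
	 *	|   labelN   |
	 *	+------------+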
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
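/*
 * Example: if both index blocks validate with sequence numbers 2
 * (nsindex0) and 3 (nsindex1), best_seq() returns 3, the comparison
 * above matches nsindex1, and index 1 is reported as "current".
 */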
static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}
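/*
 * For example, a DIMM formatted with pre-v1.2 128-byte labels should
 * validate on the first probe, while a v1.2 device with 256-byte labels
 * fails that probe (the index geometry no longer matches) and validates
 * on the second.
 */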
static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}
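/*
 * Typical usage sketch for the preamble helpers: fetch an index block
 * and its free-slot bitmap in one shot, then walk the active labels
 * (a clear bit in the "free" bitmap marks an in-use slot):
 *
 *	if (!preamble_current(ndd, &nsindex, &free, &nslot))
 *		return 0;
 *	for_each_clear_bit_le(slot, free, nslot)
 *		... process the label at 'slot' ...
 */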
char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
		u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "pmem-%pUb", uuid);
	return label_id->id;
}
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	u64 sum, sum_save;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return true;

	sum_save = nsl_get_checksum(ndd, nd_label);
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum_save);
	return sum == sum_save;
}
static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	u64 sum;

	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
		return;
	nsl_set_checksum(ndd, nd_label, 0);
	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
	nsl_set_checksum(ndd, nd_label, sum);
}
static bool slot_valid(struct nvdimm_drvdata *ndd,
		       struct nd_namespace_label *nd_label, u32 slot)
{
	bool valid;

	/* check that we are written where we expect to be written */
	if (slot != nsl_get_slot(ndd, nd_label))
		return false;
	valid = nsl_validate_checksum(ndd, nd_label);
	if (!valid)
		dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
	return valid;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		struct nd_label_id label_id;
		struct resource *res;
		uuid_t label_uuid;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		nsl_get_uuid(ndd, nd_label, &label_uuid);
		flags = nsl_get_flags(ndd, nd_label);
		nd_label_gen_id(&label_id, &label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
					  nsl_get_dpa(ndd, nd_label),
					  nsl_get_rawsize(ndd, nd_label));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;
	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste from each read */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			DIV_ROUND_UP(config_size, max_xfer);
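		/*
		 * Worked example with hypothetical sizes: config_size = 1000
		 * and max_xfer = 128 would take DIV_ROUND_UP(1000, 128) = 8
		 * reads with 24 wasted bytes in the last one; the adjustment
		 * above trims max_xfer by 24 / 8 = 3 down to 125, and 8 * 125
		 * covers exactly 1000 bytes.
		 */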
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
			max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
				read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = nsl_get_slot(ndd, nd_label);
			u64 size = nsl_get_rawsize(ndd, nd_label);
			u64 dpa = nsl_get_dpa(ndd, nd_label);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
				slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
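/*
 * Example: a freshly written index advertises label-set version 1.1
 * when 128-byte labels are in use and version 1.2 for 256-byte (or
 * larger) labels, mirroring the decoding in __nd_label_validate().
 */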
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}
static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}
/* CXL labels store UUIDs instead of GUIDs for the same data */
static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
{
	if (uuid_equal(uuid, &nvdimm_btt_uuid))
		return NVDIMM_CCLASS_BTT;
	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
		return NVDIMM_CCLASS_BTT2;
	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
		return NVDIMM_CCLASS_PFN;
	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
		return NVDIMM_CCLASS_DAX;
	else if (uuid_equal(uuid, &uuid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}
static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}
/* CXL labels store UUIDs instead of GUIDs for the same data */
static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
					 uuid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_uuid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_uuid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_uuid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_uuid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing uuid.
		 */
		return target;
	} else
		return &uuid_null;
}
static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}
static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (efi_namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->efi.type_guid, guid);
}
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid)
{
	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
		return true;
	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
			&nd_label->efi.type_guid);
		return false;
	}
	return true;
}
static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label,
				enum nvdimm_claim_class claim_class)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		export_uuid(nd_label->cxl.abstraction_uuid,
			    to_abstraction_uuid(claim_class, &uuid));
		return;
	}

	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return;
	guid_copy(&nd_label->efi.abstraction_guid,
		  to_abstraction_guid(claim_class,
				      &nd_label->efi.abstraction_guid));
}
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label)
{
	if (ndd->cxl) {
		uuid_t uuid;

		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
		return uuid_to_nvdimm_cclass(&uuid);
	}
	if (!efi_namespace_label_has(ndd, abstraction_guid))
		return NVDIMM_CCLASS_NONE;
	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
}
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u64 cookie;
	u32 nslot, slot;
	size_t offset;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	nsl_set_uuid(ndd, nd_label, nspm->uuid);
	nsl_set_name(ndd, nd_label, nspm->alt_name);
	nsl_set_flags(ndd, nd_label, flags);
	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
	nsl_set_nrange(ndd, nd_label, 1);
	nsl_set_position(ndd, nd_label, pos);
	nsl_set_isetcookie(ndd, nd_label, cookie);
	nsl_set_rawsize(ndd, nd_label, resource_size(res));
	nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
	nsl_set_dpa(ndd, nd_label, res->start);
	nsl_set_slot(ndd, nd_label, slot);
	nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
	nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
	nsl_calculate_checksum(ndd, nd_label);
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}
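/*
 * Example: the first-time initialization above writes nsindex0 with
 * seq 3 and nsindex1 with seq 2, so best_seq(3, 2) == 3 and index 0 is
 * selected as "current" on the next validation pass, consistent with
 * the ns_current = 0 / ns_next = 1 values recorded here.
 */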
static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
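/*
 * The two passes above implement the UEFI 2.7 "updating" handshake:
 * every mapping first receives a label with NSLABEL_FLAG_UPDATING set,
 * and only after all writes succeed is the flag cleared, so an
 * interruption mid-update leaves a detectably incomplete label set.
 */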
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));

	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));

	return 0;
}