/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;
static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || b == 0)
		return max(a, b);
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}
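/*
 * Example: the sequence number cycles 1 -> 2 -> 3 -> 1, so with a = 3
 * and b = 1, nd_inc_seq(a) == b and best_seq() returns 1, i.e. the
 * index block written most recently wins.
 */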
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}
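/*
 * Worked example (illustrative numbers): with a 128KB label area and
 * 256-byte labels, tmp_nslot = 131072 / 256 = 512, the index block
 * needed to cover 512 slots rounds up to a single 256-byte
 * NSINDEX_ALIGN unit, and the usable slot count becomes
 * (131072 - 2 * 256) / 256 = 510.
 */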
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is
	 * large enough to hold 2 index blocks and 2 labels.  The
	 * minimum index block size is 256 bytes, and the minimum label
	 * size is 256 bytes.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}
int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
{
	/* nothing to copy if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}
static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
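/*
 * Note: in the free bitmap a set bit means the slot is free, so the
 * *clear* bits are the slots holding active labels.  That is why the
 * scan loops below iterate with for_each_clear_bit_le().
 */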
/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}
static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}
static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}
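/*
 * Slot allocation and free operate on the 'next' (staging) index: new
 * labels are staged against ns_next and only become visible once
 * nd_label_write_index() publishes that index with a newer sequence
 * number.
 */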
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}
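/*
 * Rebuild and persist an index block.  After a non-init write the
 * roles rotate: the index that was just written becomes 'current' and
 * the previously current one becomes the new 'next' staging index.
 */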
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}
static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
		guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}
static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}
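/*
 * Write one pmem namespace label for the mapping at position 'pos':
 * stage the label in a freshly allocated slot of the 'next' index,
 * flush it to media, garbage collect any superseded label for the same
 * uuid, then publish the new index and update the in-memory tracking
 * list under nd_mapping->lock.
 */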
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}
static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}
static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}
/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
				GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		kfree(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	kfree(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}
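/*
 * Grow the in-memory label tracking list for a mapping to num_labels
 * entries, and if the dimm has no valid index blocks yet, write a
 * freshly initialized pair so that label updates have a staging index
 * to target.
 */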
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}
static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
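/*
 * Per UEFI 2.7 the labels are first written with NSLABEL_FLAG_UPDATING
 * set, and the flag is only cleared in a second pass once every
 * mapping in the interleave set has a consistent label, so a crash
 * mid-update remains detectable.
 */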
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}
int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}
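/*
 * Parse the well-known claim-class GUID strings once at module init so
 * the guid_equal() comparisons above operate on binary values.
 */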
int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}