/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
};

struct nvdimm_drvdata {
	struct device *dev;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	bool cxl;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return nd_label->cxl.name;
	return nd_label->efi.name;
}

static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	if (ndd->cxl)
		return memcpy(name, nd_label->cxl.name, NSLABEL_NAME_LEN);
	return memcpy(name, nd_label->efi.name, NSLABEL_NAME_LEN);
}

static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	if (ndd->cxl)
		return memcpy(nd_label->cxl.name, name, NSLABEL_NAME_LEN);
	return memcpy(nd_label->efi.name, name, NSLABEL_NAME_LEN);
}

static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le32_to_cpu(nd_label->cxl.slot);
	return __le32_to_cpu(nd_label->efi.slot);
}

static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label, u32 slot)
{
	if (ndd->cxl)
		nd_label->cxl.slot = __cpu_to_le32(slot);
	else
		nd_label->efi.slot = __cpu_to_le32(slot);
}

static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.checksum);
	return __le64_to_cpu(nd_label->efi.checksum);
}

static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u64 checksum)
{
	if (ndd->cxl)
		nd_label->cxl.checksum = __cpu_to_le64(checksum);
	else
		nd_label->efi.checksum = __cpu_to_le64(checksum);
}

static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le32_to_cpu(nd_label->cxl.flags);
	return __le32_to_cpu(nd_label->efi.flags);
}

static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label, u32 flags)
{
	if (ndd->cxl)
		nd_label->cxl.flags = __cpu_to_le32(flags);
	else
		nd_label->efi.flags = __cpu_to_le32(flags);
}

static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.dpa);
	return __le64_to_cpu(nd_label->efi.dpa);
}

static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u64 dpa)
{
	if (ndd->cxl)
		nd_label->cxl.dpa = __cpu_to_le64(dpa);
	else
		nd_label->efi.dpa = __cpu_to_le64(dpa);
}

static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le64_to_cpu(nd_label->cxl.rawsize);
	return __le64_to_cpu(nd_label->efi.rawsize);
}

static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 rawsize)
{
	if (ndd->cxl)
		nd_label->cxl.rawsize = __cpu_to_le64(rawsize);
	else
		nd_label->efi.rawsize = __cpu_to_le64(rawsize);
}

static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	/* WARN future refactor attempts that break this assumption */
	if (dev_WARN_ONCE(ndd->dev, ndd->cxl,
			  "CXL labels do not use the isetcookie concept\n"))
		return 0;
	return __le64_to_cpu(nd_label->efi.isetcookie);
}

static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
				      struct nd_namespace_label *nd_label,
				      u64 isetcookie)
{
	if (!ndd->cxl)
		nd_label->efi.isetcookie = __cpu_to_le64(isetcookie);
}

static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
					   struct nd_namespace_label *nd_label,
					   u64 cookie)
{
	/*
	 * Let the EFI and CXL validation comingle, where fields that
	 * don't matter to CXL always validate.
	 */
	if (ndd->cxl)
		return true;
	return cookie == __le64_to_cpu(nd_label->efi.isetcookie);
}

static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.position);
	return __le16_to_cpu(nd_label->efi.position);
}

static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u16 position)
{
	if (ndd->cxl)
		nd_label->cxl.position = __cpu_to_le16(position);
	else
		nd_label->efi.position = __cpu_to_le16(position);
}

static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return 0;
	return __le16_to_cpu(nd_label->efi.nlabel);
}

static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  u16 nlabel)
{
	if (!ndd->cxl)
		nd_label->efi.nlabel = __cpu_to_le16(nlabel);
}

static inline u16 nsl_get_nrange(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.nrange);
	return 1;
}

static inline void nsl_set_nrange(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  u16 nrange)
{
	if (ndd->cxl)
		nd_label->cxl.nrange = __cpu_to_le16(nrange);
}

static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	/*
	 * Yes, for some reason the EFI labels convey a massive 64-bit
	 * lbasize, that got fixed for CXL.
	 */
	if (ndd->cxl)
		return __le16_to_cpu(nd_label->cxl.lbasize);
	return __le64_to_cpu(nd_label->efi.lbasize);
}

static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 lbasize)
{
	if (ndd->cxl)
		nd_label->cxl.lbasize = __cpu_to_le16(lbasize);
	else
		nd_label->efi.lbasize = __cpu_to_le64(lbasize);
}

static inline const uuid_t *nsl_get_uuid(struct nvdimm_drvdata *ndd,
					 struct nd_namespace_label *nd_label,
					 uuid_t *uuid)
{
	if (ndd->cxl)
		import_uuid(uuid, nd_label->cxl.uuid);
	else
		import_uuid(uuid, nd_label->efi.uuid);
	return uuid;
}

static inline const uuid_t *nsl_set_uuid(struct nvdimm_drvdata *ndd,
					 struct nd_namespace_label *nd_label,
					 const uuid_t *uuid)
{
	if (ndd->cxl)
		export_uuid(nd_label->cxl.uuid, uuid);
	else
		export_uuid(nd_label->efi.uuid, uuid);
	return uuid;
}

static inline bool nsl_uuid_equal(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  const uuid_t *uuid)
{
	uuid_t tmp;

	if (ndd->cxl)
		import_uuid(&tmp, nd_label->cxl.uuid);
	else
		import_uuid(&tmp, nd_label->efi.uuid);
	return uuid_equal(&tmp, uuid);
}

static inline const u8 *nsl_uuid_raw(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return nd_label->cxl.uuid;
	return nd_label->efi.uuid;
}

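/*
 * Illustrative sketch, not part of the original header: the nsl_*
 * accessors above let label handling code stay agnostic to the EFI vs
 * CXL on-media layout; ndd->cxl selects the format inside each helper.
 * A hypothetical caller might do:
 *
 *	u32 slot = nsl_get_slot(ndd, nd_label);
 *	u64 dpa = nsl_get_dpa(ndd, nd_label);
 *
 *	dev_dbg(ndd->dev, "label slot %u maps dpa %#llx\n", slot, dpa);
 */
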
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid);
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label);

struct nd_region_data {
	unsigned int hints_shift;
	void __iomem *flush_wpq[];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}

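/*
 * Illustrative sketch, not part of the original header: hints_shift
 * sizes a power-of-two window of write-pending-queue (WPQ) flush hint
 * addresses per DIMM, and the hint index is masked into that window.
 * With hints_shift = 2 (four hints per DIMM), DIMM 1 / hint 5 resolves
 * as:
 *
 *	num  = 1 << 2;				// 4
 *	mask = num - 1;				// 0x3
 *	flush_wpq[1 * 4 + (5 & 0x3)];		// slot 5: hint 1 of DIMM 1
 */
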
static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define efi_namespace_label_has(ndd, field) \
	(!ndd->cxl && offsetof(struct nvdimm_efi_label, field) \
		< sizeof_namespace_label(ndd))

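/*
 * Illustrative usage, not from the original header: older EFI label
 * formats are shorter than the current struct nvdimm_efi_label, so code
 * that touches newer fields first checks that the field's offset falls
 * inside the label size actually present on the DIMM, e.g.:
 *
 *	if (efi_namespace_label_has(ndd, type_guid))
 *		... safe to read nd_label->efi.type_guid ...
 *
 * For CXL labels the macro always evaluates to false.
 */
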
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)

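/*
 * Illustrative usage, not part of the original header: these iterators
 * walk the DPA (DIMM physical address) resources hanging off ndd->dpa.
 * The _safe variant tolerates releasing the current resource mid-walk,
 * e.g. freeing every allocation that matches a (hypothetical) label_id:
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		if (strcmp(res->name, label_id.id) == 0)
 *			nvdimm_free_dpa(ndd, res);
 */
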
struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

enum nd_label_flags {
	ND_LABEL_REAP,
};

struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
	 * conversions use to_ndd() which respects enabled state of the
	 * nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
	struct nd_mapping mapping[] __counted_by(ndr_mappings);
};

static inline bool nsl_validate_nlabel(struct nd_region *nd_region,
				       struct nvdimm_drvdata *ndd,
				       struct nd_namespace_label *nd_label)
{
	if (ndd->cxl)
		return true;
	return nsl_get_nlabel(ndd, nd_label) == nd_region->ndr_mappings;
}

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}

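/*
 * Illustrative trace, not part of the original header: the namespace
 * index blocks alternate sequence numbers 1 (01b), 2 (10b), 3 (11b),
 * while a sequence value of 0 simply maps back to 0:
 *
 *	nd_inc_seq(1) == 2
 *	nd_inc_seq(2) == 3
 *	nd_inc_seq(3) == 1
 */
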
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	struct device dev;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

static inline u32 nd_info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}

void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);

extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif

struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#define MAX_NVDIMM_ALIGN 4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(const struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax,
		struct nd_namespace_common *ndns)
{
	if (!nd_dax)
		return NULL;
	return nd_pfn_devinit(&nd_dax->nd_pfn, ndns);
}
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(const struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size);
void devm_namespace_disable(struct device *dev,
		struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
				   struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
#endif

int nd_region_activate(struct nd_region *nd_region);

static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

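/*
 * Illustrative note, not from the original header: len is in bytes and
 * is converted to 512-byte sectors for badblocks_check(), so a 4K I/O
 * at sector 8 is checked as is_bad_pmem(bb, 8, SZ_4K), i.e. 8 sectors.
 */
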
const uuid_t *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */