/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};

struct nvdimm_drvdata {
	struct device *dev;
	int nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
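
/*
 * Worked example of the slot arithmetic above (illustrative values, not
 * taken from this file): with hints_shift == 2 each dimm owns four
 * consecutive flush_wpq slots, so dimm 1 / hint 5 resolves to slot
 * 1 * 4 + (5 & 3) == 5.
 */
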
63 static inline struct nd_namespace_index *to_namespace_index(
64 struct nvdimm_drvdata *ndd, int i)
66 if (i < 0)
67 return NULL;
69 return ndd->data + sizeof_namespace_index(ndd) * i;
72 static inline struct nd_namespace_index *to_current_namespace_index(
73 struct nvdimm_drvdata *ndd)
75 return to_namespace_index(ndd, ndd->ns_current);
78 static inline struct nd_namespace_index *to_next_namespace_index(
79 struct nvdimm_drvdata *ndd)
81 return to_namespace_index(ndd, ndd->ns_next);
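
/*
 * Descriptive note (assumption, not from the original header): the label
 * area keeps two namespace index blocks, and ns_current / ns_next name the
 * live copy and the copy to be written next, so an index update never
 * overwrites the current one in place.
 */
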
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))

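/*
 * Illustrative use (not part of this header): namespace_label_has(ndd,
 * type_guid) evaluates true only when the field's offset fits inside
 * sizeof_namespace_label(ndd), i.e. when the dimm carries labels large
 * enough to include that field.
 */
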
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
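
/*
 * Typical (illustrative) use, assuming the caller already holds the
 * required locks:
 *
 *	for_each_dpa_resource(ndd, res)
 *		nd_dbg_dpa(NULL, ndd, res, "\n");
 *
 * The _safe variant tolerates freeing @res inside the loop body.
 */
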
struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

enum nd_label_flags {
	ND_LABEL_REAP,
};

struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
	 * conversions use to_ndd() which respects enabled state of the
	 * nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	unsigned long align;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
	struct nd_mapping mapping[];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
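
/*
 * Worked example (derived from the table above): nd_inc_seq(1) == 2,
 * nd_inc_seq(2) == 3, nd_inc_seq(3) == 1, so valid sequence numbers cycle
 * 1 -> 2 -> 3 -> 1, while 0 maps back to 0.
 */
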
struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

static inline u32 nd_info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
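/*
 * Descriptive note (assumption, not from the original header): nd_gen_sb
 * overlays a 4K info block so that @checksum occupies the last 8 bytes,
 * which nd_sb_checksum() excludes while summing the rest of the block.
 */
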
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#define MAX_NVDIMM_ALIGN	4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
struct range;
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size);
void devm_namespace_disable(struct device *dev,
		struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
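
/*
 * Note: @len above is in bytes and is converted to 512-byte sectors for
 * badblocks_check(), while @sector is already a sector number.
 */
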
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */