/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
};

struct nvdimm_drvdata {
	struct device *dev;
	int nsindex_size, nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	unsigned int hints_shift;
	void __iomem *flush_wpq[0];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
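
/*
 * Example (illustrative, not from the original header): with
 * hints_shift == 2, each dimm owns num == 4 write-pointer-queue slots
 * and mask == 3, so dimm 1 with hint 5 resolves to slot
 * 1 * 4 + (5 & 3) == 5, i.e. hints wrap within a dimm's slot range.
 */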

static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))
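
/*
 * Example (illustrative): v1.2 labels are larger than v1.1 labels, so
 * fields that only exist in the larger format are gated on whether
 * their offset fits within this dimm's label size:
 *
 *	if (namespace_label_has(ndd, type_guid))
 *		... field is present in this dimm's label format ...
 */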

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)
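
/*
 * Example (illustrative): nd_dbg_dpa(NULL, ndd, res, "reserve\n") logs
 * against the dimm device directly; passing a region instead logs
 * against the region device and prefixes the dimm name. Either way
 * the resource name, size, and start dpa are printed.
 */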

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
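
/*
 * Example (illustrative): the _safe variant samples the sibling
 * pointer up front, so the current resource may be released while
 * walking a dimm's dpa resource tree:
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		if (strcmp(res->name, label_id.id) == 0)
 *			nvdimm_free_dpa(ndd, res);
 */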

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

enum nd_label_flags {
	ND_LABEL_REAP,
};

struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
	 * conversions use to_ndd() which respects enabled state of the
	 * nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	struct nd_mapping mapping[0];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
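
/*
 * Example (illustrative): the two-bit sequence numbers cycle
 * 1 -> 2 -> 3 -> 1 (binary 01 -> 10 -> 11), so nd_inc_seq(1) == 2,
 * nd_inc_seq(2) == 3, and nd_inc_seq(3) == 1; 0 is not part of the
 * cycle and maps to the invalid value 0.
 */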

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
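
/*
 * Example (illustrative): callers validate a 4K superblock by saving
 * the advertised checksum, zeroing the field, and recomputing:
 *
 *	u64 checksum = le64_to_cpu(sb->checksum);
 *
 *	sb->checksum = 0;
 *	if (checksum != nd_sb_checksum(sb))
 *		... superblock is corrupt or foreign ...
 *	sb->checksum = cpu_to_le64(checksum);
 */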
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE
#else
#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE
#endif

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
int nvdimm_revalidate_disk(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct dev_pagemap *pgmap)
{
	return -ENXIO;
}

static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_disk;

	if (!blk_queue_io_stat(disk->queue))
		return false;

	*start = jiffies;
	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
			&disk->part0);

	return true;
}

static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_disk;

	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
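
/*
 * Example (illustrative): drivers bracket bio processing with the pair
 * above so accounting only occurs when io_stat is enabled:
 *
 *	unsigned long start;
 *	bool do_acct = nd_iostat_start(bio, &start);
 *
 *	... process bio ...
 *
 *	if (do_acct)
 *		nd_iostat_end(bio, start);
 */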

static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
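
/*
 * Example (illustrative): len is in bytes while badblocks_check()
 * takes a count of 512-byte sectors, so checking a 4096-byte page at
 * sector 8 via is_bad_pmem(bb, 8, 4096) covers sectors 8..15.
 */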

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */