// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

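/*
 * Illustrative sketch, not part of this file: a caller reconfiguring
 * bus state pairs the helpers above, with is_nvdimm_bus_locked()
 * available for assertions in paths that require the lock:
 *
 *	nvdimm_bus_lock(dev);
 *	WARN_ON(!is_nvdimm_bus_locked(dev));
 *	... modify mappings / namespace associations ...
 *	nvdimm_bus_unlock(dev);
 */
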
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;

	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);

	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);

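/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a region driver mapping a shared control area might do:
 *
 *	void *base = devm_nvdimm_memremap(dev, res->start,
 *			resource_size(res), ARCH_MEMREMAP_PMEM);
 *	if (!base)
 *		return -ENXIO;
 *
 * The mapping is reference counted per (bus, offset); the devm action
 * registered above drops the reference when @dev is unbound, and the
 * last put releases the region and unmaps it.
 */
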
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

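/*
 * Sketch of a typical checksum pattern (illustrative field and helper
 * names): the stored checksum is zeroed before summing so the result
 * is stable across verify and update:
 *
 *	nd_label->checksum = 0;
 *	sum = nd_fletcher64(nd_label, label_size, 1);
 *	nd_label->checksum = __cpu_to_le64(sum);
 *
 * Note the loop above consumes @len in sizeof(u32) units, so trailing
 * bytes of a non-multiple-of-4 buffer are ignored.
 */
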
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

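/*
 * nd_uuid_parse() consumes hex digits a byte-pair at a time and skips a
 * single '-', ':', or '\n' between pairs, so these (illustrative) forms
 * all parse to the same 16 bytes:
 *
 *	"73d2dd86-bdbd-4c52-9d25-6f2e1d74a5c3"
 *	"73d2dd86bdbd4c529d256f2e1d74a5c3"
 *	"73:d2:dd:86:bd:bd:4c:52:9d:25:6f:2e:1d:74:a5:c3"
 */
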
/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

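/*
 * Illustrative sketch (hypothetical device type, not part of this
 * file): a 'uuid' sysfs store handler delegates to nd_uuid_store()
 * under the device lock, roughly:
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		ssize_t rc;
 *
 *		nd_device_lock(dev);
 *		rc = nd_uuid_store(dev, &to_my_dev(dev)->uuid, buf, len);
 *		nd_device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 *
 * where to_my_dev() stands in for the device type's container_of()
 * helper.
 */
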
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

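/*
 * Sketch of a hypothetical user of the two helpers above: a device that
 * advertises selectable sector sizes keeps a zero-terminated table and
 * forwards its sysfs show/store to them:
 *
 *	static const unsigned long my_lbasize_supported[] = { 512, 4096, 0 };
 *
 *	show:  nd_size_select_show(my_dev->lbasize, my_lbasize_supported, buf);
 *	store: nd_size_select_store(dev, buf, &my_dev->lbasize,
 *			my_lbasize_supported);
 *
 * The show side brackets the current selection, e.g. "[512] 4096 ".
 */
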
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);

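/*
 * Illustrative sketch (hypothetical bus provider, not part of this
 * file): platform code that learns of poisoned media forwards the span
 * so it can later be reconciled against namespaces:
 *
 *	if (nvdimm_bus_add_badrange(nvdimm_bus, addr, len))
 *		dev_warn(dev, "failed to record badrange %#llx+%#llx\n",
 *				addr, len);
 */
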
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;

	rc = nvdimm_init();
	if (rc)
		goto err_dimm;

	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);