// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"
LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);
void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);
bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
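
/*
 * Example (sketch): callers are expected to bracket reconfiguration of
 * bus-private state with the pair above; the body shown here is an
 * illustrative placeholder, not a real caller.
 *
 *	nvdimm_bus_lock(dev);
 *	... modify mapping_list or other reconfig_mutex-protected state ...
 *	nvdimm_bus_unlock(dev);
 *
 * with dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), ...) available to
 * assert the convention in debug paths.
 */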
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};
static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}
static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}
static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}
static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
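
/*
 * Example (sketch): how a bus provider might share one flush-hint page
 * across regions. The function name, the source of "phys", and the use
 * of MEMREMAP_WB are illustrative assumptions; the devm action
 * registered above drops the mapping reference when "dev" is released.
 *
 *	static int example_map_flush_page(struct device *dev,
 *			resource_size_t phys)
 *	{
 *		void *hint = devm_nvdimm_memremap(dev, phys, PAGE_SIZE,
 *				MEMREMAP_WB);
 *
 *		if (!hint)
 *			return -ENXIO;
 *		return 0;
 *	}
 */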
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
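
/*
 * Example (sketch): validating a checksum the way label-style consumers
 * use nd_fletcher64(). The "example_label" layout is hypothetical; the
 * checksum field must be zeroed while the sum is recomputed.
 *
 *	struct example_label {
 *		u8 data[248];
 *		__le64 checksum;
 *	};
 *
 *	static bool example_label_valid(struct example_label *label)
 *	{
 *		u64 sum, saved = le64_to_cpu(label->checksum);
 *
 *		label->checksum = 0;
 *		sum = nd_fletcher64(label, sizeof(*label), 1);
 *		label->checksum = cpu_to_le64(saved);
 *		return sum == saved;
 *	}
 */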
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
		size_t len)
{
	uuid_t uuid;
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = uuid_parse(buf, &uuid);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
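
/*
 * Example (sketch): a 'uuid' sysfs store built on nd_uuid_store(). The
 * "example_dev" container and its ->uuid member are hypothetical; the
 * explicit device_lock() satisfies the locking expectation documented
 * above.
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct example_dev *edev = to_example_dev(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &edev->uuid, buf, len);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 */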
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
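
/*
 * Example (sketch): a 'sector_size' attribute pair built on the two
 * helpers above. The "example_dev" container, its ->lbasize member, and
 * the supported-size table are hypothetical; the table is
 * zero-terminated, as the helpers require.
 *
 *	static const unsigned long example_lbasize_supported[] = {
 *		512, 4096, 0 };
 *
 *	static ssize_t sector_size_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct example_dev *edev = to_example_dev(dev);
 *
 *		return nd_size_select_show(edev->lbasize,
 *				example_lbasize_supported, buf);
 *	}
 *
 *	static ssize_t sector_size_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct example_dev *edev = to_example_dev(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_size_select_store(dev, buf, &edev->lbasize,
 *				example_lbasize_supported);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 */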
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);
static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}
static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}
static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);
static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);
static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}
static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}
static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);
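
/*
 * From userspace these attributes surface under the bus device's
 * 'firmware/' group (defined below); an illustrative session, where
 * "ndbus0" is an assumed device name:
 *
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/capability
 *	quiesce
 *	# echo quiesce > /sys/bus/nd/devices/ndbus0/firmware/activate
 */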
static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no ops
	 * detected, or a negative capability is indicated.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	cap = nd_desc->fw_ops->capability(nd_desc);
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
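
/*
 * Example (sketch): a bus provider recording a poisoned range reported
 * by platform firmware; the values here are illustrative.
 *
 *	if (nvdimm_bus_add_badrange(nvdimm_bus, spa_addr, SZ_4K))
 *		dev_warn(&nvdimm_bus->dev, "failed to track badrange\n");
 *
 * where "spa_addr" stands in for a system physical address taken from
 * an ARS (address range scrub) record.
 */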
static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}
MODULE_DESCRIPTION("NVDIMM (Non-Volatile Memory Device) core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);