// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "nvmem.h"

struct nvmem_cell {
        const char              *name;
        int                     offset;
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
        struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (nvmem->reg_write)
                return nvmem->reg_write(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_simple_remove(&nvmem_ida, nvmem->id);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release        = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name           = "nvmem",
};

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
        struct device *d;

        if (!nvmem_np)
                return NULL;

        d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
        struct device *d;

        d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
        of_node_put(cell->np);
        kfree_const(cell->name);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell *cell, *p;

        list_for_each_entry_safe(cell, p, &nvmem->cells, node)
                nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_mutex);
        list_add_tail(&cell->node, &cell->nvmem->cells);
        mutex_unlock(&nvmem_mutex);
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
                                               const struct nvmem_cell_info *info,
                                               struct nvmem_cell *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->bytes = info->bytes;
        cell->name = info->name;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name ?: "<unknown>", nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
                                         const struct nvmem_cell_info *info,
                                         struct nvmem_cell *cell)
{
        int err;

        err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
        if (err)
                return err;

        cell->name = kstrdup_const(info->name, GFP_KERNEL);
        if (!cell->name)
                return -ENOMEM;

        return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
                           const struct nvmem_cell_info *info,
                           int ncells)
{
        struct nvmem_cell **cells;
        int i, rval;

        cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
        if (!cells)
                return -ENOMEM;

        for (i = 0; i < ncells; i++) {
                cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
                if (!cells[i]) {
                        rval = -ENOMEM;
                        goto err;
                }

                rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
                if (rval) {
                        kfree(cells[i]);
                        goto err;
                }

                nvmem_cell_add(cells[i]);
        }

        /* remove tmp array */
        kfree(cells);

        return 0;
err:
        while (i--)
                nvmem_cell_drop(cells[i]);

        kfree(cells);

        return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
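
/*
 * Usage sketch (illustrative only, not taken from this file): a listener can
 * subscribe to nvmem events such as NVMEM_ADD or NVMEM_CELL_ADD.  The
 * foo_nvmem_event() callback and foo_nvmem_nb names below are hypothetical.
 *
 *      static int foo_nvmem_event(struct notifier_block *nb,
 *                                 unsigned long event, void *data)
 *      {
 *              if (event == NVMEM_ADD)
 *                      pr_info("nvmem provider registered\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_nvmem_nb = {
 *              .notifier_call = foo_nvmem_event,
 *      };
 *
 *      nvmem_register_notifier(&foo_nvmem_nb);
 *      ...
 *      nvmem_unregister_notifier(&foo_nvmem_nb);
 */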

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
        const struct nvmem_cell_info *info;
        struct nvmem_cell_table *table;
        struct nvmem_cell *cell;
        int rval = 0, i;

        mutex_lock(&nvmem_cell_mutex);
        list_for_each_entry(table, &nvmem_cell_tables, node) {
                if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
                        for (i = 0; i < table->ncells; i++) {
                                info = &table->cells[i];

                                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                                if (!cell) {
                                        rval = -ENOMEM;
                                        goto out;
                                }

                                rval = nvmem_cell_info_to_nvmem_cell(nvmem,
                                                                     info,
                                                                     cell);
                                if (rval) {
                                        kfree(cell);
                                        goto out;
                                }

                                nvmem_cell_add(cell);
                        }
                }
        }

out:
        mutex_unlock(&nvmem_cell_mutex);
        return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
        struct nvmem_cell *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (strcmp(cell_id, iter->name) == 0) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
        struct device_node *parent, *child;
        struct device *dev = &nvmem->dev;
        struct nvmem_cell *cell;
        const __be32 *addr;
        int len;

        parent = dev->of_node;

        for_each_child_of_node(parent, child) {
                addr = of_get_property(child, "reg", &len);
                if (!addr || (len < 2 * sizeof(u32))) {
                        dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
                        return -EINVAL;
                }

                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                if (!cell)
                        return -ENOMEM;

                cell->nvmem = nvmem;
                cell->np = of_node_get(child);
                cell->offset = be32_to_cpup(addr++);
                cell->bytes = be32_to_cpup(addr);
                cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

                addr = of_get_property(child, "bits", &len);
                if (addr && len == (2 * sizeof(u32))) {
                        cell->bit_offset = be32_to_cpup(addr++);
                        cell->nbits = be32_to_cpup(addr);
                }

                if (cell->nbits)
                        cell->bytes = DIV_ROUND_UP(
                                        cell->nbits + cell->bit_offset,
                                        BITS_PER_BYTE);

                if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                        dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
                                cell->name, nvmem->stride);
                        /* Cells already added will be freed later. */
                        kfree_const(cell->name);
                        kfree(cell);
                        return -EINVAL;
                }

                nvmem_cell_add(cell);
        }

        return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        kref_init(&nvmem->refcnt);
        INIT_LIST_HEAD(&nvmem->cells);

        nvmem->id = rval;
        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
                nvmem->owner = config->dev->driver->owner;
        nvmem->stride = config->stride ?: 1;
        nvmem->word_size = config->word_size ?: 1;
        nvmem->size = config->size;
        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;
        nvmem->priv = config->priv;
        nvmem->type = config->type;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        if (!config->no_of_node)
                nvmem->dev.of_node = config->dev->of_node;

        if (config->id == -1 && config->name) {
                dev_set_name(&nvmem->dev, "%s", config->name);
        } else {
                dev_set_name(&nvmem->dev, "%s%d",
                             config->name ? : "nvmem",
                             config->name ? config->id : nvmem->id);
        }

        nvmem->read_only = device_property_present(config->dev, "read-only") ||
                           config->read_only || !nvmem->reg_write;

        nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

        device_initialize(&nvmem->dev);

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_put_device;

        if (config->compat) {
                rval = nvmem_sysfs_setup_compat(nvmem, config);
                if (rval)
                        goto err_device_del;
        }

        if (config->cells) {
                rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
                if (rval)
                        goto err_teardown_compat;
        }

        rval = nvmem_add_cells_from_table(nvmem);
        if (rval)
                goto err_remove_cells;

        rval = nvmem_add_cells_from_of(nvmem);
        if (rval)
                goto err_remove_cells;

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

        return nvmem;

err_remove_cells:
        nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
        if (config->compat)
                nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
        device_del(&nvmem->dev);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
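
/*
 * Usage sketch (illustrative only, not taken from this file): a provider
 * driver fills in a struct nvmem_config and registers it from its probe
 * path.  The foo_* names, the 256-byte size and the pdev/foo_priv variables
 * are hypothetical placeholders for the driver's own data.
 *
 *      static int foo_reg_read(void *priv, unsigned int offset,
 *                              void *val, size_t bytes)
 *      {
 *              return 0;       (copy bytes from backing storage into val)
 *      }
 *
 *      struct nvmem_config foo_config = {
 *              .dev = &pdev->dev,
 *              .name = "foo-nvmem",
 *              .id = -1,
 *              .owner = THIS_MODULE,
 *              .size = 256,
 *              .word_size = 1,
 *              .stride = 1,
 *              .reg_read = foo_reg_read,
 *              .priv = foo_priv,
 *      };
 *
 *      nvmem = nvmem_register(&foo_config);
 *      if (IS_ERR(nvmem))
 *              return PTR_ERR(nvmem);
 */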

static void nvmem_device_release(struct kref *kref)
{
        struct nvmem_device *nvmem;

        nvmem = container_of(kref, struct nvmem_device, refcnt);

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        device_del(&nvmem->dev);
        put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
        kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
        nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
                                         const struct nvmem_config *config)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_register(config);

        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **r = res;

        return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
        return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
                                               const char *nvmem_name)
{
        struct nvmem_device *nvmem = NULL;

        mutex_lock(&nvmem_mutex);
        nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
        mutex_unlock(&nvmem_mutex);
        if (!nvmem)
                return ERR_PTR(-EPROBE_DEFER);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem_dev_name(nvmem));

                put_device(&nvmem->dev);
                return ERR_PTR(-EINVAL);
        }

        kref_get(&nvmem->refcnt);

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        put_device(&nvmem->dev);
        module_put(nvmem->owner);
        kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        int index = 0;

        if (id)
                index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-ENOENT);

        return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get a managed nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
        struct nvmem_cell *cell = ERR_PTR(-ENOENT);
        struct nvmem_cell_lookup *lookup;
        struct nvmem_device *nvmem;
        const char *dev_id;

        if (!dev)
                return ERR_PTR(-EINVAL);

        dev_id = dev_name(dev);

        mutex_lock(&nvmem_lookup_mutex);

        list_for_each_entry(lookup, &nvmem_lookup_list, node) {
                if ((strcmp(lookup->dev_id, dev_id) == 0) &&
                    (strcmp(lookup->con_id, con_id) == 0)) {
                        /* This is the right entry. */
                        nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
                        if (IS_ERR(nvmem)) {
                                /* Provider may not be registered yet. */
                                cell = ERR_CAST(nvmem);
                                break;
                        }

                        cell = nvmem_find_cell_by_name(nvmem,
                                                       lookup->cell_name);
                        if (!cell) {
                                __nvmem_device_put(nvmem);
                                cell = ERR_PTR(-ENOENT);
                        }
                        break;
                }
        }

        mutex_unlock(&nvmem_lookup_mutex);
        return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
        struct nvmem_cell *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (np == iter->np) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_device *nvmem;
        struct nvmem_cell *cell;
        int index = 0;

        /* if cell name exists, find index to the name */
        if (id)
                index = of_property_match_string(np, "nvmem-cell-names", id);

        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
                return ERR_PTR(-ENOENT);

        nvmem_np = of_get_next_parent(cell_np);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        nvmem = __nvmem_device_get(nvmem_np, NULL);
        of_node_put(nvmem_np);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        cell = nvmem_find_cell_by_node(nvmem, cell_np);
        if (!cell) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
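
/*
 * Usage sketch (illustrative only, not taken from this file): with consumer
 * device-tree properties along the lines of
 *
 *      nvmem-cells = <&mac_address>;
 *      nvmem-cell-names = "mac-address";
 *
 * the cell can be looked up directly from the consumer node.  The node and
 * cell names used here are hypothetical.
 *
 *      struct nvmem_cell *cell;
 *
 *      cell = of_nvmem_cell_get(dev->of_node, "mac-address");
 *      if (IS_ERR(cell))
 *              return PTR_ERR(cell);
 */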

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        /* NULL cell id only allowed for device tree; invalid otherwise */
        if (!id)
                return ERR_PTR(-EINVAL);

        return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
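
/*
 * Usage sketch (illustrative only, not taken from this file): a typical
 * consumer obtains a cell by name, reads it into a kmalloc'ed buffer and
 * releases both.  The cell name "calibration" is hypothetical.
 *
 *      struct nvmem_cell *cell;
 *      size_t len;
 *      u8 *data;
 *
 *      cell = nvmem_cell_get(dev, "calibration");
 *      if (IS_ERR(cell))
 *              return PTR_ERR(cell);
 *
 *      data = nvmem_cell_read(cell, &len);
 *      nvmem_cell_put(cell);
 *      if (IS_ERR(data))
 *              return PTR_ERR(data);
 *
 *      ... use len bytes at data ...
 *      kfree(data);
 */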

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                             devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->nvmem;

        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
        u8 *p, *b;
        int i, extra, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }
        } else {
                /* point to the msb */
                p += cell->bytes - 1;
        }

        /* result fits in less bytes */
        extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
        while (--extra >= 0)
                *p-- = 0;

        /* clear msb bits if any leftover in the last byte */
        *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
                             struct nvmem_cell *cell,
                             void *buf, size_t *len)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell, buf, len);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
                                             u8 *_buf, int len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int i, rc, nbits, bit_offset = cell->bit_offset;
        u8 v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        memcpy(buf, _buf, len);
        p = b = buf;

        if (bit_offset) {
                pbyte = *b;
                *b <<= bit_offset;

                /* setup the first byte with lsb bits from nvmem */
                rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
                if (rc)
                        goto err;
                *b++ |= GENMASK(bit_offset - 1, 0) & v;

                /* setup rest of the byte if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get last byte bits and shift them towards lsb */
                        pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
                        pbyte = *b;
                        p = b;
                        *b <<= bit_offset;
                        *b++ |= pbits;
                }
        }

        /* if it's not end on byte boundary */
        if ((nbits + bit_offset) % BITS_PER_BYTE) {
                /* setup the last byte with msb bits from nvmem */
                rc = nvmem_reg_read(nvmem,
                                    cell->offset + cell->bytes - 1, &v, 1);
                if (rc)
                        goto err;
                *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
        }

        return buf;
err:
        kfree(buf);
        return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int rc;

        if (!nvmem || nvmem->read_only ||
            (cell->bit_offset == 0 && len != cell->bytes))
                return -EINVAL;

        if (cell->bit_offset || cell->nbits) {
                buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
        }

        rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

        /* free the tmp buffer */
        if (cell->bit_offset || cell->nbits)
                kfree(buf);

        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
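
/*
 * Usage sketch (illustrative only, not taken from this file): writing a
 * whole cell back.  The buffer length must match the cell size unless the
 * cell has a bit_offset; the new_mac contents are hypothetical.
 *
 *      u8 new_mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
 *      int ret;
 *
 *      ret = nvmem_cell_write(cell, new_mac, sizeof(new_mac));
 *      if (ret < 0)
 *              return ret;
 */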

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != sizeof(*val)) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }
        memcpy(val, buf, sizeof(*val));
        kfree(buf);
        nvmem_cell_put(cell);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != sizeof(*val)) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }

        memcpy(val, buf, sizeof(*val));

        kfree(buf);
        nvmem_cell_put(cell);
        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
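
/*
 * Usage sketch (illustrative only, not taken from this file): the u32 helper
 * wraps get/read/put in one call.  The cell name "soc-revision" is
 * hypothetical.
 *
 *      u32 rev;
 *      int ret;
 *
 *      ret = nvmem_cell_read_u32(dev, "soc-revision", &rev);
 *      if (ret)
 *              return ret;
 */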

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
                               struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;
        ssize_t len;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
        if (rc)
                return rc;

        rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
                            struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
        if (rc)
                return rc;

        return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
                      unsigned int offset,
                      size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_read(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
                       unsigned int offset,
                       size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_write(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
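
/*
 * Usage sketch (illustrative only, not taken from this file): raw
 * device-level access, bypassing cells.  The device name "foo-nvmem" and
 * the offset are hypothetical.
 *
 *      struct nvmem_device *nvmem;
 *      u8 serial[16];
 *      int ret;
 *
 *      nvmem = nvmem_device_get(dev, "foo-nvmem");
 *      if (IS_ERR(nvmem))
 *              return PTR_ERR(nvmem);
 *
 *      ret = nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *      nvmem_device_put(nvmem);
 *      if (ret < 0)
 *              return ret;
 */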

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
        mutex_lock(&nvmem_cell_mutex);
        list_add_tail(&table->node, &nvmem_cell_tables);
        mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
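
/*
 * Usage sketch (illustrative only, not taken from this file): a board file
 * or provider can describe cells for a named nvmem device without device
 * tree.  The names and layout below are hypothetical.
 *
 *      static struct nvmem_cell_info foo_cells[] = {
 *              {
 *                      .name   = "mac-address",
 *                      .offset = 0x0,
 *                      .bytes  = 6,
 *              },
 *      };
 *
 *      static struct nvmem_cell_table foo_cell_table = {
 *              .nvmem_name = "foo-nvmem",
 *              .cells      = foo_cells,
 *              .ncells     = ARRAY_SIZE(foo_cells),
 *      };
 *
 *      nvmem_add_cell_table(&foo_cell_table);
 */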

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
        mutex_lock(&nvmem_cell_mutex);
        list_del(&table->node);
        mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
        int i;

        mutex_lock(&nvmem_lookup_mutex);
        for (i = 0; i < nentries; i++)
                list_add_tail(&entries[i].node, &nvmem_lookup_list);
        mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
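
/*
 * Usage sketch (illustrative only, not taken from this file): lookup
 * entries connect a consumer device/con_id pair to a provider cell on
 * non-DT systems, matching the search done in nvmem_cell_get_from_lookup().
 * The names below are hypothetical.
 *
 *      static struct nvmem_cell_lookup foo_lookups[] = {
 *              {
 *                      .nvmem_name = "foo-nvmem",
 *                      .cell_name  = "mac-address",
 *                      .dev_id     = "foo-eth.0",
 *                      .con_id     = "mac-address",
 *              },
 *      };
 *
 *      nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */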

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
        int i;

        mutex_lock(&nvmem_lookup_mutex);
        for (i = 0; i < nentries; i++)
                list_del(&entries[i].node);
        mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
        return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
        return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
        bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");