1 // SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
28 struct device_node
*np
;
29 struct nvmem_device
*nvmem
;
30 struct list_head node
;
/* Serializes access to the device list and per-device cell lists. */
static DEFINE_MUTEX(nvmem_mutex);
/* Allocates the numeric suffix for auto-named nvmem devices. */
static DEFINE_IDA(nvmem_ida);

/* Protects the list of board-file-provided cell definition tables. */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* Protects the list of machine-specific cell lookup entries. */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Consumers subscribe here for NVMEM_ADD/REMOVE and cell add/remove events. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
45 static int nvmem_reg_read(struct nvmem_device
*nvmem
, unsigned int offset
,
46 void *val
, size_t bytes
)
49 return nvmem
->reg_read(nvmem
->priv
, offset
, val
, bytes
);
54 static int nvmem_reg_write(struct nvmem_device
*nvmem
, unsigned int offset
,
55 void *val
, size_t bytes
)
58 return nvmem
->reg_write(nvmem
->priv
, offset
, val
, bytes
);
63 static void nvmem_release(struct device
*dev
)
65 struct nvmem_device
*nvmem
= to_nvmem_device(dev
);
67 ida_simple_remove(&nvmem_ida
, nvmem
->id
);
71 static const struct device_type nvmem_provider_type
= {
72 .release
= nvmem_release
,
75 static struct bus_type nvmem_bus_type
= {
79 static struct nvmem_device
*of_nvmem_find(struct device_node
*nvmem_np
)
86 d
= bus_find_device_by_of_node(&nvmem_bus_type
, nvmem_np
);
91 return to_nvmem_device(d
);
94 static struct nvmem_device
*nvmem_find(const char *name
)
98 d
= bus_find_device_by_name(&nvmem_bus_type
, NULL
, name
);
103 return to_nvmem_device(d
);
106 static void nvmem_cell_drop(struct nvmem_cell
*cell
)
108 blocking_notifier_call_chain(&nvmem_notifier
, NVMEM_CELL_REMOVE
, cell
);
109 mutex_lock(&nvmem_mutex
);
110 list_del(&cell
->node
);
111 mutex_unlock(&nvmem_mutex
);
112 of_node_put(cell
->np
);
113 kfree_const(cell
->name
);
117 static void nvmem_device_remove_all_cells(const struct nvmem_device
*nvmem
)
119 struct nvmem_cell
*cell
, *p
;
121 list_for_each_entry_safe(cell
, p
, &nvmem
->cells
, node
)
122 nvmem_cell_drop(cell
);
125 static void nvmem_cell_add(struct nvmem_cell
*cell
)
127 mutex_lock(&nvmem_mutex
);
128 list_add_tail(&cell
->node
, &cell
->nvmem
->cells
);
129 mutex_unlock(&nvmem_mutex
);
130 blocking_notifier_call_chain(&nvmem_notifier
, NVMEM_CELL_ADD
, cell
);
133 static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device
*nvmem
,
134 const struct nvmem_cell_info
*info
,
135 struct nvmem_cell
*cell
)
138 cell
->offset
= info
->offset
;
139 cell
->bytes
= info
->bytes
;
140 cell
->name
= info
->name
;
142 cell
->bit_offset
= info
->bit_offset
;
143 cell
->nbits
= info
->nbits
;
146 cell
->bytes
= DIV_ROUND_UP(cell
->nbits
+ cell
->bit_offset
,
149 if (!IS_ALIGNED(cell
->offset
, nvmem
->stride
)) {
151 "cell %s unaligned to nvmem stride %d\n",
152 cell
->name
?: "<unknown>", nvmem
->stride
);
159 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device
*nvmem
,
160 const struct nvmem_cell_info
*info
,
161 struct nvmem_cell
*cell
)
165 err
= nvmem_cell_info_to_nvmem_cell_nodup(nvmem
, info
, cell
);
169 cell
->name
= kstrdup_const(info
->name
, GFP_KERNEL
);
177 * nvmem_add_cells() - Add cell information to an nvmem device
179 * @nvmem: nvmem device to add cells to.
180 * @info: nvmem cell info to add to the device
181 * @ncells: number of cells in info
183 * Return: 0 or negative error code on failure.
185 static int nvmem_add_cells(struct nvmem_device
*nvmem
,
186 const struct nvmem_cell_info
*info
,
189 struct nvmem_cell
**cells
;
192 cells
= kcalloc(ncells
, sizeof(*cells
), GFP_KERNEL
);
196 for (i
= 0; i
< ncells
; i
++) {
197 cells
[i
] = kzalloc(sizeof(**cells
), GFP_KERNEL
);
203 rval
= nvmem_cell_info_to_nvmem_cell(nvmem
, &info
[i
], cells
[i
]);
209 nvmem_cell_add(cells
[i
]);
212 /* remove tmp array */
218 nvmem_cell_drop(cells
[i
]);
226 * nvmem_register_notifier() - Register a notifier block for nvmem events.
228 * @nb: notifier block to be called on nvmem events.
230 * Return: 0 on success, negative error number on failure.
232 int nvmem_register_notifier(struct notifier_block
*nb
)
234 return blocking_notifier_chain_register(&nvmem_notifier
, nb
);
236 EXPORT_SYMBOL_GPL(nvmem_register_notifier
);
239 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
241 * @nb: notifier block to be unregistered.
243 * Return: 0 on success, negative error number on failure.
245 int nvmem_unregister_notifier(struct notifier_block
*nb
)
247 return blocking_notifier_chain_unregister(&nvmem_notifier
, nb
);
249 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier
);
251 static int nvmem_add_cells_from_table(struct nvmem_device
*nvmem
)
253 const struct nvmem_cell_info
*info
;
254 struct nvmem_cell_table
*table
;
255 struct nvmem_cell
*cell
;
258 mutex_lock(&nvmem_cell_mutex
);
259 list_for_each_entry(table
, &nvmem_cell_tables
, node
) {
260 if (strcmp(nvmem_dev_name(nvmem
), table
->nvmem_name
) == 0) {
261 for (i
= 0; i
< table
->ncells
; i
++) {
262 info
= &table
->cells
[i
];
264 cell
= kzalloc(sizeof(*cell
), GFP_KERNEL
);
270 rval
= nvmem_cell_info_to_nvmem_cell(nvmem
,
278 nvmem_cell_add(cell
);
284 mutex_unlock(&nvmem_cell_mutex
);
288 static struct nvmem_cell
*
289 nvmem_find_cell_by_name(struct nvmem_device
*nvmem
, const char *cell_id
)
291 struct nvmem_cell
*iter
, *cell
= NULL
;
293 mutex_lock(&nvmem_mutex
);
294 list_for_each_entry(iter
, &nvmem
->cells
, node
) {
295 if (strcmp(cell_id
, iter
->name
) == 0) {
300 mutex_unlock(&nvmem_mutex
);
305 static int nvmem_add_cells_from_of(struct nvmem_device
*nvmem
)
307 struct device_node
*parent
, *child
;
308 struct device
*dev
= &nvmem
->dev
;
309 struct nvmem_cell
*cell
;
313 parent
= dev
->of_node
;
315 for_each_child_of_node(parent
, child
) {
316 addr
= of_get_property(child
, "reg", &len
);
317 if (!addr
|| (len
< 2 * sizeof(u32
))) {
318 dev_err(dev
, "nvmem: invalid reg on %pOF\n", child
);
322 cell
= kzalloc(sizeof(*cell
), GFP_KERNEL
);
327 cell
->np
= of_node_get(child
);
328 cell
->offset
= be32_to_cpup(addr
++);
329 cell
->bytes
= be32_to_cpup(addr
);
330 cell
->name
= kasprintf(GFP_KERNEL
, "%pOFn", child
);
332 addr
= of_get_property(child
, "bits", &len
);
333 if (addr
&& len
== (2 * sizeof(u32
))) {
334 cell
->bit_offset
= be32_to_cpup(addr
++);
335 cell
->nbits
= be32_to_cpup(addr
);
339 cell
->bytes
= DIV_ROUND_UP(
340 cell
->nbits
+ cell
->bit_offset
,
343 if (!IS_ALIGNED(cell
->offset
, nvmem
->stride
)) {
344 dev_err(dev
, "cell %s unaligned to nvmem stride %d\n",
345 cell
->name
, nvmem
->stride
);
346 /* Cells already added will be freed later. */
347 kfree_const(cell
->name
);
352 nvmem_cell_add(cell
);
359 * nvmem_register() - Register a nvmem device for given nvmem_config.
360 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
362 * @config: nvmem device configuration with which nvmem device is created.
364 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
368 struct nvmem_device
*nvmem_register(const struct nvmem_config
*config
)
370 struct nvmem_device
*nvmem
;
374 return ERR_PTR(-EINVAL
);
376 nvmem
= kzalloc(sizeof(*nvmem
), GFP_KERNEL
);
378 return ERR_PTR(-ENOMEM
);
380 rval
= ida_simple_get(&nvmem_ida
, 0, 0, GFP_KERNEL
);
383 return ERR_PTR(rval
);
386 kref_init(&nvmem
->refcnt
);
387 INIT_LIST_HEAD(&nvmem
->cells
);
390 nvmem
->owner
= config
->owner
;
391 if (!nvmem
->owner
&& config
->dev
->driver
)
392 nvmem
->owner
= config
->dev
->driver
->owner
;
393 nvmem
->stride
= config
->stride
?: 1;
394 nvmem
->word_size
= config
->word_size
?: 1;
395 nvmem
->size
= config
->size
;
396 nvmem
->dev
.type
= &nvmem_provider_type
;
397 nvmem
->dev
.bus
= &nvmem_bus_type
;
398 nvmem
->dev
.parent
= config
->dev
;
399 nvmem
->priv
= config
->priv
;
400 nvmem
->type
= config
->type
;
401 nvmem
->reg_read
= config
->reg_read
;
402 nvmem
->reg_write
= config
->reg_write
;
403 if (!config
->no_of_node
)
404 nvmem
->dev
.of_node
= config
->dev
->of_node
;
406 if (config
->id
== -1 && config
->name
) {
407 dev_set_name(&nvmem
->dev
, "%s", config
->name
);
409 dev_set_name(&nvmem
->dev
, "%s%d",
410 config
->name
? : "nvmem",
411 config
->name
? config
->id
: nvmem
->id
);
414 nvmem
->read_only
= device_property_present(config
->dev
, "read-only") ||
415 config
->read_only
|| !nvmem
->reg_write
;
417 nvmem
->dev
.groups
= nvmem_sysfs_get_groups(nvmem
, config
);
419 device_initialize(&nvmem
->dev
);
421 dev_dbg(&nvmem
->dev
, "Registering nvmem device %s\n", config
->name
);
423 rval
= device_add(&nvmem
->dev
);
427 if (config
->compat
) {
428 rval
= nvmem_sysfs_setup_compat(nvmem
, config
);
434 rval
= nvmem_add_cells(nvmem
, config
->cells
, config
->ncells
);
436 goto err_teardown_compat
;
439 rval
= nvmem_add_cells_from_table(nvmem
);
441 goto err_remove_cells
;
443 rval
= nvmem_add_cells_from_of(nvmem
);
445 goto err_remove_cells
;
447 blocking_notifier_call_chain(&nvmem_notifier
, NVMEM_ADD
, nvmem
);
452 nvmem_device_remove_all_cells(nvmem
);
455 nvmem_sysfs_remove_compat(nvmem
, config
);
457 device_del(&nvmem
->dev
);
459 put_device(&nvmem
->dev
);
461 return ERR_PTR(rval
);
463 EXPORT_SYMBOL_GPL(nvmem_register
);
465 static void nvmem_device_release(struct kref
*kref
)
467 struct nvmem_device
*nvmem
;
469 nvmem
= container_of(kref
, struct nvmem_device
, refcnt
);
471 blocking_notifier_call_chain(&nvmem_notifier
, NVMEM_REMOVE
, nvmem
);
473 if (nvmem
->flags
& FLAG_COMPAT
)
474 device_remove_bin_file(nvmem
->base_dev
, &nvmem
->eeprom
);
476 nvmem_device_remove_all_cells(nvmem
);
477 device_del(&nvmem
->dev
);
478 put_device(&nvmem
->dev
);
482 * nvmem_unregister() - Unregister previously registered nvmem device
484 * @nvmem: Pointer to previously registered nvmem device.
486 void nvmem_unregister(struct nvmem_device
*nvmem
)
488 kref_put(&nvmem
->refcnt
, nvmem_device_release
);
490 EXPORT_SYMBOL_GPL(nvmem_unregister
);
/* devres destructor: unregister the managed nvmem device. */
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}
498 * devm_nvmem_register() - Register a managed nvmem device for given
500 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
502 * @dev: Device that uses the nvmem device.
503 * @config: nvmem device configuration with which nvmem device is created.
505 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
508 struct nvmem_device
*devm_nvmem_register(struct device
*dev
,
509 const struct nvmem_config
*config
)
511 struct nvmem_device
**ptr
, *nvmem
;
513 ptr
= devres_alloc(devm_nvmem_release
, sizeof(*ptr
), GFP_KERNEL
);
515 return ERR_PTR(-ENOMEM
);
517 nvmem
= nvmem_register(config
);
519 if (!IS_ERR(nvmem
)) {
521 devres_add(dev
, ptr
);
528 EXPORT_SYMBOL_GPL(devm_nvmem_register
);
/* devres match: true when the managed resource wraps @data. */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}
538 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
541 * @dev: Device that uses the nvmem device.
542 * @nvmem: Pointer to previously registered nvmem device.
544 * Return: Will be an negative on error or a zero on success.
546 int devm_nvmem_unregister(struct device
*dev
, struct nvmem_device
*nvmem
)
548 return devres_release(dev
, devm_nvmem_release
, devm_nvmem_match
, nvmem
);
550 EXPORT_SYMBOL(devm_nvmem_unregister
);
552 static struct nvmem_device
*__nvmem_device_get(struct device_node
*np
,
553 const char *nvmem_name
)
555 struct nvmem_device
*nvmem
= NULL
;
557 mutex_lock(&nvmem_mutex
);
558 nvmem
= np
? of_nvmem_find(np
) : nvmem_find(nvmem_name
);
559 mutex_unlock(&nvmem_mutex
);
561 return ERR_PTR(-EPROBE_DEFER
);
563 if (!try_module_get(nvmem
->owner
)) {
565 "could not increase module refcount for cell %s\n",
566 nvmem_dev_name(nvmem
));
568 put_device(&nvmem
->dev
);
569 return ERR_PTR(-EINVAL
);
572 kref_get(&nvmem
->refcnt
);
577 static void __nvmem_device_put(struct nvmem_device
*nvmem
)
579 put_device(&nvmem
->dev
);
580 module_put(nvmem
->owner
);
581 kref_put(&nvmem
->refcnt
, nvmem_device_release
);
584 #if IS_ENABLED(CONFIG_OF)
586 * of_nvmem_device_get() - Get nvmem device from a given id
588 * @np: Device tree node that uses the nvmem device.
589 * @id: nvmem name from nvmem-names property.
591 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
594 struct nvmem_device
*of_nvmem_device_get(struct device_node
*np
, const char *id
)
597 struct device_node
*nvmem_np
;
601 index
= of_property_match_string(np
, "nvmem-names", id
);
603 nvmem_np
= of_parse_phandle(np
, "nvmem", index
);
605 return ERR_PTR(-ENOENT
);
607 return __nvmem_device_get(nvmem_np
, NULL
);
609 EXPORT_SYMBOL_GPL(of_nvmem_device_get
);
613 * nvmem_device_get() - Get nvmem device from a given id
615 * @dev: Device that uses the nvmem device.
616 * @dev_name: name of the requested nvmem device.
618 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
621 struct nvmem_device
*nvmem_device_get(struct device
*dev
, const char *dev_name
)
623 if (dev
->of_node
) { /* try dt first */
624 struct nvmem_device
*nvmem
;
626 nvmem
= of_nvmem_device_get(dev
->of_node
, dev_name
);
628 if (!IS_ERR(nvmem
) || PTR_ERR(nvmem
) == -EPROBE_DEFER
)
633 return __nvmem_device_get(NULL
, dev_name
);
635 EXPORT_SYMBOL_GPL(nvmem_device_get
);
/* devres match for managed nvmem device handles. */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}
/* devres destructor: drop the managed reference to the nvmem device. */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
653 * devm_nvmem_device_put() - put alredy got nvmem device
655 * @dev: Device that uses the nvmem device.
656 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
657 * that needs to be released.
659 void devm_nvmem_device_put(struct device
*dev
, struct nvmem_device
*nvmem
)
663 ret
= devres_release(dev
, devm_nvmem_device_release
,
664 devm_nvmem_device_match
, nvmem
);
668 EXPORT_SYMBOL_GPL(devm_nvmem_device_put
);
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
682 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
684 * @dev: Device that requests the nvmem device.
685 * @id: name id for the requested nvmem device.
687 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
688 * on success. The nvmem_cell will be freed by the automatically once the
691 struct nvmem_device
*devm_nvmem_device_get(struct device
*dev
, const char *id
)
693 struct nvmem_device
**ptr
, *nvmem
;
695 ptr
= devres_alloc(devm_nvmem_device_release
, sizeof(*ptr
), GFP_KERNEL
);
697 return ERR_PTR(-ENOMEM
);
699 nvmem
= nvmem_device_get(dev
, id
);
700 if (!IS_ERR(nvmem
)) {
702 devres_add(dev
, ptr
);
709 EXPORT_SYMBOL_GPL(devm_nvmem_device_get
);
711 static struct nvmem_cell
*
712 nvmem_cell_get_from_lookup(struct device
*dev
, const char *con_id
)
714 struct nvmem_cell
*cell
= ERR_PTR(-ENOENT
);
715 struct nvmem_cell_lookup
*lookup
;
716 struct nvmem_device
*nvmem
;
720 return ERR_PTR(-EINVAL
);
722 dev_id
= dev_name(dev
);
724 mutex_lock(&nvmem_lookup_mutex
);
726 list_for_each_entry(lookup
, &nvmem_lookup_list
, node
) {
727 if ((strcmp(lookup
->dev_id
, dev_id
) == 0) &&
728 (strcmp(lookup
->con_id
, con_id
) == 0)) {
729 /* This is the right entry. */
730 nvmem
= __nvmem_device_get(NULL
, lookup
->nvmem_name
);
732 /* Provider may not be registered yet. */
733 cell
= ERR_CAST(nvmem
);
737 cell
= nvmem_find_cell_by_name(nvmem
,
740 __nvmem_device_put(nvmem
);
741 cell
= ERR_PTR(-ENOENT
);
747 mutex_unlock(&nvmem_lookup_mutex
);
751 #if IS_ENABLED(CONFIG_OF)
752 static struct nvmem_cell
*
753 nvmem_find_cell_by_node(struct nvmem_device
*nvmem
, struct device_node
*np
)
755 struct nvmem_cell
*iter
, *cell
= NULL
;
757 mutex_lock(&nvmem_mutex
);
758 list_for_each_entry(iter
, &nvmem
->cells
, node
) {
759 if (np
== iter
->np
) {
764 mutex_unlock(&nvmem_mutex
);
770 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
772 * @np: Device tree node that uses the nvmem cell.
773 * @id: nvmem cell name from nvmem-cell-names property, or NULL
774 * for the cell at index 0 (the lone cell with no accompanying
775 * nvmem-cell-names property).
777 * Return: Will be an ERR_PTR() on error or a valid pointer
778 * to a struct nvmem_cell. The nvmem_cell will be freed by the
781 struct nvmem_cell
*of_nvmem_cell_get(struct device_node
*np
, const char *id
)
783 struct device_node
*cell_np
, *nvmem_np
;
784 struct nvmem_device
*nvmem
;
785 struct nvmem_cell
*cell
;
788 /* if cell name exists, find index to the name */
790 index
= of_property_match_string(np
, "nvmem-cell-names", id
);
792 cell_np
= of_parse_phandle(np
, "nvmem-cells", index
);
794 return ERR_PTR(-ENOENT
);
796 nvmem_np
= of_get_next_parent(cell_np
);
798 return ERR_PTR(-EINVAL
);
800 nvmem
= __nvmem_device_get(nvmem_np
, NULL
);
801 of_node_put(nvmem_np
);
803 return ERR_CAST(nvmem
);
805 cell
= nvmem_find_cell_by_node(nvmem
, cell_np
);
807 __nvmem_device_put(nvmem
);
808 return ERR_PTR(-ENOENT
);
813 EXPORT_SYMBOL_GPL(of_nvmem_cell_get
);
817 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
819 * @dev: Device that requests the nvmem cell.
820 * @id: nvmem cell name to get (this corresponds with the name from the
821 * nvmem-cell-names property for DT systems and with the con_id from
822 * the lookup entry for non-DT systems).
824 * Return: Will be an ERR_PTR() on error or a valid pointer
825 * to a struct nvmem_cell. The nvmem_cell will be freed by the
828 struct nvmem_cell
*nvmem_cell_get(struct device
*dev
, const char *id
)
830 struct nvmem_cell
*cell
;
832 if (dev
->of_node
) { /* try dt first */
833 cell
= of_nvmem_cell_get(dev
->of_node
, id
);
834 if (!IS_ERR(cell
) || PTR_ERR(cell
) == -EPROBE_DEFER
)
838 /* NULL cell id only allowed for device tree; invalid otherwise */
840 return ERR_PTR(-EINVAL
);
842 return nvmem_cell_get_from_lookup(dev
, id
);
844 EXPORT_SYMBOL_GPL(nvmem_cell_get
);
/* devres destructor: release the managed nvmem cell reference. */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
852 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
854 * @dev: Device that requests the nvmem cell.
855 * @id: nvmem cell name id to get.
857 * Return: Will be an ERR_PTR() on error or a valid pointer
858 * to a struct nvmem_cell. The nvmem_cell will be freed by the
859 * automatically once the device is freed.
861 struct nvmem_cell
*devm_nvmem_cell_get(struct device
*dev
, const char *id
)
863 struct nvmem_cell
**ptr
, *cell
;
865 ptr
= devres_alloc(devm_nvmem_cell_release
, sizeof(*ptr
), GFP_KERNEL
);
867 return ERR_PTR(-ENOMEM
);
869 cell
= nvmem_cell_get(dev
, id
);
872 devres_add(dev
, ptr
);
879 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get
);
/* devres match for managed nvmem cell handles. */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}
892 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
893 * from devm_nvmem_cell_get.
895 * @dev: Device that requests the nvmem cell.
896 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
898 void devm_nvmem_cell_put(struct device
*dev
, struct nvmem_cell
*cell
)
902 ret
= devres_release(dev
, devm_nvmem_cell_release
,
903 devm_nvmem_cell_match
, cell
);
907 EXPORT_SYMBOL(devm_nvmem_cell_put
);
910 * nvmem_cell_put() - Release previously allocated nvmem cell.
912 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
914 void nvmem_cell_put(struct nvmem_cell
*cell
)
916 struct nvmem_device
*nvmem
= cell
->nvmem
;
918 __nvmem_device_put(nvmem
);
920 EXPORT_SYMBOL_GPL(nvmem_cell_put
);
922 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell
*cell
, void *buf
)
925 int i
, extra
, bit_offset
= cell
->bit_offset
;
932 /* setup rest of the bytes if any */
933 for (i
= 1; i
< cell
->bytes
; i
++) {
934 /* Get bits from next byte and shift them towards msb */
935 *p
|= *b
<< (BITS_PER_BYTE
- bit_offset
);
941 /* point to the msb */
942 p
+= cell
->bytes
- 1;
945 /* result fits in less bytes */
946 extra
= cell
->bytes
- DIV_ROUND_UP(cell
->nbits
, BITS_PER_BYTE
);
950 /* clear msb bits if any leftover in the last byte */
951 *p
&= GENMASK((cell
->nbits
%BITS_PER_BYTE
) - 1, 0);
954 static int __nvmem_cell_read(struct nvmem_device
*nvmem
,
955 struct nvmem_cell
*cell
,
956 void *buf
, size_t *len
)
960 rc
= nvmem_reg_read(nvmem
, cell
->offset
, buf
, cell
->bytes
);
965 /* shift bits in-place */
966 if (cell
->bit_offset
|| cell
->nbits
)
967 nvmem_shift_read_buffer_in_place(cell
, buf
);
976 * nvmem_cell_read() - Read a given nvmem cell
978 * @cell: nvmem cell to be read.
979 * @len: pointer to length of cell which will be populated on successful read;
982 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
983 * buffer should be freed by the consumer with a kfree().
985 void *nvmem_cell_read(struct nvmem_cell
*cell
, size_t *len
)
987 struct nvmem_device
*nvmem
= cell
->nvmem
;
992 return ERR_PTR(-EINVAL
);
994 buf
= kzalloc(cell
->bytes
, GFP_KERNEL
);
996 return ERR_PTR(-ENOMEM
);
998 rc
= __nvmem_cell_read(nvmem
, cell
, buf
, len
);
1006 EXPORT_SYMBOL_GPL(nvmem_cell_read
);
1008 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell
*cell
,
1011 struct nvmem_device
*nvmem
= cell
->nvmem
;
1012 int i
, rc
, nbits
, bit_offset
= cell
->bit_offset
;
1013 u8 v
, *p
, *buf
, *b
, pbyte
, pbits
;
1015 nbits
= cell
->nbits
;
1016 buf
= kzalloc(cell
->bytes
, GFP_KERNEL
);
1018 return ERR_PTR(-ENOMEM
);
1020 memcpy(buf
, _buf
, len
);
1027 /* setup the first byte with lsb bits from nvmem */
1028 rc
= nvmem_reg_read(nvmem
, cell
->offset
, &v
, 1);
1031 *b
++ |= GENMASK(bit_offset
- 1, 0) & v
;
1033 /* setup rest of the byte if any */
1034 for (i
= 1; i
< cell
->bytes
; i
++) {
1035 /* Get last byte bits and shift them towards lsb */
1036 pbits
= pbyte
>> (BITS_PER_BYTE
- 1 - bit_offset
);
1044 /* if it's not end on byte boundary */
1045 if ((nbits
+ bit_offset
) % BITS_PER_BYTE
) {
1046 /* setup the last byte with msb bits from nvmem */
1047 rc
= nvmem_reg_read(nvmem
,
1048 cell
->offset
+ cell
->bytes
- 1, &v
, 1);
1051 *p
|= GENMASK(7, (nbits
+ bit_offset
) % BITS_PER_BYTE
) & v
;
1062 * nvmem_cell_write() - Write to a given nvmem cell
1064 * @cell: nvmem cell to be written.
1065 * @buf: Buffer to be written.
1066 * @len: length of buffer to be written to nvmem cell.
1068 * Return: length of bytes written or negative on failure.
1070 int nvmem_cell_write(struct nvmem_cell
*cell
, void *buf
, size_t len
)
1072 struct nvmem_device
*nvmem
= cell
->nvmem
;
1075 if (!nvmem
|| nvmem
->read_only
||
1076 (cell
->bit_offset
== 0 && len
!= cell
->bytes
))
1079 if (cell
->bit_offset
|| cell
->nbits
) {
1080 buf
= nvmem_cell_prepare_write_buffer(cell
, buf
, len
);
1082 return PTR_ERR(buf
);
1085 rc
= nvmem_reg_write(nvmem
, cell
->offset
, buf
, cell
->bytes
);
1087 /* free the tmp buffer */
1088 if (cell
->bit_offset
|| cell
->nbits
)
1096 EXPORT_SYMBOL_GPL(nvmem_cell_write
);
1099 * nvmem_cell_read_u16() - Read a cell value as an u16
1101 * @dev: Device that requests the nvmem cell.
1102 * @cell_id: Name of nvmem cell to read.
1103 * @val: pointer to output value.
1105 * Return: 0 on success or negative errno.
1107 int nvmem_cell_read_u16(struct device
*dev
, const char *cell_id
, u16
*val
)
1109 struct nvmem_cell
*cell
;
1113 cell
= nvmem_cell_get(dev
, cell_id
);
1115 return PTR_ERR(cell
);
1117 buf
= nvmem_cell_read(cell
, &len
);
1119 nvmem_cell_put(cell
);
1120 return PTR_ERR(buf
);
1122 if (len
!= sizeof(*val
)) {
1124 nvmem_cell_put(cell
);
1127 memcpy(val
, buf
, sizeof(*val
));
1129 nvmem_cell_put(cell
);
1133 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16
);
1136 * nvmem_cell_read_u32() - Read a cell value as an u32
1138 * @dev: Device that requests the nvmem cell.
1139 * @cell_id: Name of nvmem cell to read.
1140 * @val: pointer to output value.
1142 * Return: 0 on success or negative errno.
1144 int nvmem_cell_read_u32(struct device
*dev
, const char *cell_id
, u32
*val
)
1146 struct nvmem_cell
*cell
;
1150 cell
= nvmem_cell_get(dev
, cell_id
);
1152 return PTR_ERR(cell
);
1154 buf
= nvmem_cell_read(cell
, &len
);
1156 nvmem_cell_put(cell
);
1157 return PTR_ERR(buf
);
1159 if (len
!= sizeof(*val
)) {
1161 nvmem_cell_put(cell
);
1164 memcpy(val
, buf
, sizeof(*val
));
1167 nvmem_cell_put(cell
);
1170 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32
);
1173 * nvmem_device_cell_read() - Read a given nvmem device and cell
1175 * @nvmem: nvmem device to read from.
1176 * @info: nvmem cell info to be read.
1177 * @buf: buffer pointer which will be populated on successful read.
1179 * Return: length of successful bytes read on success and negative
1180 * error code on error.
1182 ssize_t
nvmem_device_cell_read(struct nvmem_device
*nvmem
,
1183 struct nvmem_cell_info
*info
, void *buf
)
1185 struct nvmem_cell cell
;
1192 rc
= nvmem_cell_info_to_nvmem_cell_nodup(nvmem
, info
, &cell
);
1196 rc
= __nvmem_cell_read(nvmem
, &cell
, buf
, &len
);
1202 EXPORT_SYMBOL_GPL(nvmem_device_cell_read
);
1205 * nvmem_device_cell_write() - Write cell to a given nvmem device
1207 * @nvmem: nvmem device to be written to.
1208 * @info: nvmem cell info to be written.
1209 * @buf: buffer to be written to cell.
1211 * Return: length of bytes written or negative error code on failure.
1213 int nvmem_device_cell_write(struct nvmem_device
*nvmem
,
1214 struct nvmem_cell_info
*info
, void *buf
)
1216 struct nvmem_cell cell
;
1222 rc
= nvmem_cell_info_to_nvmem_cell_nodup(nvmem
, info
, &cell
);
1226 return nvmem_cell_write(&cell
, buf
, cell
.bytes
);
1228 EXPORT_SYMBOL_GPL(nvmem_device_cell_write
);
1231 * nvmem_device_read() - Read from a given nvmem device
1233 * @nvmem: nvmem device to read from.
1234 * @offset: offset in nvmem device.
1235 * @bytes: number of bytes to read.
1236 * @buf: buffer pointer which will be populated on successful read.
1238 * Return: length of successful bytes read on success and negative
1239 * error code on error.
1241 int nvmem_device_read(struct nvmem_device
*nvmem
,
1242 unsigned int offset
,
1243 size_t bytes
, void *buf
)
1250 rc
= nvmem_reg_read(nvmem
, offset
, buf
, bytes
);
1257 EXPORT_SYMBOL_GPL(nvmem_device_read
);
1260 * nvmem_device_write() - Write cell to a given nvmem device
1262 * @nvmem: nvmem device to be written to.
1263 * @offset: offset in nvmem device.
1264 * @bytes: number of bytes to write.
1265 * @buf: buffer to be written.
1267 * Return: length of bytes written or negative error code on failure.
1269 int nvmem_device_write(struct nvmem_device
*nvmem
,
1270 unsigned int offset
,
1271 size_t bytes
, void *buf
)
1278 rc
= nvmem_reg_write(nvmem
, offset
, buf
, bytes
);
1286 EXPORT_SYMBOL_GPL(nvmem_device_write
);
1289 * nvmem_add_cell_table() - register a table of cell info entries
1291 * @table: table of cell info entries
1293 void nvmem_add_cell_table(struct nvmem_cell_table
*table
)
1295 mutex_lock(&nvmem_cell_mutex
);
1296 list_add_tail(&table
->node
, &nvmem_cell_tables
);
1297 mutex_unlock(&nvmem_cell_mutex
);
1299 EXPORT_SYMBOL_GPL(nvmem_add_cell_table
);
1302 * nvmem_del_cell_table() - remove a previously registered cell info table
1304 * @table: table of cell info entries
1306 void nvmem_del_cell_table(struct nvmem_cell_table
*table
)
1308 mutex_lock(&nvmem_cell_mutex
);
1309 list_del(&table
->node
);
1310 mutex_unlock(&nvmem_cell_mutex
);
1312 EXPORT_SYMBOL_GPL(nvmem_del_cell_table
);
1315 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1317 * @entries: array of cell lookup entries
1318 * @nentries: number of cell lookup entries in the array
1320 void nvmem_add_cell_lookups(struct nvmem_cell_lookup
*entries
, size_t nentries
)
1324 mutex_lock(&nvmem_lookup_mutex
);
1325 for (i
= 0; i
< nentries
; i
++)
1326 list_add_tail(&entries
[i
].node
, &nvmem_lookup_list
);
1327 mutex_unlock(&nvmem_lookup_mutex
);
1329 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups
);
1332 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1335 * @entries: array of cell lookup entries
1336 * @nentries: number of cell lookup entries in the array
1338 void nvmem_del_cell_lookups(struct nvmem_cell_lookup
*entries
, size_t nentries
)
1342 mutex_lock(&nvmem_lookup_mutex
);
1343 for (i
= 0; i
< nentries
; i
++)
1344 list_del(&entries
[i
].node
);
1345 mutex_unlock(&nvmem_lookup_mutex
);
1347 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups
);
1350 * nvmem_dev_name() - Get the name of a given nvmem device.
1352 * @nvmem: nvmem device.
1354 * Return: name of the nvmem device.
1356 const char *nvmem_dev_name(struct nvmem_device
*nvmem
)
1358 return dev_name(&nvmem
->dev
);
1360 EXPORT_SYMBOL_GPL(nvmem_dev_name
);
1362 static int __init
nvmem_init(void)
1364 return bus_register(&nvmem_bus_type
);
1367 static void __exit
nvmem_exit(void)
1369 bus_unregister(&nvmem_bus_type
);
1372 subsys_initcall(nvmem_init
);
1373 module_exit(nvmem_exit
);
1375 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1376 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1377 MODULE_DESCRIPTION("nvmem Driver Core");
1378 MODULE_LICENSE("GPL v2");