// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	struct kref		refcnt;
	size_t			size;
	bool			read_only;
	int			flags;
	enum nvmem_type		type;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	struct list_head	cells;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void *priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
	.attrs		= nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0444,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
	.attrs		= nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0600,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
	.attrs		= nvmem_attrs,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0400,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
	.attrs		= nvmem_attrs,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in the driver's
 * sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
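
/*
 * Example (illustrative sketch only, not used by the core): a listener that
 * wants to react to providers and cells coming and going could register a
 * notifier block such as the hypothetical one below from its init path.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_info("nvmem: provider added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 *	...
 *	nvmem_unregister_notifier(&foo_nvmem_nb);
 */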

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
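
/*
 * Example (a minimal sketch, not used by the core): a provider driver fills
 * a struct nvmem_config with its geometry and reg_read/reg_write callbacks
 * and registers it from probe. All "foo" names below are hypothetical.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_chip *foo = priv;
 *
 *		return foo_chip_read(foo, offset, val, bytes);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_chip *foo = foo_chip_init(&pdev->dev);
 *		struct nvmem_config config = {
 *			.name = "foo-nvmem",
 *			.dev = &pdev->dev,
 *			.owner = THIS_MODULE,
 *			.stride = 1,
 *			.word_size = 1,
 *			.size = 1024,
 *			.reg_read = foo_reg_read,
 *			.priv = foo,
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev,
 *							   &config));
 *	}
 */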

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release,
			      devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       const char *nvmem_name)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);
	nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
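
/*
 * Example (a minimal consumer sketch): "mac-address" is an assumed cell
 * name, declared either via nvmem-cells/nvmem-cell-names in the consumer's
 * device tree node or via a struct nvmem_cell_lookup entry on non-DT
 * systems; the consumer code is the same either way.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	(use the len bytes in mac, then kfree(mac))
 */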

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
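
/*
 * Example (sketch): a consumer that only needs a single 32-bit value, here
 * a hypothetical calibration cell named "calib", can skip the explicit
 * get/read/put sequence:
 *
 *	u32 calib;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "calib", &calib);
 *	if (ret)
 *		return ret;
 */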

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
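
/*
 * Example (sketch, assumed provider name "foo-nvmem0"): the device-level
 * API bypasses cells and works on raw offsets:
 *
 *	struct nvmem_device *nvmem;
 *	u8 serial[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "foo-nvmem0");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(serial), serial);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 */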

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
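
/*
 * Example (sketch, hypothetical names): board code or a provider driver can
 * describe cells up front and register them as a table; the cells are
 * attached when a provider whose device is named "foo-nvmem0" registers:
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "mac-address",
 *			.offset = 0x90,
 *			.bytes = 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */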

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
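
/*
 * Example (sketch, hypothetical names): on non-DT systems a lookup entry
 * connects a consumer device/con_id pair to a provider cell, so that
 * nvmem_cell_get(dev, "mac-address") from the "foo-eth.0" device resolves
 * through nvmem_cell_get_from_lookup():
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name = "foo-nvmem0",
 *			.cell_name = "mac-address",
 *			.dev_id = "foo-eth.0",
 *			.con_id = "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */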

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");