// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}
static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}
static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		offset += ksize;
		val += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
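/*
 * Illustrative sketch, not part of this file: a provider can declare
 * keepout ranges in its nvmem_config so the wrappers above route accesses
 * around them. The names demo_keepouts/demo_config_fragment are
 * hypothetical.
 */
static const struct nvmem_keepout demo_keepouts[] = {
	{ .start = 0x20, .end = 0x30, .value = 0xff },	/* reads see 0xff here */
};

static const struct nvmem_config demo_config_fragment = {
	/* .dev, .reg_read, .reg_write, ... filled in as usual */
	.keepout	= demo_keepouts,
	.nkeepout	= ARRAY_SIZE(demo_keepouts),
};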
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);
static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}
static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}
static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it can still
	 * be forced into read-write mode using the 'force_ro' attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}
/* default read/write permissions */
static const struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read_new	= bin_attr_nvmem_read,
	.write_new	= bin_attr_nvmem_write,
};

static const struct bin_attribute *const nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs_new	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size	= nvmem_bin_attr_size,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};
static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read_new	= bin_attr_nvmem_read,
	.write_new	= bin_attr_nvmem_write,
};
/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	const struct bin_attribute **pattrs;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
			      sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!pattrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read_new = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		pattrs[i] = &attrs[i];
		i++;
	}

	group.bin_attrs_new = pattrs;

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}
#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}
static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};
static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}
static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}
static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}
/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
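/*
 * Illustrative sketch (hypothetical cell name and offset): a provider can
 * register a single cell at runtime with nvmem_add_one_cell(). The offset
 * must respect the device's stride.
 */
static int demo_add_mac_cell(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name	= "mac-address",
		.offset	= 0x40,		/* must be aligned to nvmem->stride */
		.bytes	= 6,
	};

	return nvmem_add_one_cell(nvmem, &info);
}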
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");
			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");
			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {
			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");
			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");
			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}
static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
*nvmem
)
865 return nvmem_add_cells_from_dt(nvmem
, nvmem
->dev
.of_node
);
static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}
int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
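/*
 * Illustrative provider sketch (hypothetical names demo_priv, demo_reg_read,
 * demo_register): a driver backing its storage with a plain buffer fills an
 * nvmem_config and registers it. The 'priv' handed to .reg_read is
 * nvmem_config::priv.
 */
struct demo_priv {
	u8 buf[256];
};

static int demo_reg_read(void *priv, unsigned int off, void *val, size_t count)
{
	struct demo_priv *p = priv;

	memcpy(val, p->buf + off, count);
	return 0;
}

static struct nvmem_device *demo_register(struct device *dev,
					  struct demo_priv *p)
{
	struct nvmem_config config = {
		.dev		= dev,
		.name		= "demo-nvmem",
		.id		= NVMEM_DEVID_AUTO,
		.priv		= p,
		.size		= sizeof(p->buf),
		.word_size	= 1,
		.stride		= 1,
		.reg_read	= demo_reg_read,
		/* no .reg_write: the device registers read-only */
	};

	return nvmem_register(&config);
}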
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}
/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
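/*
 * Illustrative usage sketch (hypothetical helper): in a probe() path the
 * devm variant ties the nvmem lifetime to the providing device, so no
 * explicit nvmem_unregister() is needed. 'config' is assumed to be filled
 * as for nvmem_register().
 */
static int demo_probe_fragment(struct device *dev,
			       const struct nvmem_config *config)
{
	struct nvmem_device *nvmem = devm_nvmem_register(dev, config);

	return PTR_ERR_OR_ZERO(nvmem);
}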
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
/**
 * devm_nvmem_device_put() - put already got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}
static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
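/*
 * Illustrative device-tree fragment (hypothetical nodes) matching the
 * parsing above: the consumer references a cell by phandle in nvmem-cells
 * and names it in nvmem-cell-names; that name is the 'id' passed to
 * of_nvmem_cell_get().
 *
 *	eeprom@50 {
 *		compatible = "atmel,24c02";
 *		reg = <0x50>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calibration@10 {
 *			reg = <0x10 0x4>;	// offset 0x10, 4 bytes
 *		};
 *	};
 *
 *	sensor {
 *		nvmem-cells = <&calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 */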
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
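/*
 * Illustrative consumer sketch (hypothetical helper and cell name): get a
 * cell, read it, and release both the buffer and the cell.
 * nvmem_cell_read() allocates the returned buffer, so the consumer must
 * kfree() it.
 */
static int demo_read_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... use 'len' bytes at 'buf' ... */
	kfree(buf);
	return 0;
}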
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}
/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
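/*
 * Worked example of the in-place shift above, assuming a cell with
 * bit_offset = 3 and nbits = 10 (so bytes = 2): a raw read of
 * {0b10101000, 0b00000110} becomes {0b11010101, 0b00000000}. Each byte is
 * shifted right by 3 with the low bits of the following byte moved into
 * the vacated msbs, no extra bytes need clearing since
 * DIV_ROUND_UP(10, 8) = 2, and the leftover msbs of the last byte are
 * masked with GENMASK(1, 0) because nbits % 8 = 2.
 */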
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
*cell
, void *buf
, size_t len
)
1780 struct nvmem_device
*nvmem
= cell
->nvmem
;
1783 if (!nvmem
|| nvmem
->read_only
||
1784 (cell
->bit_offset
== 0 && len
!= cell
->bytes
))
1788 * Any cells which have a read_post_process hook are read-only because
1789 * we cannot reverse the operation and it might affect other cells,
1792 if (cell
->read_post_process
)
1795 if (cell
->bit_offset
|| cell
->nbits
) {
1796 if (len
!= BITS_TO_BYTES(cell
->nbits
) && len
!= cell
->bytes
)
1798 buf
= nvmem_cell_prepare_write_buffer(cell
, buf
, len
);
1800 return PTR_ERR(buf
);
1803 rc
= nvmem_reg_write(nvmem
, cell
->offset
, buf
, cell
->bytes
);
1805 /* free the tmp buffer */
1806 if (cell
->bit_offset
|| cell
->nbits
)
/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
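/*
 * Illustrative sketch (hypothetical helper and cell id): writing a cell
 * requires a buffer of exactly the cell's size, or BITS_TO_BYTES(nbits) for
 * bitfield cells. nvmem_cell_write() returns the length written or a
 * negative errno.
 */
static int demo_write_mac(struct device *dev, const u8 mac[6])
{
	struct nvmem_cell *cell;
	int ret;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	ret = nvmem_cell_write(cell, (void *)mac, 6);
	nvmem_cell_put(cell);

	return ret < 0 ? ret : 0;
}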
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
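/*
 * Illustrative sketch (hypothetical helper and cell name): the fixed-width
 * helpers above wrap get/read/put for the common "one small value" case and
 * fail with -EINVAL when the cell size does not match the requested width.
 */
static int demo_read_serial(struct device *dev, u32 *serial)
{
	/* Fails unless the "serial-number" cell is exactly 4 bytes. */
	return nvmem_cell_read_u32(dev, "serial-number", serial);
}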
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}
/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
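/*
 * Illustrative sketch (hypothetical helper): raw device-level access
 * bypasses cells and works on absolute offsets; callers should still
 * respect the provider's stride and word_size.
 */
static int demo_patch_byte(struct nvmem_device *nvmem, u8 val)
{
	int ret = nvmem_device_write(nvmem, 0x10, 1, &val);

	return ret < 0 ? ret : 0;
}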
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
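/*
 * Illustrative sketch for non-DT platforms (hypothetical names): board code
 * can describe cells for a provider, keyed by the provider's device name,
 * before that provider registers.
 */
static struct nvmem_cell_info demo_board_cells[] = {
	{
		.name	= "product-id",
		.offset	= 0x0,
		.bytes	= 2,
	},
};

static struct nvmem_cell_table demo_board_cell_table = {
	.nvmem_name	= "demo-nvmem0",	/* must match nvmem_dev_name() */
	.cells		= demo_board_cells,
	.ncells		= ARRAY_SIZE(demo_board_cells),
};

/* board init: nvmem_add_cell_table(&demo_board_cell_table); */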
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
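/*
 * Illustrative sketch (hypothetical names): a lookup connects a consumer
 * device and con_id to a provider's cell on non-DT systems, mirroring what
 * nvmem_cell_get_from_lookup() searches for.
 */
static struct nvmem_cell_lookup demo_lookups[] = {
	{
		.nvmem_name	= "demo-nvmem0",
		.cell_name	= "mac-address",
		.dev_id		= "demo-eth.0",	/* dev_name() of the consumer */
		.con_id		= "mac-address", /* id passed to nvmem_cell_get() */
	},
};

/* board init: nvmem_add_cell_lookups(demo_lookups, ARRAY_SIZE(demo_lookups)); */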
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");