// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}
static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}
static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		offset += ksize;
		val += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);
static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}
static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);
	if (rc)
		return rc;

	return count;
}
static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}
static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	/*
	 * If the device has no .reg_write operation, do not allow
	 * configuration as read-write.
	 * If the device is set as read-only by configuration, it
	 * can be forced into read-write mode using the 'force_ro'
	 * attribute.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);
static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};
/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sys directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	group.bin_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
				       sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!group.bin_attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		group.bin_attrs[i] = &attrs[i];
		i++;
	}

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}
#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */
static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};
static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}
static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}
/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}
/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}
static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");
			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");
			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {
			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");
			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");
			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}
static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}
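/*
 * Example (illustrative device-tree sketch): the legacy fixed cells parsed
 * above are direct children of the provider node, with "reg" giving
 * <offset size> in bytes and an optional "bits" of <bit-offset nbits>.
 * The node names and the at24 compatible are hypothetical.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *
 *		serial: serial-number@10 {
 *			reg = <0x10 0x8>;
 *		};
 *
 *		flags: flags@18 {
 *			reg = <0x18 0x1>;
 *			bits = <2 3>;
 *		};
 *	};
 */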
static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}
int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);
static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}
static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}
#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);
	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}
/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}
/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 *                            entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);
static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);
2226 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
2227 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
2228 MODULE_DESCRIPTION("nvmem Driver Core");