/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
{
        const struct regmap_range *r;
        int i;

        for (i = 0, r = ranges; i < nranges; i++, r++)
                if (regmap_reg_in_range(reg, r))
                        return true;
        return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
                              const struct regmap_access_table *table)
{
        /* Check "no ranges" first */
        if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
                return false;

        /* In case zero "yes ranges" are supplied, any reg is OK */
        if (!table->n_yes_ranges)
                return true;

        return regmap_reg_in_ranges(reg, table->yes_ranges,
                                    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
        if (map->max_register && reg > map->max_register)
                return false;

        if (map->writeable_reg)
                return map->writeable_reg(map->dev, reg);

        if (map->wr_table)
                return regmap_check_range_table(map, reg, map->wr_table);

        return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
        if (map->max_register && reg > map->max_register)
                return false;

        if (map->format.format_write)
                return false;

        if (map->readable_reg)
                return map->readable_reg(map->dev, reg);

        if (map->rd_table)
                return regmap_check_range_table(map, reg, map->rd_table);

        return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
        if (!map->format.format_write && !regmap_readable(map, reg))
                return false;

        if (map->volatile_reg)
                return map->volatile_reg(map->dev, reg);

        if (map->volatile_table)
                return regmap_check_range_table(map, reg, map->volatile_table);

        if (map->cache_ops)
                return false;
        else
                return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
        if (!regmap_readable(map, reg))
                return false;

        if (map->precious_reg)
                return map->precious_reg(map->dev, reg);

        if (map->precious_table)
                return regmap_check_range_table(map, reg, map->precious_table);

        return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
                                  size_t num)
{
        unsigned int i;

        for (i = 0; i < num; i++)
                if (!regmap_volatile(map, reg + i))
                        return false;

        return true;
}

static void regmap_format_2_6_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        *out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
                                     unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;
        *out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
{
        __be16 *out = map->work_buf;
        *out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
{
        u8 *out = map->work_buf;

        out[2] = val;
        out[1] = (val >> 8) | (reg << 6);
        out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
        __be16 *b = buf;

        b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
        __le16 *b = buf;

        b[0] = cpu_to_le16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        *(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
        u8 *b = buf;

        val <<= shift;

        b[0] = val >> 16;
        b[1] = val >> 8;
        b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
        __be32 *b = buf;

        b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
        __le32 *b = buf;

        b[0] = cpu_to_le32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
{
        *(u32 *)buf = val << shift;
}

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
        const u8 *b = buf;

        return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
        const __be16 *b = buf;

        return be16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
        const __le16 *b = buf;

        return le16_to_cpu(b[0]);
}

static void regmap_parse_16_be_inplace(void *buf)
{
        __be16 *b = buf;

        b[0] = be16_to_cpu(b[0]);
}

static void regmap_parse_16_le_inplace(void *buf)
{
        __le16 *b = buf;

        b[0] = le16_to_cpu(b[0]);
}

static unsigned int regmap_parse_16_native(const void *buf)
{
        return *(u16 *)buf;
}

static unsigned int regmap_parse_24(const void *buf)
{
        const u8 *b = buf;
        unsigned int ret = b[2];
        ret |= ((unsigned int)b[1]) << 8;
        ret |= ((unsigned int)b[0]) << 16;

        return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
        const __be32 *b = buf;

        return be32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
        const __le32 *b = buf;

        return le32_to_cpu(b[0]);
}

static void regmap_parse_32_be_inplace(void *buf)
{
        __be32 *b = buf;

        b[0] = be32_to_cpu(b[0]);
}

static void regmap_parse_32_le_inplace(void *buf)
{
        __le32 *b = buf;

        b[0] = le32_to_cpu(b[0]);
}

static unsigned int regmap_parse_32_native(const void *buf)
{
        return *(u32 *)buf;
}

static void regmap_lock_mutex(void *__map)
{
        struct regmap *map = __map;
        mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
        struct regmap *map = __map;
        mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
        struct regmap *map = __map;
        unsigned long flags;

        spin_lock_irqsave(&map->spinlock, flags);
        map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
        struct regmap *map = __map;
        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
        /*
         * We don't actually have anything to do here; the goal here
         * is not to manage the regmap but to provide a simple way to
         * get the regmap back given a struct device.
         */
}

static bool _regmap_range_add(struct regmap *map,
                              struct regmap_range_node *data)
{
        struct rb_root *root = &map->range_tree;
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct regmap_range_node *this =
                        container_of(*new, struct regmap_range_node, node);

                parent = *new;
                if (data->range_max < this->range_min)
                        new = &((*new)->rb_left);
                else if (data->range_min > this->range_max)
                        new = &((*new)->rb_right);
                else
                        return false;
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);

        return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
                                                      unsigned int reg)
{
        struct rb_node *node = map->range_tree.rb_node;

        while (node) {
                struct regmap_range_node *this =
                        container_of(node, struct regmap_range_node, node);

                if (reg < this->range_min)
                        node = node->rb_left;
                else if (reg > this->range_max)
                        node = node->rb_right;
                else
                        return this;
        }

        return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regmap_range_node *range_node;

        next = rb_first(&map->range_tree);
        while (next) {
                range_node = rb_entry(next, struct regmap_range_node, node);
                next = rb_next(&range_node->node);
                rb_erase(&range_node->node, &map->range_tree);
                kfree(range_node);
        }

        kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
                      const struct regmap_config *config)
{
        struct regmap **m;

        map->dev = dev;

        regmap_debugfs_init(map, config->name);

        /* Add a devres resource for dev_get_regmap() */
        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
        if (!m) {
                regmap_debugfs_exit(map);
                return -ENOMEM;
        }
        *m = map;
        devres_add(dev, m);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
                                        const struct regmap_config *config)
{
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->reg_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->reg_format_endian_default)
                endian = bus->reg_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
                                         const struct regmap_bus *bus,
                                         const struct regmap_config *config)
{
        struct device_node *np;
        enum regmap_endian endian;

        /* Retrieve the endianness specification from the regmap config */
        endian = config->val_format_endian;

        /* If the regmap config specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* If the dev and dev->of_node exist try to get endianness from DT */
        if (dev && dev->of_node) {
                np = dev->of_node;

                /* Parse the device's DT node for an endianness specification */
                if (of_property_read_bool(np, "big-endian"))
                        endian = REGMAP_ENDIAN_BIG;
                else if (of_property_read_bool(np, "little-endian"))
                        endian = REGMAP_ENDIAN_LITTLE;

                /* If the endianness was specified in DT, use that */
                if (endian != REGMAP_ENDIAN_DEFAULT)
                        return endian;
        }

        /* Retrieve the endianness specification from the bus config */
        if (bus && bus->val_format_endian_default)
                endian = bus->val_format_endian_default;

        /* If the bus specified a non-default value, use that */
        if (endian != REGMAP_ENDIAN_DEFAULT)
                return endian;

        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);

/**
 * regmap_init(): Initialise register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer to
 * a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.
 */
struct regmap *regmap_init(struct device *dev,
                           const struct regmap_bus *bus,
                           void *bus_context,
                           const struct regmap_config *config)
{
        struct regmap *map;
        int ret = -EINVAL;
        enum regmap_endian reg_endian, val_endian;
        int i, j;

        if (!config)
                goto err;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        if (config->lock && config->unlock) {
                map->lock = config->lock;
                map->unlock = config->unlock;
                map->lock_arg = config->lock_arg;
        } else {
                if ((bus && bus->fast_io) ||
                    config->fast_io) {
                        spin_lock_init(&map->spinlock);
                        map->lock = regmap_lock_spinlock;
                        map->unlock = regmap_unlock_spinlock;
                } else {
                        mutex_init(&map->mutex);
                        map->lock = regmap_lock_mutex;
                        map->unlock = regmap_unlock_mutex;
                }
                map->lock_arg = map;
        }
        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
        map->format.pad_bytes = config->pad_bits / 8;
        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
        map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
                        config->val_bits + config->pad_bits, 8);
        map->reg_shift = config->pad_bits % 8;
        if (config->reg_stride)
                map->reg_stride = config->reg_stride;
        else
                map->reg_stride = 1;
        map->use_single_rw = config->use_single_rw;
        map->can_multi_write = config->can_multi_write;
        map->dev = dev;
        map->bus = bus;
        map->bus_context = bus_context;
        map->max_register = config->max_register;
        map->wr_table = config->wr_table;
        map->rd_table = config->rd_table;
        map->volatile_table = config->volatile_table;
        map->precious_table = config->precious_table;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->cache_type = config->cache_type;
        map->name = config->name;

        spin_lock_init(&map->async_lock);
        INIT_LIST_HEAD(&map->async_list);
        INIT_LIST_HEAD(&map->async_free);
        init_waitqueue_head(&map->async_waitq);

        if (config->read_flag_mask || config->write_flag_mask) {
                map->read_flag_mask = config->read_flag_mask;
                map->write_flag_mask = config->write_flag_mask;
        } else if (bus) {
                map->read_flag_mask = bus->read_flag_mask;
        }

        if (!bus) {
                map->reg_read  = config->reg_read;
                map->reg_write = config->reg_write;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else if (!bus->read || !bus->write) {
                map->reg_read = _regmap_bus_reg_read;
                map->reg_write = _regmap_bus_reg_write;

                map->defer_caching = false;
                goto skip_format_initialization;
        } else {
                map->reg_read  = _regmap_bus_read;
        }

        reg_endian = regmap_get_reg_endian(bus, config);
        val_endian = regmap_get_val_endian(dev, bus, config);

        switch (config->reg_bits + map->reg_shift) {
        case 2:
                switch (config->val_bits) {
                case 6:
                        map->format.format_write = regmap_format_2_6_write;
                        break;
                default:
                        goto err_map;
                }
                break;

        case 4:
                switch (config->val_bits) {
                case 12:
                        map->format.format_write = regmap_format_4_12_write;
                        break;
                default:
                        goto err_map;
                }
                break;

        case 7:
                switch (config->val_bits) {
                case 9:
                        map->format.format_write = regmap_format_7_9_write;
                        break;
                default:
                        goto err_map;
                }
                break;

        case 10:
                switch (config->val_bits) {
                case 14:
                        map->format.format_write = regmap_format_10_14_write;
                        break;
                default:
                        goto err_map;
                }
                break;

        case 8:
                map->format.format_reg = regmap_format_8;
                break;

        case 16:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_16_be;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_16_native;
                        break;
                default:
                        goto err_map;
                }
                break;

        case 24:
                if (reg_endian != REGMAP_ENDIAN_BIG)
                        goto err_map;
                map->format.format_reg = regmap_format_24;
                break;

        case 32:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_reg = regmap_format_32_be;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_reg = regmap_format_32_native;
                        break;
                default:
                        goto err_map;
                }
                break;

        default:
                goto err_map;
        }

        if (val_endian == REGMAP_ENDIAN_NATIVE)
                map->format.parse_inplace = regmap_parse_inplace_noop;

        switch (config->val_bits) {
        case 8:
                map->format.format_val = regmap_format_8;
                map->format.parse_val = regmap_parse_8;
                map->format.parse_inplace = regmap_parse_inplace_noop;
                break;
        case 16:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_16_be;
                        map->format.parse_val = regmap_parse_16_be;
                        map->format.parse_inplace = regmap_parse_16_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_16_le;
                        map->format.parse_val = regmap_parse_16_le;
                        map->format.parse_inplace = regmap_parse_16_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_16_native;
                        map->format.parse_val = regmap_parse_16_native;
                        break;
                default:
                        goto err_map;
                }
                break;
        case 24:
                if (val_endian != REGMAP_ENDIAN_BIG)
                        goto err_map;
                map->format.format_val = regmap_format_24;
                map->format.parse_val = regmap_parse_24;
                break;
        case 32:
                switch (val_endian) {
                case REGMAP_ENDIAN_BIG:
                        map->format.format_val = regmap_format_32_be;
                        map->format.parse_val = regmap_parse_32_be;
                        map->format.parse_inplace = regmap_parse_32_be_inplace;
                        break;
                case REGMAP_ENDIAN_LITTLE:
                        map->format.format_val = regmap_format_32_le;
                        map->format.parse_val = regmap_parse_32_le;
                        map->format.parse_inplace = regmap_parse_32_le_inplace;
                        break;
                case REGMAP_ENDIAN_NATIVE:
                        map->format.format_val = regmap_format_32_native;
                        map->format.parse_val = regmap_parse_32_native;
                        break;
                default:
                        goto err_map;
                }
                break;
        default:
                goto err_map;
        }

        if (map->format.format_write) {
                if ((reg_endian != REGMAP_ENDIAN_BIG) ||
                    (val_endian != REGMAP_ENDIAN_BIG))
                        goto err_map;
                map->use_single_rw = true;
        }

        if (!map->format.format_write &&
            !(map->format.format_reg && map->format.format_val))
                goto err_map;

        map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
                goto err_map;
        }

        if (map->format.format_write) {
                map->defer_caching = false;
                map->reg_write = _regmap_bus_formatted_write;
        } else if (map->format.format_val) {
                map->defer_caching = true;
                map->reg_write = _regmap_bus_raw_write;
        }

skip_format_initialization:

        map->range_tree = RB_ROOT;
        for (i = 0; i < config->num_ranges; i++) {
                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
                struct regmap_range_node *new;

                /* Sanity check */
                if (range_cfg->range_max < range_cfg->range_min) {
                        dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
                                range_cfg->range_max, range_cfg->range_min);
                        goto err_range;
                }

                if (range_cfg->range_max > map->max_register) {
                        dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
                                range_cfg->range_max, map->max_register);
                        goto err_range;
                }

                if (range_cfg->selector_reg > map->max_register) {
                        dev_err(map->dev,
                                "Invalid range %d: selector out of map\n", i);
                        goto err_range;
                }

                if (range_cfg->window_len == 0) {
                        dev_err(map->dev, "Invalid range %d: window_len 0\n",
                                i);
                        goto err_range;
                }

                /* Make sure, that this register range has no selector
                   or data window within its boundary */
                for (j = 0; j < config->num_ranges; j++) {
                        unsigned sel_reg = config->ranges[j].selector_reg;
                        unsigned win_min = config->ranges[j].window_start;
                        unsigned win_max = win_min +
                                           config->ranges[j].window_len - 1;

                        /* Allow data window inside its own virtual range */
                        if (j == i)
                                continue;

                        if (range_cfg->range_min <= sel_reg &&
                            sel_reg <= range_cfg->range_max) {
                                dev_err(map->dev,
                                        "Range %d: selector for %d in window\n",
                                        i, j);
                                goto err_range;
                        }

                        if (!(win_max < range_cfg->range_min ||
                              win_min > range_cfg->range_max)) {
                                dev_err(map->dev,
                                        "Range %d: window for %d in window\n",
                                        i, j);
                                goto err_range;
                        }
                }

                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL) {
                        ret = -ENOMEM;
                        goto err_range;
                }

                new->map = map;
                new->name = range_cfg->name;
                new->range_min = range_cfg->range_min;
                new->range_max = range_cfg->range_max;
                new->selector_reg = range_cfg->selector_reg;
                new->selector_mask = range_cfg->selector_mask;
                new->selector_shift = range_cfg->selector_shift;
                new->window_start = range_cfg->window_start;
                new->window_len = range_cfg->window_len;

                if (!_regmap_range_add(map, new)) {
                        dev_err(map->dev, "Failed to add range %d\n", i);
                        kfree(new);
                        goto err_range;
                }

                if (map->selector_work_buf == NULL) {
                        map->selector_work_buf =
                                kzalloc(map->format.buf_size, GFP_KERNEL);
                        if (map->selector_work_buf == NULL) {
                                ret = -ENOMEM;
                                goto err_range;
                        }
                }
        }

        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;

        if (dev) {
                ret = regmap_attach_dev(dev, map, config);
                if (ret != 0)
                        goto err_regcache;
        }

        return map;

err_regcache:
        regcache_exit(map);
err_range:
        regmap_range_exit(map);
        kfree(map->work_buf);
err_map:
        kfree(map);
err:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regmap_init);
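
/*
 * Illustrative sketch (not compiled here): a minimal example of how a
 * bus-specific init function or client driver might describe a device and
 * create a regmap.  The register layout, max_register value and the
 * "example_bus"/"bus_context" pointers are hypothetical and not defined in
 * this file.
 *
 *      static const struct regmap_config example_config = {
 *              .reg_bits = 8,
 *              .val_bits = 16,
 *              .max_register = 0x7f,
 *              .cache_type = REGCACHE_NONE,
 *      };
 *
 *      struct regmap *map;
 *
 *      map = regmap_init(dev, &example_bus, bus_context, &example_config);
 *      if (IS_ERR(map))
 *              return PTR_ERR(map);
 */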

static void devm_regmap_release(struct device *dev, void *res)
{
        regmap_exit(*(struct regmap **)res);
}

/**
 * devm_regmap_init(): Initialise managed register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.  The
 * map will be automatically freed by the device management code.
 */
struct regmap *devm_regmap_init(struct device *dev,
                                const struct regmap_bus *bus,
                                void *bus_context,
                                const struct regmap_config *config)
{
        struct regmap **ptr, *regmap;

        ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        regmap = regmap_init(dev, bus, bus_context, config);
        if (!IS_ERR(regmap)) {
                *ptr = regmap;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return regmap;
}
EXPORT_SYMBOL_GPL(devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
        struct regmap *regmap, struct reg_field reg_field)
{
        int field_bits = reg_field.msb - reg_field.lsb + 1;
        rm_field->regmap = regmap;
        rm_field->reg = reg_field.reg;
        rm_field->shift = reg_field.lsb;
        rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
        rm_field->id_size = reg_field.id_size;
        rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
                struct regmap *regmap, struct reg_field reg_field)
{
        struct regmap_field *rm_field = devm_kzalloc(dev,
                                        sizeof(*rm_field), GFP_KERNEL);
        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
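
/*
 * Illustrative sketch (not compiled here): allocating a managed field for
 * bits [3:0] of a hypothetical register 0x20 using the REG_FIELD() helper
 * from <linux/regmap.h>, then writing through it.
 *
 *      static const struct reg_field example_field = REG_FIELD(0x20, 0, 3);
 *      struct regmap_field *field;
 *
 *      field = devm_regmap_field_alloc(dev, map, example_field);
 *      if (IS_ERR(field))
 *              return PTR_ERR(field);
 *
 *      regmap_field_write(field, 0x5);
 */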

/**
 * devm_regmap_field_free(): Free register field allocated using
 * devm_regmap_field_alloc.  Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
        struct regmap_field *field)
{
        devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 * in a register map.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
                struct reg_field reg_field)
{
        struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

        if (!rm_field)
                return ERR_PTR(-ENOMEM);

        regmap_field_init(rm_field, regmap, reg_field);

        return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
        kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
        regcache_exit(map);
        regmap_debugfs_exit(map);

        map->max_register = config->max_register;
        map->writeable_reg = config->writeable_reg;
        map->readable_reg = config->readable_reg;
        map->volatile_reg = config->volatile_reg;
        map->precious_reg = config->precious_reg;
        map->cache_type = config->cache_type;

        regmap_debugfs_init(map, config->name);

        map->cache_bypass = false;
        map->cache_only = false;

        return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
        struct regmap_async *async;

        regcache_exit(map);
        regmap_debugfs_exit(map);
        regmap_range_exit(map);
        if (map->bus && map->bus->free_context)
                map->bus->free_context(map->bus_context);
        kfree(map->work_buf);
        while (!list_empty(&map->async_free)) {
                async = list_first_entry_or_null(&map->async_free,
                                                 struct regmap_async,
                                                 list);
                list_del(&async->list);
                kfree(async->work_buf);
                kfree(async);
        }
        kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
        struct regmap **r = res;
        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }

        /* If the user didn't specify a name match any */
        if (!data)
                return 1;

        return (*r)->name == data;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
        struct regmap **r = devres_find(dev, dev_get_regmap_release,
                                        dev_get_regmap_match, (void *)name);

        if (!r)
                return NULL;
        return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
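
/*
 * Illustrative sketch (not compiled here): a child driver (for example an
 * MFD cell) looking up the regmap registered by its parent device.  The
 * parent/child relationship shown is hypothetical.
 *
 *      struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *      if (!map)
 *              return -ENODEV;
 */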

/**
 * regmap_get_device(): Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
        return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
                               struct regmap_range_node *range,
                               unsigned int val_num)
{
        void *orig_work_buf;
        unsigned int win_offset;
        unsigned int win_page;
        bool page_chg;
        int ret;

        win_offset = (*reg - range->range_min) % range->window_len;
        win_page = (*reg - range->range_min) / range->window_len;

        if (val_num > 1) {
                /* Bulk write shouldn't cross range boundary */
                if (*reg + val_num - 1 > range->range_max)
                        return -EINVAL;

                /* ... or single page boundary */
                if (val_num > range->window_len - win_offset)
                        return -EINVAL;
        }

        /* It is possible to have selector register inside data window.
           In that case, selector register is located on every page and
           it needs no page switching, when accessed alone. */
        if (val_num > 1 ||
            range->window_start + win_offset != range->selector_reg) {
                /* Use separate work_buf during page switching */
                orig_work_buf = map->work_buf;
                map->work_buf = map->selector_work_buf;

                ret = _regmap_update_bits(map, range->selector_reg,
                                          range->selector_mask,
                                          win_page << range->selector_shift,
                                          &page_chg);

                map->work_buf = orig_work_buf;

                if (ret != 0)
                        return ret;
        }

        *reg = range->window_start + win_offset;

        return 0;
}
*map
, unsigned int reg
,
1196 const void *val
, size_t val_len
)
1198 struct regmap_range_node
*range
;
1199 unsigned long flags
;
1200 u8
*u8
= map
->work_buf
;
1201 void *work_val
= map
->work_buf
+ map
->format
.reg_bytes
+
1202 map
->format
.pad_bytes
;
1204 int ret
= -ENOTSUPP
;
1210 /* Check for unwritable registers before we start */
1211 if (map
->writeable_reg
)
1212 for (i
= 0; i
< val_len
/ map
->format
.val_bytes
; i
++)
1213 if (!map
->writeable_reg(map
->dev
,
1214 reg
+ (i
* map
->reg_stride
)))
1217 if (!map
->cache_bypass
&& map
->format
.parse_val
) {
1219 int val_bytes
= map
->format
.val_bytes
;
1220 for (i
= 0; i
< val_len
/ val_bytes
; i
++) {
1221 ival
= map
->format
.parse_val(val
+ (i
* val_bytes
));
1222 ret
= regcache_write(map
, reg
+ (i
* map
->reg_stride
),
1226 "Error in caching of register: %x ret: %d\n",
1231 if (map
->cache_only
) {
1232 map
->cache_dirty
= true;
1237 range
= _regmap_range_lookup(map
, reg
);
1239 int val_num
= val_len
/ map
->format
.val_bytes
;
1240 int win_offset
= (reg
- range
->range_min
) % range
->window_len
;
1241 int win_residue
= range
->window_len
- win_offset
;
1243 /* If the write goes beyond the end of the window split it */
1244 while (val_num
> win_residue
) {
1245 dev_dbg(map
->dev
, "Writing window %d/%zu\n",
1246 win_residue
, val_len
/ map
->format
.val_bytes
);
1247 ret
= _regmap_raw_write(map
, reg
, val
, win_residue
*
1248 map
->format
.val_bytes
);
1253 val_num
-= win_residue
;
1254 val
+= win_residue
* map
->format
.val_bytes
;
1255 val_len
-= win_residue
* map
->format
.val_bytes
;
1257 win_offset
= (reg
- range
->range_min
) %
1259 win_residue
= range
->window_len
- win_offset
;
1262 ret
= _regmap_select_page(map
, ®
, range
, val_num
);
1267 map
->format
.format_reg(map
->work_buf
, reg
, map
->reg_shift
);
1269 u8
[0] |= map
->write_flag_mask
;
1272 * Essentially all I/O mechanisms will be faster with a single
1273 * buffer to write. Since register syncs often generate raw
1274 * writes of single registers optimise that case.
1276 if (val
!= work_val
&& val_len
== map
->format
.val_bytes
) {
1277 memcpy(work_val
, val
, map
->format
.val_bytes
);
1281 if (map
->async
&& map
->bus
->async_write
) {
1282 struct regmap_async
*async
;
1284 trace_regmap_async_write_start(map
, reg
, val_len
);
1286 spin_lock_irqsave(&map
->async_lock
, flags
);
1287 async
= list_first_entry_or_null(&map
->async_free
,
1288 struct regmap_async
,
1291 list_del(&async
->list
);
1292 spin_unlock_irqrestore(&map
->async_lock
, flags
);
1295 async
= map
->bus
->async_alloc();
1299 async
->work_buf
= kzalloc(map
->format
.buf_size
,
1300 GFP_KERNEL
| GFP_DMA
);
1301 if (!async
->work_buf
) {
1309 /* If the caller supplied the value we can use it safely. */
1310 memcpy(async
->work_buf
, map
->work_buf
, map
->format
.pad_bytes
+
1311 map
->format
.reg_bytes
+ map
->format
.val_bytes
);
1313 spin_lock_irqsave(&map
->async_lock
, flags
);
1314 list_add_tail(&async
->list
, &map
->async_list
);
1315 spin_unlock_irqrestore(&map
->async_lock
, flags
);
1317 if (val
!= work_val
)
1318 ret
= map
->bus
->async_write(map
->bus_context
,
1320 map
->format
.reg_bytes
+
1321 map
->format
.pad_bytes
,
1322 val
, val_len
, async
);
1324 ret
= map
->bus
->async_write(map
->bus_context
,
1326 map
->format
.reg_bytes
+
1327 map
->format
.pad_bytes
+
1328 val_len
, NULL
, 0, async
);
1331 dev_err(map
->dev
, "Failed to schedule write: %d\n",
1334 spin_lock_irqsave(&map
->async_lock
, flags
);
1335 list_move(&async
->list
, &map
->async_free
);
1336 spin_unlock_irqrestore(&map
->async_lock
, flags
);
1342 trace_regmap_hw_write_start(map
, reg
, val_len
/ map
->format
.val_bytes
);
1344 /* If we're doing a single register write we can probably just
1345 * send the work_buf directly, otherwise try to do a gather
1348 if (val
== work_val
)
1349 ret
= map
->bus
->write(map
->bus_context
, map
->work_buf
,
1350 map
->format
.reg_bytes
+
1351 map
->format
.pad_bytes
+
1353 else if (map
->bus
->gather_write
)
1354 ret
= map
->bus
->gather_write(map
->bus_context
, map
->work_buf
,
1355 map
->format
.reg_bytes
+
1356 map
->format
.pad_bytes
,
1359 /* If that didn't work fall back on linearising by hand. */
1360 if (ret
== -ENOTSUPP
) {
1361 len
= map
->format
.reg_bytes
+ map
->format
.pad_bytes
+ val_len
;
1362 buf
= kzalloc(len
, GFP_KERNEL
);
1366 memcpy(buf
, map
->work_buf
, map
->format
.reg_bytes
);
1367 memcpy(buf
+ map
->format
.reg_bytes
+ map
->format
.pad_bytes
,
1369 ret
= map
->bus
->write(map
->bus_context
, buf
, len
);
1374 trace_regmap_hw_write_done(map
, reg
, val_len
/ map
->format
.val_bytes
);
1380 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1382 * @map: Map to check.
1384 bool regmap_can_raw_write(struct regmap
*map
)
1386 return map
->bus
&& map
->format
.format_val
&& map
->format
.format_reg
;
1388 EXPORT_SYMBOL_GPL(regmap_can_raw_write
);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                       unsigned int val)
{
        int ret;
        struct regmap_range_node *range;
        struct regmap *map = context;

        WARN_ON(!map->bus || !map->format.format_write);

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range, 1);
                if (ret != 0)
                        return ret;
        }

        map->format.format_write(map, reg, val);

        trace_regmap_hw_write_start(map, reg, 1);

        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.buf_size);

        trace_regmap_hw_write_done(map, reg, 1);

        return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
                                 unsigned int val)
{
        struct regmap *map = context;

        return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val)
{
        struct regmap *map = context;

        WARN_ON(!map->bus || !map->format.format_val);

        map->format.format_val(map->work_buf + map->format.reg_bytes
                               + map->format.pad_bytes, val, 0);
        return _regmap_raw_write(map, reg,
                                 map->work_buf +
                                 map->format.reg_bytes +
                                 map->format.pad_bytes,
                                 map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
        return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
                  unsigned int val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        if (!regmap_writeable(map, reg))
                return -EIO;

        if (!map->cache_bypass && !map->defer_caching) {
                ret = regcache_write(map, reg, val);
                if (ret != 0)
                        return ret;
                if (map->cache_only) {
                        map->cache_dirty = true;
                        return 0;
                }
        }

#ifdef LOG_DEVICE
        if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

        trace_regmap_reg_write(map, reg, val);

        return map->reg_write(context, reg, val);
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
        int ret;

        if (reg % map->reg_stride)
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_write(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
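
/*
 * Illustrative sketch (not compiled here): writing a single register.  The
 * register address and value are made up for the example.
 *
 *      ret = regmap_write(map, 0x04, 0x1234);
 *      if (ret != 0)
 *              dev_err(dev, "Failed to write register: %d\n", ret);
 */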

/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
        int ret;

        if (reg % map->reg_stride)
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_write(map, reg, val);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len)
{
        int ret;

        if (!regmap_can_raw_write(map))
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
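
/*
 * Illustrative sketch (not compiled here): downloading a firmware-style
 * block with regmap_raw_write().  The data must already be laid out in the
 * device's wire format; "fw" (a const struct firmware *) and
 * EXAMPLE_DSP_BASE are hypothetical names for the example.
 *
 *      ret = regmap_raw_write(map, EXAMPLE_DSP_BASE, fw->data, fw->size);
 *
 * fw->size must be a multiple of map->format.val_bytes, otherwise -EINVAL
 * is returned as checked above.
 */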

/**
 * regmap_field_write(): Write a value to a single register field
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_write(struct regmap_field *field, unsigned int val)
{
        return regmap_update_bits(field->regmap, field->reg,
                                  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_write);

/**
 * regmap_field_update_bits(): Perform a read/modify/write cycle
 *                             on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
{
        mask = (mask << field->shift) & field->mask;

        return regmap_update_bits(field->regmap, field->reg,
                                  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits);

/**
 * regmap_fields_write(): Write a value to a single register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_write(struct regmap_field *field, unsigned int id,
                        unsigned int val)
{
        if (id >= field->id_size)
                return -EINVAL;

        return regmap_update_bits(field->regmap,
                                  field->reg + (field->id_offset * id),
                                  field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_write);

/**
 * regmap_fields_update_bits(): Perform a read/modify/write cycle
 *                              on the register field
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
                              unsigned int mask, unsigned int val)
{
        if (id >= field->id_size)
                return -EINVAL;

        mask = (mask << field->shift) & field->mask;

        return regmap_update_bits(field->regmap,
                                  field->reg + (field->id_offset * id),
                                  mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits);

/**
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                      size_t val_count)
{
        int ret = 0, i;
        size_t val_bytes = map->format.val_bytes;

        if (map->bus && !map->format.parse_inplace)
                return -EINVAL;
        if (reg % map->reg_stride)
                return -EINVAL;

        /*
         * Some devices don't support bulk write, for
         * them we have a series of single write operations.
         */
        if (!map->bus || map->use_single_rw) {
                map->lock(map->lock_arg);
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;

                        switch (val_bytes) {
                        case 1:
                                ival = *(u8 *)(val + (i * val_bytes));
                                break;
                        case 2:
                                ival = *(u16 *)(val + (i * val_bytes));
                                break;
                        case 4:
                                ival = *(u32 *)(val + (i * val_bytes));
                                break;
#ifdef CONFIG_64BIT
                        case 8:
                                ival = *(u64 *)(val + (i * val_bytes));
                                break;
#endif
                        default:
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = _regmap_write(map, reg + (i * map->reg_stride),
                                            ival);
                        if (ret != 0)
                                goto out;
                }
out:
                map->unlock(map->lock_arg);
        } else {
                void *wval;

                if (!val_count)
                        return -EINVAL;

                wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
                if (!wval) {
                        dev_err(map->dev, "Error in memory allocation\n");
                        return -ENOMEM;
                }
                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(wval + i);

                map->lock(map->lock_arg);
                ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
                map->unlock(map->lock_arg);

                kfree(wval);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
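
/*
 * Illustrative sketch (not compiled here): writing four consecutive 16-bit
 * registers from a native-endian array.  Register 0x10 and the coefficient
 * values are hypothetical.
 *
 *      u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *      ret = regmap_bulk_write(map, 0x10, coeffs, ARRAY_SIZE(coeffs));
 */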

/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
                                       const struct reg_default *regs,
                                       size_t num_regs)
{
        int ret;
        void *buf;
        int i;
        u8 *u8;
        size_t val_bytes = map->format.val_bytes;
        size_t reg_bytes = map->format.reg_bytes;
        size_t pad_bytes = map->format.pad_bytes;
        size_t pair_size = reg_bytes + pad_bytes + val_bytes;
        size_t len = pair_size * num_regs;

        if (!len)
                return -EINVAL;

        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* We have to linearise by hand. */

        u8 = buf;

        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
                int val = regs[i].def;
                trace_regmap_hw_write_start(map, reg, 1);
                map->format.format_reg(u8, reg, map->reg_shift);
                u8 += reg_bytes + pad_bytes;
                map->format.format_val(u8, val, 0);
                u8 += val_bytes;
        }
        u8 = buf;
        *u8 |= map->write_flag_mask;

        ret = map->bus->write(map->bus_context, buf, len);

        kfree(buf);

        for (i = 0; i < num_regs; i++) {
                int reg = regs[i].reg;
                trace_regmap_hw_write_done(map, reg, 1);
        }
        return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
                                          unsigned int reg,
                                          struct regmap_range_node *range)
{
        unsigned int win_page = (reg - range->range_min) / range->window_len;

        return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
                                               struct reg_default *regs,
                                               size_t num_regs)
{
        int ret;
        int i, n;
        struct reg_default *base;
        unsigned int this_page = 0;
        /*
         * the set of registers is not necessarily in order, but
         * since the order of write must be preserved this algorithm
         * chops the set each time the page changes
         */
        base = regs;
        for (i = 0, n = 0; i < num_regs; i++, n++) {
                unsigned int reg = regs[i].reg;
                struct regmap_range_node *range;

                range = _regmap_range_lookup(map, reg);
                if (range) {
                        unsigned int win_page = _regmap_register_page(map, reg,
                                                                      range);

                        if (i == 0)
                                this_page = win_page;
                        if (win_page != this_page) {
                                this_page = win_page;
                                ret = _regmap_raw_multi_reg_write(map, base, n);
                                if (ret != 0)
                                        return ret;
                                base += n;
                                n = 0;
                        }
                        ret = _regmap_select_page(map, &base[n].reg, range, 1);
                        if (ret != 0)
                                return ret;
                }
        }
        if (n > 0)
                return _regmap_raw_multi_reg_write(map, base, n);
        return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
                                   const struct reg_default *regs,
                                   size_t num_regs)
{
        int i;
        int ret;

        if (!map->can_multi_write) {
                for (i = 0; i < num_regs; i++) {
                        ret = _regmap_write(map, regs[i].reg, regs[i].def);
                        if (ret != 0)
                                return ret;
                }
                return 0;
        }

        if (!map->format.parse_inplace)
                return -EINVAL;

        if (map->writeable_reg)
                for (i = 0; i < num_regs; i++) {
                        int reg = regs[i].reg;
                        if (!map->writeable_reg(map->dev, reg))
                                return -EINVAL;
                        if (reg % map->reg_stride)
                                return -EINVAL;
                }

        if (!map->cache_bypass) {
                for (i = 0; i < num_regs; i++) {
                        unsigned int val = regs[i].def;
                        unsigned int reg = regs[i].reg;
                        ret = regcache_write(map, reg, val);
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
                                        reg, ret);
                                return ret;
                        }
                }
                if (map->cache_only) {
                        map->cache_dirty = true;
                        return 0;
                }
        }

        WARN_ON(!map->bus);

        for (i = 0; i < num_regs; i++) {
                unsigned int reg = regs[i].reg;
                struct regmap_range_node *range;
                range = _regmap_range_lookup(map, reg);
                if (range) {
                        size_t len = sizeof(struct reg_default)*num_regs;
                        struct reg_default *base = kmemdup(regs, len,
                                                           GFP_KERNEL);
                        if (!base)
                                return -ENOMEM;
                        ret = _regmap_range_multi_paged_reg_write(map, base,
                                                                  num_regs);
                        kfree(base);

                        return ret;
                }
        }
        return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/*
 * regmap_multi_reg_write(): Write multiple registers to the device
 *
 * where the set of register,value pairs are supplied in any order,
 * possibly not all in a single range.
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed.  However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
                           int num_regs)
{
        int ret;

        map->lock(map->lock_arg);

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
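
/*
 * Illustrative sketch (not compiled here): writing an unordered set of
 * register/value pairs in one call.  The addresses and values are made up
 * for the example.
 *
 *      static const struct reg_default example_seq[] = {
 *              { 0x30, 0x0001 },
 *              { 0x12, 0x00ff },
 *              { 0x44, 0x0000 },
 *      };
 *
 *      ret = regmap_multi_reg_write(map, example_seq,
 *                                   ARRAY_SIZE(example_seq));
 */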

/*
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 *                                    device but not the cache
 *
 * where the set of registers is supplied in any order
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
                                    const struct reg_default *regs,
                                    int num_regs)
{
        int ret;
        bool bypass;

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;
        map->cache_bypass = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 *                           asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI.  regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len)
{
        int ret;

        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (reg % map->reg_stride)
                return -EINVAL;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_raw_write(map, reg, val, val_len);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
{
        struct regmap_range_node *range;
        u8 *u8 = map->work_buf;
        int ret;

        WARN_ON(!map->bus);

        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
                                          val_len / map->format.val_bytes);
                if (ret != 0)
                        return ret;
        }

        map->format.format_reg(map->work_buf, reg, map->reg_shift);

        /*
         * Some buses or devices flag reads by setting the high bits in the
         * register address; since it's always the high bits for all
         * current formats we can do this here rather than in
         * formatting.  This may break if we get interesting formats.
         */
        u8[0] |= map->read_flag_mask;

        trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

        ret = map->bus->read(map->bus_context, map->work_buf,
                             map->format.reg_bytes + map->format.pad_bytes,
                             val, val_len);

        trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

        return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
                                unsigned int *val)
{
        struct regmap *map = context;

        return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
                            unsigned int *val)
{
        int ret;
        struct regmap *map = context;

        if (!map->format.parse_val)
                return -EINVAL;

        ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
        if (ret == 0)
                *val = map->format.parse_val(map->work_buf);

        return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
{
        int ret;
        void *context = _regmap_map_get_context(map);

        WARN_ON(!map->reg_read);

        if (!map->cache_bypass) {
                ret = regcache_read(map, reg, val);
                if (ret == 0)
                        return 0;
        }

        if (map->cache_only)
                return -EBUSY;

        if (!regmap_readable(map, reg))
                return -EIO;

        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
#ifdef LOG_DEVICE
                if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                        dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

                trace_regmap_reg_read(map, reg, *val);

                if (!map->cache_bypass)
                        regcache_write(map, reg, *val);
        }

        return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
        int ret;

        if (reg % map->reg_stride)
                return -EINVAL;

        map->lock(map->lock_arg);

        ret = _regmap_read(map, reg, val);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
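
/*
 * Illustrative sketch (not compiled here): reading a single register.  The
 * register address is made up for the example.
 *
 *      unsigned int val;
 *
 *      ret = regmap_read(map, 0x08, &val);
 *      if (ret == 0)
 *              dev_dbg(dev, "status = %x\n", val);
 */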

/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                    size_t val_len)
{
        size_t val_bytes = map->format.val_bytes;
        size_t val_count = val_len / val_bytes;
        unsigned int v;
        int ret, i;

        if (!map->bus)
                return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (reg % map->reg_stride)
                return -EINVAL;

        map->lock(map->lock_arg);

        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
            map->cache_type == REGCACHE_NONE) {
                /* Physical block read if there's no cache involved */
                ret = _regmap_raw_read(map, reg, val, val_len);

        } else {
                /* Otherwise go word by word for the cache; should be low
                 * cost as we expect to hit the cache.
                 */
                for (i = 0; i < val_count; i++) {
                        ret = _regmap_read(map, reg + (i * map->reg_stride),
                                           &v);
                        if (ret != 0)
                                goto out;

                        map->format.format_val(val + (i * val_bytes), v, 0);
                }
        }

out:
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
        int ret;
        unsigned int reg_val;
        ret = regmap_read(field->regmap, field->reg, &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read(): Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
                       unsigned int *val)
{
        int ret;
        unsigned int reg_val;

        if (id >= field->id_size)
                return -EINVAL;

        ret = regmap_read(field->regmap,
                          field->reg + (field->id_offset * id),
                          &reg_val);
        if (ret != 0)
                return ret;

        reg_val &= field->mask;
        reg_val >>= field->shift;
        *val = reg_val;

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                     size_t val_count)
{
        int ret, i;
        size_t val_bytes = map->format.val_bytes;
        bool vol = regmap_volatile_range(map, reg, val_count);

        if (reg % map->reg_stride)
                return -EINVAL;

        if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
                /*
                 * Some devices do not support bulk read, for
                 * them we have a series of single read operations.
                 */
                if (map->use_single_rw) {
                        for (i = 0; i < val_count; i++) {
                                ret = regmap_raw_read(map,
                                                reg + (i * map->reg_stride),
                                                val + (i * val_bytes),
                                                val_bytes);
                                if (ret != 0)
                                        return ret;
                        }
                } else {
                        ret = regmap_raw_read(map, reg, val,
                                              val_bytes * val_count);
                        if (ret != 0)
                                return ret;
                }

                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(val + i);
        } else {
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
                        ret = regmap_read(map, reg + (i * map->reg_stride),
                                          &ival);
                        if (ret != 0)
                                return ret;
                        memcpy(val + (i * val_bytes), &ival, val_bytes);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change)
{
        int ret;
        unsigned int tmp, orig;

        ret = _regmap_read(map, reg, &orig);
        if (ret != 0)
                return ret;

        tmp = orig & ~mask;
        tmp |= val & mask;

        if (tmp != orig) {
                ret = _regmap_write(map, reg, tmp);
                if (change)
                        *change = true;
        } else {
                if (change)
                        *change = false;
        }

        return ret;
}

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
                       unsigned int mask, unsigned int val)
{
        int ret;

        map->lock(map->lock_arg);
        ret = _regmap_update_bits(map, reg, mask, val, NULL);
        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
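
/*
 * Illustrative sketch (not compiled here): setting bit 0 and clearing bit 1
 * of a hypothetical control register 0x02 without touching the other bits.
 *
 *      ret = regmap_update_bits(map, 0x02, BIT(0) | BIT(1), BIT(0));
 *
 * A write is only issued if the masked bits actually change, as implemented
 * in _regmap_update_bits() above.
 */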

/**
 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
 *                           map asynchronously
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_update_bits(map, reg, mask, val, NULL);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_async);

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val,
                             bool *change)
{
        int ret;

        map->lock(map->lock_arg);
        ret = _regmap_update_bits(map, reg, mask, val, change);
        map->unlock(map->lock_arg);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

/**
 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
 *                                 register map asynchronously and report if
 *                                 updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
                                   unsigned int mask, unsigned int val,
                                   bool *change)
{
        int ret;

        map->lock(map->lock_arg);

        map->async = true;

        ret = _regmap_update_bits(map, reg, mask, val, change);

        map->async = false;

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
        struct regmap *map = async->map;
        bool wake;

        trace_regmap_async_io_complete(map);

        spin_lock(&map->async_lock);
        list_move(&async->list, &map->async_free);
        wake = list_empty(&map->async_list);

        if (ret != 0)
                map->async_ret = ret;

        spin_unlock(&map->async_lock);

        if (wake)
                wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&map->async_lock, flags);
        ret = list_empty(&map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
        unsigned long flags;
        int ret;

        /* Nothing to do with no async support */
        if (!map->bus || !map->bus->async_write)
                return 0;

        trace_regmap_async_complete_start(map);

        wait_event(map->async_waitq, regmap_async_is_done(map));

        spin_lock_irqsave(&map->async_lock, flags);
        ret = map->async_ret;
        map->async_ret = 0;
        spin_unlock_irqrestore(&map->async_lock, flags);

        trace_regmap_async_complete_done(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
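
/*
 * Illustrative sketch (not compiled here): queueing several asynchronous
 * writes and then waiting for them to finish.  The register addresses are
 * hypothetical; on buses without async_write the calls simply complete
 * synchronously.
 *
 *      regmap_write_async(map, 0x20, 0x1);
 *      regmap_write_async(map, 0x21, 0x2);
 *
 *      ret = regmap_async_complete(map);
 *      if (ret != 0)
 *              dev_err(dev, "Async write failed: %d\n", ret);
 */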

/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
                          int num_regs)
{
        struct reg_default *p;
        int ret;
        bool bypass;

        if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
            num_regs))
                return 0;

        p = krealloc(map->patch,
                     sizeof(struct reg_default) * (map->patch_regs + num_regs),
                     GFP_KERNEL);
        if (p) {
                memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
                map->patch = p;
                map->patch_regs += num_regs;
        } else {
                return -ENOMEM;
        }

        map->lock(map->lock_arg);

        bypass = map->cache_bypass;

        map->cache_bypass = true;
        map->async = true;

        ret = _regmap_multi_reg_write(map, regs, num_regs);

        map->async = false;
        map->cache_bypass = bypass;

        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
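
/*
 * Illustrative sketch (not compiled here): registering a vendor-style errata
 * patch so that it is applied now and again whenever the cache is
 * synchronised.  The register addresses and values are invented for the
 * example.
 *
 *      static const struct reg_default example_patch[] = {
 *              { 0x71, 0x0004 },
 *              { 0x72, 0x0001 },
 *      };
 *
 *      ret = regmap_register_patch(map, example_patch,
 *                                  ARRAY_SIZE(example_patch));
 */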

/**
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
        if (map->format.format_write)
                return -EINVAL;

        return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

int regmap_parse_val(struct regmap *map, const void *buf,
                     unsigned int *val)
{
        if (!map->format.parse_val)
                return -EINVAL;

        *val = map->format.parse_val(buf);

        return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
        regmap_debugfs_initcall();

        return 0;
}
postcore_initcall(regmap_initcall);