/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,

static int _regmap_bus_read(void *context, unsigned int reg,

static int _regmap_bus_formatted_write(void *context, unsigned int reg,

static int _regmap_bus_raw_write(void *context, unsigned int reg,
bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
    const struct regmap_range *r;

    for (i = 0, r = ranges; i < nranges; i++, r++)
        if (regmap_reg_in_range(reg, r))
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
                              const struct regmap_access_table *table)
    /* Check "no ranges" first */
    if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))

    /* In case zero "yes ranges" are supplied, any reg is OK */
    if (!table->n_yes_ranges)

    return regmap_reg_in_ranges(reg, table->yes_ranges,
EXPORT_SYMBOL_GPL(regmap_check_range_table);

bool regmap_writeable(struct regmap *map, unsigned int reg)
    if (map->max_register && reg > map->max_register)

    if (map->writeable_reg)
        return map->writeable_reg(map->dev, reg);

    return regmap_check_range_table(map, reg, map->wr_table);
bool regmap_readable(struct regmap *map, unsigned int reg)
    if (map->max_register && reg > map->max_register)

    if (map->format.format_write)

    if (map->readable_reg)
        return map->readable_reg(map->dev, reg);

    return regmap_check_range_table(map, reg, map->rd_table);

bool regmap_volatile(struct regmap *map, unsigned int reg)
    if (!regmap_readable(map, reg))

    if (map->volatile_reg)
        return map->volatile_reg(map->dev, reg);

    if (map->volatile_table)
        return regmap_check_range_table(map, reg, map->volatile_table);

bool regmap_precious(struct regmap *map, unsigned int reg)
    if (!regmap_readable(map, reg))

    if (map->precious_reg)
        return map->precious_reg(map->dev, reg);

    if (map->precious_table)
        return regmap_check_range_table(map, reg, map->precious_table);

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
    for (i = 0; i < num; i++)
        if (!regmap_volatile(map, reg + i))
static void regmap_format_2_6_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
    u8 *out = map->work_buf;

    *out = (reg << 6) | val;

static void regmap_format_4_12_write(struct regmap *map,
                                     unsigned int reg, unsigned int val)
    __be16 *out = map->work_buf;
    *out = cpu_to_be16((reg << 12) | val);

static void regmap_format_7_9_write(struct regmap *map,
                                    unsigned int reg, unsigned int val)
    __be16 *out = map->work_buf;
    *out = cpu_to_be16((reg << 9) | val);

static void regmap_format_10_14_write(struct regmap *map,
                                      unsigned int reg, unsigned int val)
    u8 *out = map->work_buf;

    out[1] = (val >> 8) | (reg << 6);

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
    b[0] = cpu_to_be16(val << shift);

static void regmap_format_16_native(void *buf, unsigned int val,
    *(u16 *)buf = val << shift;

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
    b[0] = cpu_to_be32(val << shift);

static void regmap_format_32_native(void *buf, unsigned int val,
    *(u32 *)buf = val << shift;

static void regmap_parse_inplace_noop(void *buf)

static unsigned int regmap_parse_8(const void *buf)

static unsigned int regmap_parse_16_be(const void *buf)
    const __be16 *b = buf;

    return be16_to_cpu(b[0]);

static void regmap_parse_16_be_inplace(void *buf)
    b[0] = be16_to_cpu(b[0]);

static unsigned int regmap_parse_16_native(const void *buf)

static unsigned int regmap_parse_24(const void *buf)
    unsigned int ret = b[2];
    ret |= ((unsigned int)b[1]) << 8;
    ret |= ((unsigned int)b[0]) << 16;

static unsigned int regmap_parse_32_be(const void *buf)
    const __be32 *b = buf;

    return be32_to_cpu(b[0]);

static void regmap_parse_32_be_inplace(void *buf)
    b[0] = be32_to_cpu(b[0]);

static unsigned int regmap_parse_32_native(const void *buf)
static void regmap_lock_mutex(void *__map)
    struct regmap *map = __map;
    mutex_lock(&map->mutex);

static void regmap_unlock_mutex(void *__map)
    struct regmap *map = __map;
    mutex_unlock(&map->mutex);

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
    struct regmap *map = __map;

    spin_lock_irqsave(&map->spinlock, flags);
    map->spinlock_flags = flags;

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
    struct regmap *map = __map;
    spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);

static void dev_get_regmap_release(struct device *dev, void *res)
    /*
     * We don't actually have anything to do here; the goal here
     * is not to manage the regmap but to provide a simple way to
     * get the regmap back given a struct device.
     */
static bool _regmap_range_add(struct regmap *map,
                              struct regmap_range_node *data)
    struct rb_root *root = &map->range_tree;
    struct rb_node **new = &(root->rb_node), *parent = NULL;

        struct regmap_range_node *this =
            container_of(*new, struct regmap_range_node, node);

        if (data->range_max < this->range_min)
            new = &((*new)->rb_left);
        else if (data->range_min > this->range_max)
            new = &((*new)->rb_right);

    rb_link_node(&data->node, parent, new);
    rb_insert_color(&data->node, root);

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
    struct rb_node *node = map->range_tree.rb_node;

        struct regmap_range_node *this =
            container_of(node, struct regmap_range_node, node);

        if (reg < this->range_min)
            node = node->rb_left;
        else if (reg > this->range_max)
            node = node->rb_right;

static void regmap_range_exit(struct regmap *map)
    struct rb_node *next;
    struct regmap_range_node *range_node;

    next = rb_first(&map->range_tree);
        range_node = rb_entry(next, struct regmap_range_node, node);
        next = rb_next(&range_node->node);
        rb_erase(&range_node->node, &map->range_tree);

    kfree(map->selector_work_buf);
/**
 * regmap_init(): Initialise register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer to
 * a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.
 */
struct regmap *regmap_init(struct device *dev,
                           const struct regmap_bus *bus,
                           const struct regmap_config *config)
    struct regmap *map, **m;
    enum regmap_endian reg_endian, val_endian;

    map = kzalloc(sizeof(*map), GFP_KERNEL);

    if (config->lock && config->unlock) {
        map->lock = config->lock;
        map->unlock = config->unlock;
        map->lock_arg = config->lock_arg;
        if ((bus && bus->fast_io) ||
            spin_lock_init(&map->spinlock);
            map->lock = regmap_lock_spinlock;
            map->unlock = regmap_unlock_spinlock;
            mutex_init(&map->mutex);
            map->lock = regmap_lock_mutex;
            map->unlock = regmap_unlock_mutex;

    map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
    map->format.pad_bytes = config->pad_bits / 8;
    map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
    map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
                                        config->val_bits + config->pad_bits, 8);
    map->reg_shift = config->pad_bits % 8;
    if (config->reg_stride)
        map->reg_stride = config->reg_stride;
    map->use_single_rw = config->use_single_rw;
    map->bus_context = bus_context;
    map->max_register = config->max_register;
    map->wr_table = config->wr_table;
    map->rd_table = config->rd_table;
    map->volatile_table = config->volatile_table;
    map->precious_table = config->precious_table;
    map->writeable_reg = config->writeable_reg;
    map->readable_reg = config->readable_reg;
    map->volatile_reg = config->volatile_reg;
    map->precious_reg = config->precious_reg;
    map->cache_type = config->cache_type;
    map->name = config->name;

    spin_lock_init(&map->async_lock);
    INIT_LIST_HEAD(&map->async_list);
    INIT_LIST_HEAD(&map->async_free);
    init_waitqueue_head(&map->async_waitq);

    if (config->read_flag_mask || config->write_flag_mask) {
        map->read_flag_mask = config->read_flag_mask;
        map->write_flag_mask = config->write_flag_mask;
        map->read_flag_mask = bus->read_flag_mask;

        map->reg_read = config->reg_read;
        map->reg_write = config->reg_write;

        map->defer_caching = false;
        goto skip_format_initialization;

        map->reg_read = _regmap_bus_read;

    reg_endian = config->reg_format_endian;
    if (reg_endian == REGMAP_ENDIAN_DEFAULT)
        reg_endian = bus->reg_format_endian_default;
    if (reg_endian == REGMAP_ENDIAN_DEFAULT)
        reg_endian = REGMAP_ENDIAN_BIG;

    val_endian = config->val_format_endian;
    if (val_endian == REGMAP_ENDIAN_DEFAULT)
        val_endian = bus->val_format_endian_default;
    if (val_endian == REGMAP_ENDIAN_DEFAULT)
        val_endian = REGMAP_ENDIAN_BIG;

    switch (config->reg_bits + map->reg_shift) {
        switch (config->val_bits) {
            map->format.format_write = regmap_format_2_6_write;
        switch (config->val_bits) {
            map->format.format_write = regmap_format_4_12_write;
        switch (config->val_bits) {
            map->format.format_write = regmap_format_7_9_write;
        switch (config->val_bits) {
            map->format.format_write = regmap_format_10_14_write;
        map->format.format_reg = regmap_format_8;
        switch (reg_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_reg = regmap_format_16_be;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_reg = regmap_format_16_native;
        if (reg_endian != REGMAP_ENDIAN_BIG)
        map->format.format_reg = regmap_format_24;
        switch (reg_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_reg = regmap_format_32_be;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_reg = regmap_format_32_native;

    if (val_endian == REGMAP_ENDIAN_NATIVE)
        map->format.parse_inplace = regmap_parse_inplace_noop;

    switch (config->val_bits) {
        map->format.format_val = regmap_format_8;
        map->format.parse_val = regmap_parse_8;
        map->format.parse_inplace = regmap_parse_inplace_noop;
        switch (val_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_val = regmap_format_16_be;
            map->format.parse_val = regmap_parse_16_be;
            map->format.parse_inplace = regmap_parse_16_be_inplace;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_val = regmap_format_16_native;
            map->format.parse_val = regmap_parse_16_native;
        if (val_endian != REGMAP_ENDIAN_BIG)
        map->format.format_val = regmap_format_24;
        map->format.parse_val = regmap_parse_24;
        switch (val_endian) {
        case REGMAP_ENDIAN_BIG:
            map->format.format_val = regmap_format_32_be;
            map->format.parse_val = regmap_parse_32_be;
            map->format.parse_inplace = regmap_parse_32_be_inplace;
        case REGMAP_ENDIAN_NATIVE:
            map->format.format_val = regmap_format_32_native;
            map->format.parse_val = regmap_parse_32_native;

    if (map->format.format_write) {
        if ((reg_endian != REGMAP_ENDIAN_BIG) ||
            (val_endian != REGMAP_ENDIAN_BIG))
        map->use_single_rw = true;

    if (!map->format.format_write &&
        !(map->format.format_reg && map->format.format_val))

    map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
    if (map->work_buf == NULL) {

    if (map->format.format_write) {
        map->defer_caching = false;
        map->reg_write = _regmap_bus_formatted_write;
    } else if (map->format.format_val) {
        map->defer_caching = true;
        map->reg_write = _regmap_bus_raw_write;

skip_format_initialization:

    map->range_tree = RB_ROOT;
    for (i = 0; i < config->num_ranges; i++) {
        const struct regmap_range_cfg *range_cfg = &config->ranges[i];
        struct regmap_range_node *new;

        if (range_cfg->range_max < range_cfg->range_min) {
            dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
                    range_cfg->range_max, range_cfg->range_min);

        if (range_cfg->range_max > map->max_register) {
            dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
                    range_cfg->range_max, map->max_register);

        if (range_cfg->selector_reg > map->max_register) {
                    "Invalid range %d: selector out of map\n", i);

        if (range_cfg->window_len == 0) {
            dev_err(map->dev, "Invalid range %d: window_len 0\n",

        /* Make sure that this register range has no selector
           or data window within its boundary */
        for (j = 0; j < config->num_ranges; j++) {
            unsigned sel_reg = config->ranges[j].selector_reg;
            unsigned win_min = config->ranges[j].window_start;
            unsigned win_max = win_min +
                config->ranges[j].window_len - 1;

            /* Allow data window inside its own virtual range */

            if (range_cfg->range_min <= sel_reg &&
                sel_reg <= range_cfg->range_max) {
                        "Range %d: selector for %d in window\n",

            if (!(win_max < range_cfg->range_min ||
                  win_min > range_cfg->range_max)) {
                        "Range %d: window for %d in window\n",

        new = kzalloc(sizeof(*new), GFP_KERNEL);

        new->name = range_cfg->name;
        new->range_min = range_cfg->range_min;
        new->range_max = range_cfg->range_max;
        new->selector_reg = range_cfg->selector_reg;
        new->selector_mask = range_cfg->selector_mask;
        new->selector_shift = range_cfg->selector_shift;
        new->window_start = range_cfg->window_start;
        new->window_len = range_cfg->window_len;

        if (_regmap_range_add(map, new) == false) {
            dev_err(map->dev, "Failed to add range %d\n", i);

        if (map->selector_work_buf == NULL) {
            map->selector_work_buf =
                kzalloc(map->format.buf_size, GFP_KERNEL);
            if (map->selector_work_buf == NULL) {

    regmap_debugfs_init(map, config->name);

    ret = regcache_init(map, config);

    /* Add a devres resource for dev_get_regmap() */
    m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);

    regmap_debugfs_exit(map);

    regmap_range_exit(map);
    kfree(map->work_buf);

EXPORT_SYMBOL_GPL(regmap_init);
static void devm_regmap_release(struct device *dev, void *res)
    regmap_exit(*(struct regmap **)res);

/**
 * devm_regmap_init(): Initialise managed register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.  The
 * map will be automatically freed by the device management code.
 */
struct regmap *devm_regmap_init(struct device *dev,
                                const struct regmap_bus *bus,
                                const struct regmap_config *config)
    struct regmap **ptr, *regmap;

    ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
        return ERR_PTR(-ENOMEM);

    regmap = regmap_init(dev, bus, bus_context, config);
    if (!IS_ERR(regmap)) {
        devres_add(dev, ptr);

EXPORT_SYMBOL_GPL(devm_regmap_init);
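
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * driver builds a struct regmap_config and hands it to a bus-specific init
 * helper rather than calling regmap_init()/devm_regmap_init() directly.
 * The device, register widths and addresses below are assumptions made
 * only for this example.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	// In probe(), for an I2C device:
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */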
static void regmap_field_init(struct regmap_field *rm_field,
                              struct regmap *regmap, struct reg_field reg_field)
    int field_bits = reg_field.msb - reg_field.lsb + 1;
    rm_field->regmap = regmap;
    rm_field->reg = reg_field.reg;
    rm_field->shift = reg_field.lsb;
    rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
    rm_field->id_size = reg_field.id_size;
    rm_field->id_offset = reg_field.id_offset;
/**
 * devm_regmap_field_alloc(): Allocate and initialise a register field
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
                                             struct regmap *regmap, struct reg_field reg_field)
    struct regmap_field *rm_field = devm_kzalloc(dev,
                                                 sizeof(*rm_field), GFP_KERNEL);
        return ERR_PTR(-ENOMEM);

    regmap_field_init(rm_field, regmap, reg_field);

EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
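
/*
 * Usage sketch (illustrative assumption, not from the original source):
 * a field is normally described with the REG_FIELD() helper and then
 * accessed through the regmap_field API.  Register 0x20 and bits 2..4
 * are made up for the example.
 *
 *	static const struct reg_field foo_rate_field = REG_FIELD(0x20, 2, 4);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_rate_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *	ret = regmap_field_write(field, 3);	// writes 3 << 2, masked to bits 2..4
 */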
/**
 * devm_regmap_field_free(): Free register field allocated using
 * devm_regmap_field_alloc.  Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per device-driver
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
                            struct regmap_field *field)
    devm_kfree(dev, field);
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc(): Allocate and initialise a register field
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field.  The regmap_field should be freed by the
 * user once it's finished working with it using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
                                        struct reg_field reg_field)
    struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
        return ERR_PTR(-ENOMEM);

    regmap_field_init(rm_field, regmap, reg_field);

EXPORT_SYMBOL_GPL(regmap_field_alloc);
/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
    regmap_debugfs_exit(map);

    map->max_register = config->max_register;
    map->writeable_reg = config->writeable_reg;
    map->readable_reg = config->readable_reg;
    map->volatile_reg = config->volatile_reg;
    map->precious_reg = config->precious_reg;
    map->cache_type = config->cache_type;

    regmap_debugfs_init(map, config->name);

    map->cache_bypass = false;
    map->cache_only = false;

    return regcache_init(map, config);
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
    struct regmap_async *async;

    regmap_debugfs_exit(map);
    regmap_range_exit(map);
    if (map->bus && map->bus->free_context)
        map->bus->free_context(map->bus_context);
    kfree(map->work_buf);
    while (!list_empty(&map->async_free)) {
        async = list_first_entry_or_null(&map->async_free,
        list_del(&async->list);
        kfree(async->work_buf);
EXPORT_SYMBOL_GPL(regmap_exit);
static int dev_get_regmap_match(struct device *dev, void *res, void *data)
    struct regmap **r = res;

    /* If the user didn't specify a name match any */
        return (*r)->name == data;

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
    struct regmap **r = devres_find(dev, dev_get_regmap_release,
                                    dev_get_regmap_match, (void *)name);

EXPORT_SYMBOL_GPL(dev_get_regmap);
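
/*
 * Usage sketch (illustrative): a child device of an MFD commonly picks up
 * the register map registered by its parent.  The parent/child relationship
 * here is an assumption for the example.
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */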
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
                               struct regmap_range_node *range,
                               unsigned int val_num)
    void *orig_work_buf;
    unsigned int win_offset;
    unsigned int win_page;

    win_offset = (*reg - range->range_min) % range->window_len;
    win_page = (*reg - range->range_min) / range->window_len;

    /* Bulk write shouldn't cross range boundary */
    if (*reg + val_num - 1 > range->range_max)

    /* ... or single page boundary */
    if (val_num > range->window_len - win_offset)

    /* It is possible to have the selector register inside the data window.
       In that case the selector register is located on every page and
       needs no page switching when accessed alone. */
        range->window_start + win_offset != range->selector_reg) {
        /* Use separate work_buf during page switching */
        orig_work_buf = map->work_buf;
        map->work_buf = map->selector_work_buf;

        ret = _regmap_update_bits(map, range->selector_reg,
                                  range->selector_mask,
                                  win_page << range->selector_shift,

        map->work_buf = orig_work_buf;

    *reg = range->window_start + win_offset;
int _regmap_raw_write(struct regmap *map, unsigned int reg,
                      const void *val, size_t val_len)
    struct regmap_range_node *range;
    unsigned long flags;
    u8 *u8 = map->work_buf;
    void *work_val = map->work_buf + map->format.reg_bytes +
                     map->format.pad_bytes;
    int ret = -ENOTSUPP;

    /* Check for unwritable registers before we start */
    if (map->writeable_reg)
        for (i = 0; i < val_len / map->format.val_bytes; i++)
            if (!map->writeable_reg(map->dev,
                                    reg + (i * map->reg_stride)))

    if (!map->cache_bypass && map->format.parse_val) {
        int val_bytes = map->format.val_bytes;
        for (i = 0; i < val_len / val_bytes; i++) {
            ival = map->format.parse_val(val + (i * val_bytes));
            ret = regcache_write(map, reg + (i * map->reg_stride),
                    "Error in caching of register: %x ret: %d\n",
        if (map->cache_only) {
            map->cache_dirty = true;

    range = _regmap_range_lookup(map, reg);
        int val_num = val_len / map->format.val_bytes;
        int win_offset = (reg - range->range_min) % range->window_len;
        int win_residue = range->window_len - win_offset;

        /* If the write goes beyond the end of the window split it */
        while (val_num > win_residue) {
            dev_dbg(map->dev, "Writing window %d/%zu\n",
                    win_residue, val_len / map->format.val_bytes);
            ret = _regmap_raw_write(map, reg, val, win_residue *
                                    map->format.val_bytes);

            val_num -= win_residue;
            val += win_residue * map->format.val_bytes;
            val_len -= win_residue * map->format.val_bytes;

            win_offset = (reg - range->range_min) %
            win_residue = range->window_len - win_offset;

        ret = _regmap_select_page(map, &reg, range, val_num);

    map->format.format_reg(map->work_buf, reg, map->reg_shift);

    u8[0] |= map->write_flag_mask;

    /*
     * Essentially all I/O mechanisms will be faster with a single
     * buffer to write.  Since register syncs often generate raw
     * writes of single registers optimise that case.
     */
    if (val != work_val && val_len == map->format.val_bytes) {
        memcpy(work_val, val, map->format.val_bytes);

    if (map->async && map->bus->async_write) {
        struct regmap_async *async;

        trace_regmap_async_write_start(map->dev, reg, val_len);

        spin_lock_irqsave(&map->async_lock, flags);
        async = list_first_entry_or_null(&map->async_free,
                                         struct regmap_async,
            list_del(&async->list);
        spin_unlock_irqrestore(&map->async_lock, flags);

            async = map->bus->async_alloc();

            async->work_buf = kzalloc(map->format.buf_size,
                                      GFP_KERNEL | GFP_DMA);
            if (!async->work_buf) {

        /* If the caller supplied the value we can use it safely. */
        memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
               map->format.reg_bytes + map->format.val_bytes);

        spin_lock_irqsave(&map->async_lock, flags);
        list_add_tail(&async->list, &map->async_list);
        spin_unlock_irqrestore(&map->async_lock, flags);

        if (val != work_val)
            ret = map->bus->async_write(map->bus_context,
                                        map->format.reg_bytes +
                                        map->format.pad_bytes,
                                        val, val_len, async);
            ret = map->bus->async_write(map->bus_context,
                                        map->format.reg_bytes +
                                        map->format.pad_bytes +
                                        val_len, NULL, 0, async);

            dev_err(map->dev, "Failed to schedule write: %d\n",

            spin_lock_irqsave(&map->async_lock, flags);
            list_move(&async->list, &map->async_free);
            spin_unlock_irqrestore(&map->async_lock, flags);

    trace_regmap_hw_write_start(map->dev, reg,
                                val_len / map->format.val_bytes);

    /* If we're doing a single register write we can probably just
     * send the work_buf directly, otherwise try to do a gather
     * write.
     */
    if (val == work_val)
        ret = map->bus->write(map->bus_context, map->work_buf,
                              map->format.reg_bytes +
                              map->format.pad_bytes +
    else if (map->bus->gather_write)
        ret = map->bus->gather_write(map->bus_context, map->work_buf,
                                     map->format.reg_bytes +
                                     map->format.pad_bytes,

    /* If that didn't work fall back on linearising by hand. */
    if (ret == -ENOTSUPP) {
        len = map->format.reg_bytes + map->format.pad_bytes + val_len;
        buf = kzalloc(len, GFP_KERNEL);

        memcpy(buf, map->work_buf, map->format.reg_bytes);
        memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,

        ret = map->bus->write(map->bus_context, buf, len);

    trace_regmap_hw_write_done(map->dev, reg,
                               val_len / map->format.val_bytes);
/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
    return map->bus && map->format.format_val && map->format.format_reg;
EXPORT_SYMBOL_GPL(regmap_can_raw_write);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
    struct regmap_range_node *range;
    struct regmap *map = context;

    WARN_ON(!map->bus || !map->format.format_write);

    range = _regmap_range_lookup(map, reg);
        ret = _regmap_select_page(map, &reg, range, 1);

    map->format.format_write(map, reg, val);

    trace_regmap_hw_write_start(map->dev, reg, 1);

    ret = map->bus->write(map->bus_context, map->work_buf,
                          map->format.buf_size);

    trace_regmap_hw_write_done(map->dev, reg, 1);

static int _regmap_bus_raw_write(void *context, unsigned int reg,
    struct regmap *map = context;

    WARN_ON(!map->bus || !map->format.format_val);

    map->format.format_val(map->work_buf + map->format.reg_bytes
                           + map->format.pad_bytes, val, 0);
    return _regmap_raw_write(map, reg,
                             map->format.reg_bytes +
                             map->format.pad_bytes,
                             map->format.val_bytes);

static inline void *_regmap_map_get_context(struct regmap *map)
    return (map->bus) ? map : map->bus_context;
int _regmap_write(struct regmap *map, unsigned int reg,
    void *context = _regmap_map_get_context(map);

    if (!regmap_writeable(map, reg))

    if (!map->cache_bypass && !map->defer_caching) {
        ret = regcache_write(map, reg, val);
        if (map->cache_only) {
            map->cache_dirty = true;

    if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
        dev_info(map->dev, "%x <= %x\n", reg, val);

    trace_regmap_reg_write(map->dev, reg, val);

    return map->reg_write(context, reg, val);
/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    ret = _regmap_write(map, reg, val);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_write);
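
/*
 * Usage sketch (illustrative register and value): a single register write
 * under the map's own locking.
 *
 *	ret = regmap_write(map, 0x04, 0xff);
 *	if (ret != 0)
 *		dev_err(dev, "failed to write reset register: %d\n", ret);
 */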
/**
 * regmap_write_async(): Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    ret = _regmap_write(map, reg, val);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_write_async);

/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len)
    if (!regmap_can_raw_write(map))
    if (val_len % map->format.val_bytes)

    map->lock(map->lock_arg);

    ret = _regmap_raw_write(map, reg, val, val_len);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_raw_write);
/**
 * regmap_field_write(): Write a value to a single register field
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_write(struct regmap_field *field, unsigned int val)
    return regmap_update_bits(field->regmap, field->reg,
                              field->mask, val << field->shift);
EXPORT_SYMBOL_GPL(regmap_field_write);

/**
 * regmap_field_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
    mask = (mask << field->shift) & field->mask;

    return regmap_update_bits(field->regmap, field->reg,
                              mask, val << field->shift);
EXPORT_SYMBOL_GPL(regmap_field_update_bits);
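
/*
 * Worked example (assumed field layout): for a field created with
 * REG_FIELD(reg, 4, 7), field->shift is 4 and field->mask is 0xf0.
 * A call such as
 *
 *	regmap_field_update_bits(field, 0x3, 0x2);
 *
 * therefore becomes regmap_update_bits(map, reg, 0x30, 0x20): the caller's
 * mask and value are shifted into place and clamped to the field before the
 * read/modify/write is issued.
 */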
/**
 * regmap_fields_write(): Write a value to a single register field with port ID
 *
 * @field: Register field to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_write(struct regmap_field *field, unsigned int id,
    if (id >= field->id_size)

    return regmap_update_bits(field->regmap,
                              field->reg + (field->id_offset * id),
                              field->mask, val << field->shift);
EXPORT_SYMBOL_GPL(regmap_fields_write);

/**
 * regmap_fields_update_bits(): Perform a read/modify/write cycle
 * on the register field
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
                              unsigned int mask, unsigned int val)
    if (id >= field->id_size)

    mask = (mask << field->shift) & field->mask;

    return regmap_update_bits(field->regmap,
                              field->reg + (field->id_offset * id),
                              mask, val << field->shift);
EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
/**
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
    size_t val_bytes = map->format.val_bytes;

    if (map->bus && !map->format.parse_inplace)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    /*
     * Some devices don't support bulk write, for
     * them we have a series of single write operations.
     */
    if (!map->bus || map->use_single_rw) {
        for (i = 0; i < val_count; i++) {
            switch (val_bytes) {
                ival = *(u8 *)(val + (i * val_bytes));
                ival = *(u16 *)(val + (i * val_bytes));
                ival = *(u32 *)(val + (i * val_bytes));
                ival = *(u64 *)(val + (i * val_bytes));

            ret = _regmap_write(map, reg + (i * map->reg_stride),

        wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
            dev_err(map->dev, "Error in memory allocation\n");

        for (i = 0; i < val_count * val_bytes; i += val_bytes)
            map->format.parse_inplace(wval + i);

        ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_bulk_write);
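
/*
 * Usage sketch (illustrative): writing four 16-bit coefficients held in
 * CPU-native order; regmap converts each element to the device's value
 * format (or falls back to single writes when the bus requires it).  The
 * register base 0x10 is an assumption for the example.
 *
 *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
 *
 *	ret = regmap_bulk_write(map, 0x10, coeffs, ARRAY_SIZE(coeffs));
 */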
/**
 * regmap_multi_reg_write(): Write multiple registers to the device
 *
 * where the set of registers is supplied in any order
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
    for (i = 0; i < num_regs; i++) {
        int reg = regs[i].reg;
        if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    for (i = 0; i < num_regs; i++) {
        ret = _regmap_write(map, regs[i].reg, regs[i].def);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
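
/*
 * Usage sketch (illustrative register/value pairs): the registers need not
 * be contiguous or ordered.
 *
 *	static struct reg_default foo_setup[] = {
 *		{ .reg = 0x21, .def = 0x10 },
 *		{ .reg = 0x03, .def = 0x02 },
 *		{ .reg = 0x40, .def = 0xff },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, foo_setup, ARRAY_SIZE(foo_setup));
 */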
/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI.  regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len)
    if (val_len % map->format.val_bytes)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    ret = _regmap_raw_write(map, reg, val, val_len);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
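
/*
 * Usage sketch (illustrative): streaming a firmware blob that is already
 * laid out in the device's wire format, then waiting for completion.  The
 * buffer must stay valid until regmap_async_complete() returns; register
 * 0x100 and the firmware pointer are assumptions for the example.
 *
 *	ret = regmap_raw_write_async(map, 0x100, fw->data, fw->size);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);
 */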
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
    struct regmap_range_node *range;
    u8 *u8 = map->work_buf;

    range = _regmap_range_lookup(map, reg);
        ret = _regmap_select_page(map, &reg, range,
                                  val_len / map->format.val_bytes);

    map->format.format_reg(map->work_buf, reg, map->reg_shift);

    /*
     * Some buses or devices flag reads by setting the high bits in the
     * register address; since it's always the high bits for all
     * current formats we can do this here rather than in
     * formatting.  This may break if we get interesting formats.
     */
    u8[0] |= map->read_flag_mask;

    trace_regmap_hw_read_start(map->dev, reg,
                               val_len / map->format.val_bytes);

    ret = map->bus->read(map->bus_context, map->work_buf,
                         map->format.reg_bytes + map->format.pad_bytes,

    trace_regmap_hw_read_done(map->dev, reg,
                              val_len / map->format.val_bytes);

static int _regmap_bus_read(void *context, unsigned int reg,
    struct regmap *map = context;

    if (!map->format.parse_val)

    ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
        *val = map->format.parse_val(map->work_buf);
static int _regmap_read(struct regmap *map, unsigned int reg,
    void *context = _regmap_map_get_context(map);

    WARN_ON(!map->reg_read);

    if (!map->cache_bypass) {
        ret = regcache_read(map, reg, val);

    if (map->cache_only)

    ret = map->reg_read(context, reg, val);

    if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
        dev_info(map->dev, "%x => %x\n", reg, *val);

    trace_regmap_reg_read(map->dev, reg, *val);

    if (!map->cache_bypass)
        regcache_write(map, reg, *val);
/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    ret = _regmap_read(map, reg, val);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_read);
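
/*
 * Usage sketch (illustrative register): reading a single register; the
 * value may be satisfied from the cache for non-volatile registers.
 *
 *	unsigned int val;
 *
 *	ret = regmap_read(map, 0x00, &val);
 *	if (ret == 0)
 *		dev_dbg(dev, "chip id: %#x\n", val);
 */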
/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
    size_t val_bytes = map->format.val_bytes;
    size_t val_count = val_len / val_bytes;

    if (val_len % map->format.val_bytes)
    if (reg % map->reg_stride)

    map->lock(map->lock_arg);

    if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
        map->cache_type == REGCACHE_NONE) {
        /* Physical block read if there's no cache involved */
        ret = _regmap_raw_read(map, reg, val, val_len);
        /* Otherwise go word by word for the cache; should be low
         * cost as we expect to hit the cache.
         */
        for (i = 0; i < val_count; i++) {
            ret = _regmap_read(map, reg + (i * map->reg_stride),

            map->format.format_val(val + (i * val_bytes), v, 0);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_raw_read);
/**
 * regmap_field_read(): Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
    unsigned int reg_val;

    ret = regmap_read(field->regmap, field->reg, &reg_val);

    reg_val &= field->mask;
    reg_val >>= field->shift;
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read(): Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
    unsigned int reg_val;

    if (id >= field->id_size)

    ret = regmap_read(field->regmap,
                      field->reg + (field->id_offset * id),

    reg_val &= field->mask;
    reg_val >>= field->shift;
EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
    size_t val_bytes = map->format.val_bytes;
    bool vol = regmap_volatile_range(map, reg, val_count);

    if (reg % map->reg_stride)

    if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
        /*
         * Some devices do not support bulk read, for
         * them we have a series of single read operations.
         */
        if (map->use_single_rw) {
            for (i = 0; i < val_count; i++) {
                ret = regmap_raw_read(map,
                                      reg + (i * map->reg_stride),
                                      val + (i * val_bytes),
            ret = regmap_raw_read(map, reg, val,
                                  val_bytes * val_count);

        for (i = 0; i < val_count * val_bytes; i += val_bytes)
            map->format.parse_inplace(val + i);
        for (i = 0; i < val_count; i++) {
            ret = regmap_read(map, reg + (i * map->reg_stride),

            memcpy(val + (i * val_bytes), &ival, val_bytes);
EXPORT_SYMBOL_GPL(regmap_bulk_read);
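
/*
 * Usage sketch (illustrative): reading eight consecutive registers into a
 * native-endian buffer, one element per register.  Register base 0x30 is
 * an assumption for the example.
 *
 *	u16 samples[8];
 *
 *	ret = regmap_bulk_read(map, 0x30, samples, ARRAY_SIZE(samples));
 */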
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
    unsigned int tmp, orig;

    ret = _regmap_read(map, reg, &orig);

    ret = _regmap_write(map, reg, tmp);

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
                       unsigned int mask, unsigned int val)
    map->lock(map->lock_arg);
    ret = _regmap_update_bits(map, reg, mask, val, &change);
    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_update_bits);
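
/*
 * Usage sketch (illustrative bit layout): setting and clearing a single bit
 * without disturbing the rest of the register.
 *
 *	#define FOO_CTRL	0x02
 *	#define FOO_CTRL_EN	BIT(0)
 *
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, FOO_CTRL_EN);	// set
 *	ret = regmap_update_bits(map, FOO_CTRL, FOO_CTRL_EN, 0);		// clear
 */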
/**
 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
 *                           map asynchronously
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_async(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val)
    map->lock(map->lock_arg);

    ret = _regmap_update_bits(map, reg, mask, val, &change);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_update_bits_async);

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val,
    map->lock(map->lock_arg);
    ret = _regmap_update_bits(map, reg, mask, val, change);
    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

/**
 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
 *                                 register map asynchronously and report if
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * With most buses the read must be done synchronously so this is most
 * useful for devices with a cache which do not need to interact with
 * the hardware to determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
                                   unsigned int mask, unsigned int val,
    map->lock(map->lock_arg);

    ret = _regmap_update_bits(map, reg, mask, val, change);

    map->unlock(map->lock_arg);
EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
void regmap_async_complete_cb(struct regmap_async *async, int ret)
    struct regmap *map = async->map;

    trace_regmap_async_io_complete(map->dev);

    spin_lock(&map->async_lock);
    list_move(&async->list, &map->async_free);
    wake = list_empty(&map->async_list);

        map->async_ret = ret;

    spin_unlock(&map->async_lock);

        wake_up(&map->async_waitq);
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
    unsigned long flags;

    spin_lock_irqsave(&map->async_lock, flags);
    ret = list_empty(&map->async_list);
    spin_unlock_irqrestore(&map->async_lock, flags);

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
    unsigned long flags;

    /* Nothing to do with no async support */
    if (!map->bus || !map->bus->async_write)

    trace_regmap_async_complete_start(map->dev);

    wait_event(map->async_waitq, regmap_async_is_done(map));

    spin_lock_irqsave(&map->async_lock, flags);
    ret = map->async_ret;
    spin_unlock_irqrestore(&map->async_lock, flags);

    trace_regmap_async_complete_done(map->dev);
EXPORT_SYMBOL_GPL(regmap_async_complete);
/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 */
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
    struct reg_default *p;

    if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",

    map->lock(map->lock_arg);

    bypass = map->cache_bypass;

    map->cache_bypass = true;

    /* Write out first; it's useful to apply even if we fail later. */
    for (i = 0; i < num_regs; i++) {
        ret = _regmap_write(map, regs[i].reg, regs[i].def);
            dev_err(map->dev, "Failed to write %x = %x: %d\n",
                    regs[i].reg, regs[i].def, ret);

    p = krealloc(map->patch,
                 sizeof(struct reg_default) * (map->patch_regs + num_regs),
        memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
        map->patch_regs += num_regs;

    map->cache_bypass = bypass;

    map->unlock(map->lock_arg);

    regmap_async_complete(map);
EXPORT_SYMBOL_GPL(regmap_register_patch);
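
/*
 * Usage sketch (illustrative errata values): registering a vendor-supplied
 * fix-up that is applied now and again after every cache sync.
 *
 *	static const struct reg_default foo_errata[] = {
 *		{ .reg = 0x7a, .def = 0x01 },
 *		{ .reg = 0x7b, .def = 0x83 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));
 */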
/**
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
    if (map->format.format_write)

    return map->format.val_bytes;
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

static int __init regmap_initcall(void)
    regmap_debugfs_initcall();

postcore_initcall(regmap_initcall);