// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = true;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	if (config->num_reg_defaults && !config->reg_defaults) {
		dev_err(map->dev,
			"Register defaults number are set without the reg!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup_array(config->reg_defaults, map->num_reg_defaults,
					sizeof(*map->reg_defaults), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register_is_set && map->num_reg_defaults_raw) {
		map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
		map->max_register_is_set = true;
	}

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		ret = map->cache_ops->init(map);
		map->unlock(map->lock_arg);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->lock(map->lock_arg);
		map->cache_ops->exit(map);
		map->unlock(map->lock_arg);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
			     unsigned int val)
{
	int ret;

	if (!regmap_writeable(map, reg))
		return false;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default?  If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret == -ENOENT)
			continue;
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;
	struct rb_node *node;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;

	/*
	 * If we did any paging with cache bypassed and a cached
	 * paging register then the register and cache state might
	 * have gone out of sync, force writes of all the paging
	 * registers.
	 */
	rb_for_each(node, NULL, &map->range_tree, rbtree_all) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		/* If there's nothing in the cache there's nothing to sync */
		if (regcache_read(map, this->selector_reg, &i) != 0)
			continue;

		ret = _regmap_write(map, this->selector_reg, i);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				this->selector_reg, i, ret);
			break;
		}
	}

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

/**
 * regcache_reg_cached - Check if a register is cached
 *
 * @map: map to check
 * @reg: register to check
 *
 * Reports if a register is cached.
 */
bool regcache_reg_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	map->lock(map->lock_arg);

	ret = regcache_read(map, reg, &val);

	map->unlock(map->lock_arg);

	return ret == 0;
}
EXPORT_SYMBOL_GPL(regcache_reg_cached);

void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!regcache_reg_needs_sync(map, reg, val))
		return 0;

	map->cache_bypass = true;

	ret = _regmap_write(map, reg, val);

	map->cache_bypass = false;

	if (ret != 0) {
		dev_err(map->dev, "Unable to sync register %#x. %d\n",
			reg, ret);
		return ret;
	}
	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
		reg, val);

	return 0;
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		ret = regcache_sync_val(map, regtmp, val);
		if (ret != 0)
			return ret;
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}