/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include "trace.h"
#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = 1;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
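
/*
 * Editor's illustrative sketch, not part of the original file: a driver
 * normally selects a register cache purely through its struct regmap_config,
 * which is what ends up feeding regcache_init() above.  The register
 * addresses, default values and bit widths below are invented.
 */
static inline const struct regmap_config *regcache_example_config(void)
{
	static const struct reg_default example_defaults[] = {
		{ .reg = 0x00, .def = 0x0000 },
		{ .reg = 0x01, .def = 0x1234 },
	};
	static const struct regmap_config example_config = {
		.reg_bits = 8,
		.val_bits = 16,
		.max_register = 0x10,
		.cache_type = REGCACHE_RBTREE,
		.reg_defaults = example_defaults,
		.num_reg_defaults = ARRAY_SIZE(example_defaults),
	};

	/* A bus init helper such as devm_regmap_init_i2c() would take this. */
	return &example_config;
}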

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);

		return ret;
	}

	return -EINVAL;
}
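
/*
 * Editor's sketch of how the core is expected to use regcache_read(): try
 * the cache first for non-volatile registers and only fall back to the bus
 * when the cache cannot answer.  This mirrors the regmap read path only
 * loosely; the helper name is hypothetical and it is not called anywhere.
 */
static inline int regcache_example_read(struct regmap *map, unsigned int reg,
					unsigned int *val)
{
	int ret;

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;	/* served from the cache */
	}

	/* Cache miss, volatile register or no cache: go to the hardware. */
	return regmap_read(map, reg, val);
}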

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
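
/*
 * Editor's sketch of the write side: the core keeps the cache coherent by
 * mirroring every non-volatile write into it, and in cache-only mode the
 * bus write is skipped entirely.  Hypothetical helper, loosely modelled on
 * the behaviour described above; it is not called anywhere in this file.
 */
static inline int regcache_example_write(struct regmap *map, unsigned int reg,
					 unsigned int val)
{
	int ret;

	if (!map->cache_bypass) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;	/* hardware untouched for now */
		}
	}

	return regmap_write(map, reg, val);
}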

static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
				    unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default? If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
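
/*
 * Editor's usage sketch (not from the original file): a typical resume path
 * re-enables hardware access and then replays the cache.  Power handling is
 * elided; only the regcache calls are the point here.
 */
static inline int regcache_example_resume(struct regmap *map)
{
	/* Writes may reach the device again. */
	regcache_cache_only(map, false);

	/*
	 * The device lost its register state while powered off, so every
	 * non-default cached value must be written back.
	 */
	return regcache_sync(map);
}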

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
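
/*
 * Editor's usage sketch: when only one block of a device was reset (for
 * instance a sub-block sitting in its own power domain), a driver can
 * restrict the sync to that register range.  The 0x30-0x3f window is an
 * arbitrary example.
 */
static inline int regcache_example_sync_bank(struct regmap *map)
{
	return regcache_sync_region(map, 0x30, 0x3f);
}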

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
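
/*
 * Editor's usage sketch: if the hardware rewrites a range behind the
 * driver's back (say a calibration engine), the stale cache entries can be
 * discarded so the next read of those registers goes to the device.  The
 * 0x40-0x4f range is made up.
 */
static inline int regcache_example_invalidate(struct regmap *map)
{
	return regcache_drop_region(map, 0x40, 0x4f);
}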

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
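
/*
 * Editor's usage sketch: the suspend-side counterpart of the resume example
 * above.  While the device is powered down, register writes from other parts
 * of the driver keep landing in the cache only.
 */
static inline void regcache_example_power_off(struct regmap *map)
{
	regcache_cache_only(map, true);
	regcache_mark_dirty(map);
	/* ... power/clock handling for the device would go here ... */
}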

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
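
/*
 * Editor's usage sketch: a driver that knows it just hard-reset the chip
 * marks the cache dirty so the following regcache_sync() writes out every
 * non-default value instead of assuming the hardware still matches the
 * cache.  Any reset handling (e.g. toggling a reset GPIO) is elided.
 */
static inline int regcache_example_after_reset(struct regmap *map)
{
	regcache_mark_dirty(map);
	return regcache_sync(map);
}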

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
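
/*
 * Editor's usage sketch: bypass mode suits writes that must reach the
 * hardware but should not disturb the cached value, such as poking a
 * self-clearing trigger bit.  Register 0x02 and the bit value are invented.
 */
static inline int regcache_example_trigger(struct regmap *map)
{
	int ret;

	regcache_cache_bypass(map, true);
	ret = regmap_write(map, 0x02, 0x1);	/* not recorded in the cache */
	regcache_cache_bypass(map, false);

	return ret;
}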

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}