/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	if (!map->reg_defaults_raw) {
		u32 cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass cache access until the data has been read from HW */
		map->cache_bypass = 1;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->num_reg_defaults_raw);
		map->cache_bypass = cache_bypass;
		if (ret < 0) {
			kfree(tmp_buf);
			return ret;
		}
		map->reg_defaults_raw = tmp_buf;
		map->cache_free = 1;
	}

	/* Calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		count++;
	}

	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
				    GFP_KERNEL);
	if (!map->reg_defaults) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* Fill the reg_defaults */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		map->reg_defaults[j].reg = i * map->reg_stride;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us.  We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand (an illustrative
		 * driver configuration follows this function).
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map->dev, reg, *value);

		return ret;
	}

	return -EINVAL;
}

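/*
 * Rough sketch of how the regmap core is expected to use regcache_read():
 * the cache is consulted before going to the bus, and a cache miss (or a
 * volatile register) falls through to a hardware read.  This is a simplified
 * illustration, not the actual _regmap_read() implementation.
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_read(map, reg, val);
 *		if (ret == 0)
 *			return 0;	// served from the cache
 *	}
 *	// otherwise perform a bus read and (optionally) cache the result
 */
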
/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

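/*
 * Rough sketch of the corresponding write-side usage: the core updates the
 * cache and, when the map is in cache-only mode, skips the bus access
 * entirely.  Simplified illustration only, not the actual _regmap_write()
 * implementation.
 *
 *	if (!map->cache_bypass) {
 *		ret = regcache_write(map, reg, val);
 *		if (ret != 0)
 *			return ret;
 *		if (map->cache_only) {
 *			map->cache_dirty = true;
 *			return 0;	// defer the hardware write to regcache_sync()
 *		}
 *	}
 *	// otherwise write through to the hardware
 */
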
static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg++) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, reg);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

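/*
 * Typical driver usage (illustrative; struct my_dev and its fields are
 * hypothetical): after powering the device back up, re-enable hardware access
 * and replay the cached register values.
 *
 *	static int my_dev_resume(struct device *dev)
 *	{
 *		struct my_dev *priv = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(priv->regmap, false);
 *		return regcache_sync(priv->regmap);
 *	}
 */
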
/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map->dev, name, "start region");

	if (!map->cache_dirty)
		goto out;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	trace_regcache_sync(map->dev, name, "stop region");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

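/*
 * Illustrative use of regcache_sync_region(): restore only a subset of the
 * map, for example after a partial reset.  priv->regmap and the
 * MY_CHIP_DSP_BASE/MY_CHIP_DSP_END limits are hypothetical.
 *
 *	ret = regcache_sync_region(priv->regmap, MY_CHIP_DSP_BASE,
 *				   MY_CHIP_DSP_END);
 *	if (ret != 0)
 *		dev_err(dev, "Failed to restore DSP registers: %d\n", ret);
 */
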
/**
 * regcache_drop_region: Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

	trace_regcache_drop_region(map->dev, min, max);

	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);

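/*
 * Illustrative use of regcache_drop_region(): forget cached values for a
 * register range that the hardware has reset underneath us, so the next sync
 * does not replay stale data.  The register names are hypothetical.
 *
 *	ret = regcache_drop_region(priv->regmap, MY_CHIP_SEQ_RAM_START,
 *				   MY_CHIP_SEQ_RAM_END);
 */
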
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if writes should only update the cache
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

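/*
 * Typical suspend-path usage (illustrative; struct my_dev and
 * my_dev_power_off() are hypothetical): mark the cache dirty and switch to
 * cache-only mode before cutting power, so register writes made while the
 * device is off are captured for the next sync.
 *
 *	static int my_dev_suspend(struct device *dev)
 *	{
 *		struct my_dev *priv = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(priv->regmap, true);
 *		regcache_mark_dirty(priv->regmap);
 *		return my_dev_power_off(priv);	// hypothetical helper
 *	}
 */
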
/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

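/*
 * Illustrative call site: after asserting a hypothetical hardware reset, mark
 * the cache dirty so that a later regcache_sync() rewrites every non-default
 * value instead of being suppressed.
 *
 *	my_chip_hard_reset(priv);	// hypothetical helper
 *	regcache_mark_dirty(priv->regmap);
 */
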
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if writes should bypass the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

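/*
 * Illustrative use of regcache_cache_bypass(): talk straight to the hardware
 * without disturbing the cache, e.g. while streaming a firmware image through
 * a data register.  my_chip_download_firmware() is a hypothetical helper.
 *
 *	regcache_cache_bypass(priv->regmap, true);
 *	ret = my_chip_download_firmware(priv);
 *	regcache_cache_bypass(priv->regmap, false);
 */
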
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i))
			continue;

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = 0;
		if (ret != 0)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - 1);

	map->cache_bypass = 1;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
				false);

	map->cache_bypass = 0;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map))
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}