/*
 * soc-cache.c  --  ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <sound/soc.h>
#include <linux/lzo.h>
#include <linux/bitmap.h>
#include <linux/rbtree.h>
#include <linux/export.h>

#include <trace/events/asoc.h>
static bool snd_soc_set_cache_val(void *base, unsigned int idx,
                                  unsigned int val, unsigned int word_size)
{
        switch (word_size) {
        case 1: {
                u8 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        case 2: {
                u16 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        default:
                BUG();
        }
        return false;
}
static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
                                          unsigned int word_size)
{
        if (!base)
                return -1;

        switch (word_size) {
        case 1: {
                const u8 *cache = base;
                return cache[idx];
        }
        case 2: {
                const u16 *cache = base;
                return cache[idx];
        }
        default:
                BUG();
        }
        /* unreachable */
        return -1;
}
struct snd_soc_rbtree_node {
        struct rb_node node; /* the actual rbtree node holding this block */
        unsigned int base_reg; /* base register handled by this block */
        unsigned int word_size; /* number of bytes needed to represent the register index */
        void *block; /* block of adjacent registers */
        unsigned int blklen; /* number of registers available in the block */
} __attribute__ ((packed));
struct snd_soc_rbtree_ctx {
        struct rb_root root;
        struct snd_soc_rbtree_node *cached_rbnode;
};
static inline void snd_soc_rbtree_get_base_top_reg(
        struct snd_soc_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + rbnode->blklen - 1;
}
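
/*
 * Illustrative note (not from the original source): a worked example of the
 * base/top arithmetic above, for a node that caches four adjacent registers
 * starting at 0x10:
 *
 *      base_reg = 0x10, blklen = 4
 *      => base = 0x10, top = 0x10 + 4 - 1 = 0x13
 *
 * i.e. the node covers registers 0x10..0x13 inclusive.
 */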
static unsigned int snd_soc_rbtree_get_register(
        struct snd_soc_rbtree_node *rbnode, unsigned int idx)
{
        switch (rbnode->word_size) {
        case 1: {
                u8 *p = rbnode->block;
                return p[idx];
        }
        case 2: {
                u16 *p = rbnode->block;
                return p[idx];
        }
        default:
                BUG();
        }
        return -1;
}
static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
                                        unsigned int idx, unsigned int val)
{
        switch (rbnode->word_size) {
        case 1: {
                u8 *p = rbnode->block;
                p[idx] = val;
                break;
        }
        case 2: {
                u16 *p = rbnode->block;
                p[idx] = val;
                break;
        }
        default:
                BUG();
        }
}
static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
        struct rb_root *root, unsigned int reg)
{
        struct rb_node *node;
        struct snd_soc_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        node = root->rb_node;
        while (node) {
                rbnode = container_of(node, struct snd_soc_rbtree_node, node);
                snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
                else if (reg > top_reg)
                        node = node->rb_right;
                else if (reg < base_reg)
                        node = node->rb_left;
        }

        return NULL;
}
static int snd_soc_rbtree_insert(struct rb_root *root,
                                 struct snd_soc_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct snd_soc_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
                                          node);
                /* base and top registers of the current rbnode */
                snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
                                                &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}
static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
{
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct snd_soc_rbtree_node *rbnode;
        unsigned int regtmp;
        unsigned int val, def;
        int ret;
        int i;

        rbtree_ctx = codec->reg_cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
                for (i = 0; i < rbnode->blklen; ++i) {
                        regtmp = rbnode->base_reg + i;
                        val = snd_soc_rbtree_get_register(rbnode, i);
                        def = snd_soc_get_cache_val(codec->reg_def_copy, i,
                                                    rbnode->word_size);
                        if (val == def)
                                continue;

                        WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));

                        codec->cache_bypass = 1;
                        ret = snd_soc_write(codec, regtmp, val);
                        codec->cache_bypass = 0;
                        if (ret)
                                return ret;
                        dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
                                regtmp, val);
                }
        }

        return 0;
}
static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
                                          unsigned int pos, unsigned int reg,
                                          unsigned int value)
{
        u8 *blk;

        blk = krealloc(rbnode->block,
                       (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        /* insert the register value in the correct place in the rbnode block */
        memmove(blk + (pos + 1) * rbnode->word_size,
                blk + pos * rbnode->word_size,
                (rbnode->blklen - pos) * rbnode->word_size);

        /* update the rbnode block, its size and the base register */
        rbnode->block = blk;
        rbnode->blklen++;
        if (!pos)
                rbnode->base_reg = reg;

        snd_soc_rbtree_set_register(rbnode, pos, value);
        return 0;
}
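
/*
 * Illustrative note (not from the original source): how the insertion above
 * behaves for a node with base_reg = 0x10 and blklen = 3 (registers
 * 0x10..0x12), given the pos values computed by the caller in
 * snd_soc_rbtree_cache_write():
 *
 *   - writing register 0x13: pos = 3, the memmove() shifts zero entries,
 *     the value is appended at the end and base_reg is unchanged
 *     (blklen becomes 4);
 *   - writing register 0x0f: pos = 0, the memmove() shifts all three
 *     entries up by one word, the value is stored at index 0 and
 *     base_reg becomes 0x0f.
 */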
static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
                                      unsigned int reg, unsigned int value)
{
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int val;
        unsigned int reg_tmp;
        unsigned int base_reg, top_reg;
        unsigned int pos;
        int i;
        int ret;

        rbtree_ctx = codec->reg_cache;
        /* look up the required register in the cached rbnode */
        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        reg_tmp = reg - base_reg;
                        val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                        if (val == value)
                                return 0;
                        snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
                        return 0;
                }
        }
        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
        if (rbnode) {
                reg_tmp = reg - rbnode->base_reg;
                val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                if (val == value)
                        return 0;
                snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
                rbtree_ctx->cached_rbnode = rbnode;
        } else {
                /* bail out early, no need to create the rbnode yet */
                if (!value)
                        return 0;
                /* look for an adjacent register to the one we are about to add */
                for (node = rb_first(&rbtree_ctx->root); node;
                     node = rb_next(node)) {
                        rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
                        for (i = 0; i < rbnode_tmp->blklen; ++i) {
                                reg_tmp = rbnode_tmp->base_reg + i;
                                if (abs(reg_tmp - reg) != 1)
                                        continue;
                                /* decide where in the block to place our register */
                                if (reg_tmp + 1 == reg)
                                        pos = i + 1;
                                else
                                        pos = i;
                                ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
                                                                     reg, value);
                                if (ret)
                                        return ret;
                                rbtree_ctx->cached_rbnode = rbnode_tmp;
                                return 0;
                        }
                }
                /* we did not manage to find a place to insert it in an existing
                 * block so create a new rbnode with a single register in its block.
                 * This block will get populated further if any other adjacent
                 * registers get modified in the future.
                 */
                rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
                if (!rbnode)
                        return -ENOMEM;
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
                rbnode->word_size = codec->driver->reg_word_size;
                rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
                                        GFP_KERNEL);
                if (!rbnode->block) {
                        kfree(rbnode);
                        return -ENOMEM;
                }
                snd_soc_rbtree_set_register(rbnode, 0, value);
                snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}
static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
                                     unsigned int reg, unsigned int *value)
{
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        struct snd_soc_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int reg_tmp;

        rbtree_ctx = codec->reg_cache;
        /* look up the required register in the cached rbnode */
        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        reg_tmp = reg - base_reg;
                        *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                        return 0;
                }
        }
        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
        if (rbnode) {
                reg_tmp = reg - rbnode->base_reg;
                *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
                rbtree_ctx->cached_rbnode = rbnode;
        } else {
                /* uninitialized registers default to 0 */
                *value = 0;
        }

        return 0;
}
static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
{
        struct rb_node *next;
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        struct snd_soc_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = codec->reg_cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(codec->reg_cache);
        codec->reg_cache = NULL;

        return 0;
}
static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
{
        struct snd_soc_rbtree_ctx *rbtree_ctx;
        unsigned int word_size;
        unsigned int val;
        int i;
        int ret;

        codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!codec->reg_cache)
                return -ENOMEM;

        rbtree_ctx = codec->reg_cache;
        rbtree_ctx->root = RB_ROOT;
        rbtree_ctx->cached_rbnode = NULL;

        if (!codec->reg_def_copy)
                return 0;

        word_size = codec->driver->reg_word_size;
        for (i = 0; i < codec->driver->reg_cache_size; ++i) {
                val = snd_soc_get_cache_val(codec->reg_def_copy, i,
                                            word_size);
                if (!val)
                        continue;
                ret = snd_soc_rbtree_cache_write(codec, i, val);
                if (ret)
                        goto err;
        }

        return 0;

err:
        snd_soc_cache_exit(codec);
        return ret;
}
#ifdef CONFIG_SND_SOC_CACHE_LZO
struct snd_soc_lzo_ctx {
        void *wmem;
        void *dst;
        const void *src;
        size_t src_len;
        size_t dst_len;
        size_t decompressed_size;
        unsigned long *sync_bmp;
        int sync_bmp_nbits;
};
/* the register map is split into LZO_BLOCK_NUM compressed blocks */
#define LZO_BLOCK_NUM 8

static int snd_soc_lzo_block_count(void)
{
        return LZO_BLOCK_NUM;
}
static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
{
        lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!lzo_ctx->wmem)
                return -ENOMEM;
        return 0;
}
static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
{
        size_t compress_size;
        int ret;

        ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
                               lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
        if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
                return -EINVAL;
        lzo_ctx->dst_len = compress_size;
        return 0;
}
static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
{
        size_t dst_len;
        int ret;

        dst_len = lzo_ctx->dst_len;
        ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
                                    lzo_ctx->dst, &dst_len);
        if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
                return -EINVAL;
        return 0;
}
static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
                                            struct snd_soc_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = snd_soc_lzo_compress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}
static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
                                              struct snd_soc_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo_ctx->decompressed_size;
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = snd_soc_lzo_decompress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}
static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
                                           unsigned int reg)
{
        const struct snd_soc_codec_driver *codec_drv;

        codec_drv = codec->driver;
        return (reg * codec_drv->reg_word_size) /
               DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}
static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
                                         unsigned int reg)
{
        const struct snd_soc_codec_driver *codec_drv;

        codec_drv = codec->driver;
        return reg % (DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count()) /
                      codec_drv->reg_word_size);
}
static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
{
        return DIV_ROUND_UP(codec->reg_size, snd_soc_lzo_block_count());
}
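
/*
 * Illustrative note (not from the original source): a worked example of the
 * three helpers above, assuming reg_cache_size = 256 and reg_word_size = 2
 * (so codec->reg_size = 512 bytes) and the default of 8 blocks:
 *
 *      blksize  = DIV_ROUND_UP(512, 8) = 64 bytes (32 registers per block)
 *      reg 69  => blkindex = (69 * 2) / 64      = 2
 *              => blkpos   = 69 % (64 / 2)      = 5
 *
 * i.e. register 69 lives at index 5 of compressed block 2, which covers
 * registers 64..95.
 */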
static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
{
        struct snd_soc_lzo_ctx **lzo_blocks;
        unsigned int val;
        int i;
        int ret;

        lzo_blocks = codec->reg_cache;
        for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
                WARN_ON(!snd_soc_codec_writable_register(codec, i));
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
                codec->cache_bypass = 1;
                ret = snd_soc_write(codec, i, val);
                codec->cache_bypass = 0;
                if (ret)
                        return ret;
                dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
                        i, val);
        }

        return 0;
}
static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
                                   unsigned int reg, unsigned int value)
{
        struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t blksize, tmp_dst_len;
        void *tmp_dst;

        /* index of the compressed lzo block */
        blkindex = snd_soc_lzo_get_blkindex(codec, reg);
        /* register index within the decompressed block */
        blkpos = snd_soc_lzo_get_blkpos(codec, reg);
        /* size of the compressed block */
        blksize = snd_soc_lzo_get_blksize(codec);
        lzo_blocks = codec->reg_cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* write the new value to the cache */
        if (snd_soc_set_cache_val(lzo_block->dst, blkpos, value,
                                  codec->driver->reg_word_size)) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* prepare the source to be the decompressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* compress the block */
        ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                kfree(lzo_block->src);
                goto out;
        }

        /* set the bit so we know we have to sync this register */
        set_bit(reg, lzo_block->sync_bmp);
        kfree(tmp_dst);
        kfree(lzo_block->src);
        return 0;
out:
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;
        return ret;
}
static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
                                  unsigned int reg, unsigned int *value)
{
        struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t blksize, tmp_dst_len;
        void *tmp_dst;

        *value = 0;
        /* index of the compressed lzo block */
        blkindex = snd_soc_lzo_get_blkindex(codec, reg);
        /* register index within the decompressed block */
        blkpos = snd_soc_lzo_get_blkpos(codec, reg);
        /* size of the compressed block */
        blksize = snd_soc_lzo_get_blksize(codec);
        lzo_blocks = codec->reg_cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
        if (ret >= 0)
                /* fetch the value from the cache */
                *value = snd_soc_get_cache_val(lzo_block->dst, blkpos,
                                               codec->driver->reg_word_size);

        kfree(lzo_block->dst);
        /* restore the pointer and length of the compressed block */
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;

        return ret;
}
static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
{
        struct snd_soc_lzo_ctx **lzo_blocks;
        int i, blkcount;

        lzo_blocks = codec->reg_cache;
        if (!lzo_blocks)
                return 0;

        blkcount = snd_soc_lzo_block_count();
        /*
         * the pointer to the bitmap used for syncing the cache
         * is shared amongst all lzo_blocks.  Ensure it is freed
         * only once.
         */
        if (lzo_blocks[0])
                kfree(lzo_blocks[0]->sync_bmp);
        for (i = 0; i < blkcount; ++i) {
                if (lzo_blocks[i]) {
                        kfree(lzo_blocks[i]->wmem);
                        kfree(lzo_blocks[i]->dst);
                }
                /* each lzo_block is a pointer returned by kmalloc or NULL */
                kfree(lzo_blocks[i]);
        }
        kfree(lzo_blocks);
        codec->reg_cache = NULL;
        return 0;
}
static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
{
        struct snd_soc_lzo_ctx **lzo_blocks;
        size_t bmp_size;
        const struct snd_soc_codec_driver *codec_drv;
        int ret, tofree, i, blksize, blkcount;
        const char *p, *end;
        unsigned long *sync_bmp;

        ret = 0;
        codec_drv = codec->driver;

        /*
         * If we have not been given a default register cache
         * then allocate a dummy zero-ed out region, compress it
         * and remember to free it afterwards.
         */
        tofree = 0;
        if (!codec->reg_def_copy)
                tofree = 1;

        if (!codec->reg_def_copy) {
                codec->reg_def_copy = kzalloc(codec->reg_size, GFP_KERNEL);
                if (!codec->reg_def_copy)
                        return -ENOMEM;
        }

        blkcount = snd_soc_lzo_block_count();
        codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
                                   GFP_KERNEL);
        if (!codec->reg_cache) {
                ret = -ENOMEM;
                goto err_tofree;
        }
        lzo_blocks = codec->reg_cache;

        /*
         * allocate a bitmap to be used when syncing the cache with
         * the hardware.  Each time a register is modified, the corresponding
         * bit is set in the bitmap, so we know that we have to sync
         * that register.
         */
        bmp_size = codec_drv->reg_cache_size;
        sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
                           GFP_KERNEL);
        if (!sync_bmp) {
                ret = -ENOMEM;
                goto err;
        }
        bitmap_zero(sync_bmp, bmp_size);

        /* allocate the lzo blocks and initialize them */
        for (i = 0; i < blkcount; ++i) {
                lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
                                        GFP_KERNEL);
                if (!lzo_blocks[i]) {
                        kfree(sync_bmp);
                        ret = -ENOMEM;
                        goto err;
                }
                lzo_blocks[i]->sync_bmp = sync_bmp;
                lzo_blocks[i]->sync_bmp_nbits = bmp_size;
                /* alloc the working space for the compressed block */
                ret = snd_soc_lzo_prepare(lzo_blocks[i]);
                if (ret < 0)
                        goto err;
        }

        blksize = snd_soc_lzo_get_blksize(codec);
        p = codec->reg_def_copy;
        end = codec->reg_def_copy + codec->reg_size;
        /* compress the register map and fill the lzo blocks */
        for (i = 0; i < blkcount; ++i, p += blksize) {
                lzo_blocks[i]->src = p;
                if (p + blksize > end)
                        lzo_blocks[i]->src_len = end - p;
                else
                        lzo_blocks[i]->src_len = blksize;
                ret = snd_soc_lzo_compress_cache_block(codec,
                                                       lzo_blocks[i]);
                if (ret < 0)
                        goto err;
                lzo_blocks[i]->decompressed_size =
                        lzo_blocks[i]->src_len;
        }

        if (tofree) {
                kfree(codec->reg_def_copy);
                codec->reg_def_copy = NULL;
        }
        return 0;

err:
        snd_soc_cache_exit(codec);
err_tofree:
        if (tofree) {
                kfree(codec->reg_def_copy);
                codec->reg_def_copy = NULL;
        }
        return ret;
}
#endif
static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
{
        int i;
        int ret;
        const struct snd_soc_codec_driver *codec_drv;
        unsigned int val;

        codec_drv = codec->driver;
        for (i = 0; i < codec_drv->reg_cache_size; ++i) {
                ret = snd_soc_cache_read(codec, i, &val);
                if (ret)
                        return ret;
                if (codec->reg_def_copy)
                        if (snd_soc_get_cache_val(codec->reg_def_copy,
                                                  i, codec_drv->reg_word_size) == val)
                                continue;

                WARN_ON(!snd_soc_codec_writable_register(codec, i));

                ret = snd_soc_write(codec, i, val);
                if (ret)
                        return ret;
                dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
                        i, val);
        }
        return 0;
}
static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
                                    unsigned int reg, unsigned int value)
{
        snd_soc_set_cache_val(codec->reg_cache, reg, value,
                              codec->driver->reg_word_size);
        return 0;
}
static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
                                   unsigned int reg, unsigned int *value)
{
        *value = snd_soc_get_cache_val(codec->reg_cache, reg,
                                       codec->driver->reg_word_size);
        return 0;
}
static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
{
        if (!codec->reg_cache)
                return 0;
        kfree(codec->reg_cache);
        codec->reg_cache = NULL;
        return 0;
}
static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
{
        if (codec->reg_def_copy)
                codec->reg_cache = kmemdup(codec->reg_def_copy,
                                           codec->reg_size, GFP_KERNEL);
        else
                codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
        if (!codec->reg_cache)
                return -ENOMEM;

        return 0;
}
/* an array of all supported compression types */
static const struct snd_soc_cache_ops cache_types[] = {
        /* Flat *must* be the first entry for fallback */
        {
                .id = SND_SOC_FLAT_COMPRESSION,
                .name = "flat",
                .init = snd_soc_flat_cache_init,
                .exit = snd_soc_flat_cache_exit,
                .read = snd_soc_flat_cache_read,
                .write = snd_soc_flat_cache_write,
                .sync = snd_soc_flat_cache_sync
        },
#ifdef CONFIG_SND_SOC_CACHE_LZO
        {
                .id = SND_SOC_LZO_COMPRESSION,
                .name = "LZO",
                .init = snd_soc_lzo_cache_init,
                .exit = snd_soc_lzo_cache_exit,
                .read = snd_soc_lzo_cache_read,
                .write = snd_soc_lzo_cache_write,
                .sync = snd_soc_lzo_cache_sync
        },
#endif
        {
                .id = SND_SOC_RBTREE_COMPRESSION,
                .name = "rbtree",
                .init = snd_soc_rbtree_cache_init,
                .exit = snd_soc_rbtree_cache_exit,
                .read = snd_soc_rbtree_cache_read,
                .write = snd_soc_rbtree_cache_write,
                .sync = snd_soc_rbtree_cache_sync
        }
};
int snd_soc_cache_init(struct snd_soc_codec *codec)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
                if (cache_types[i].id == codec->compress_type)
                        break;

        /* Fall back to flat compression */
        if (i == ARRAY_SIZE(cache_types)) {
                dev_warn(codec->dev, "Could not match compress type: %d\n",
                         codec->compress_type);
                i = 0;
        }

        mutex_init(&codec->cache_rw_mutex);
        codec->cache_ops = &cache_types[i];

        if (codec->cache_ops->init) {
                if (codec->cache_ops->name)
                        dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
                                codec->cache_ops->name, codec->name);
                return codec->cache_ops->init(codec);
        }
        return -ENOSYS;
}
/*
 * NOTE: keep in mind that this function might be called
 * multiple times.
 */
int snd_soc_cache_exit(struct snd_soc_codec *codec)
{
        if (codec->cache_ops && codec->cache_ops->exit) {
                if (codec->cache_ops->name)
                        dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
                                codec->cache_ops->name, codec->name);
                return codec->cache_ops->exit(codec);
        }
        return -ENOSYS;
}
/**
 * snd_soc_cache_read: Fetch the value of a given register from the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 */
int snd_soc_cache_read(struct snd_soc_codec *codec,
                       unsigned int reg, unsigned int *value)
{
        int ret;

        mutex_lock(&codec->cache_rw_mutex);

        if (value && codec->cache_ops && codec->cache_ops->read) {
                ret = codec->cache_ops->read(codec, reg, value);
                mutex_unlock(&codec->cache_rw_mutex);
                return ret;
        }

        mutex_unlock(&codec->cache_rw_mutex);
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_read);
/**
 * snd_soc_cache_write: Set the value of a given register in the cache.
 *
 * @codec: CODEC to configure.
 * @reg: The register index.
 * @value: The new register value.
 */
int snd_soc_cache_write(struct snd_soc_codec *codec,
                        unsigned int reg, unsigned int value)
{
        int ret;

        mutex_lock(&codec->cache_rw_mutex);

        if (codec->cache_ops && codec->cache_ops->write) {
                ret = codec->cache_ops->write(codec, reg, value);
                mutex_unlock(&codec->cache_rw_mutex);
                return ret;
        }

        mutex_unlock(&codec->cache_rw_mutex);
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_write);
/**
 * snd_soc_cache_sync: Sync the register cache with the hardware.
 *
 * @codec: CODEC to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 */
int snd_soc_cache_sync(struct snd_soc_codec *codec)
{
        int ret;
        const char *name;

        if (!codec->cache_sync) {
                return 0;
        }

        if (!codec->cache_ops || !codec->cache_ops->sync)
                return -ENOSYS;

        if (codec->cache_ops->name)
                name = codec->cache_ops->name;
        else
                name = "unknown";

        if (codec->cache_ops->name)
                dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
                        codec->cache_ops->name, codec->name);
        trace_snd_soc_cache_sync(codec, name, "start");
        ret = codec->cache_ops->sync(codec);
        if (!ret)
                codec->cache_sync = 0;
        trace_snd_soc_cache_sync(codec, name, "end");
        return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
                                        unsigned int reg)
{
        const struct snd_soc_codec_driver *codec_drv;
        unsigned int min, max, index;

        codec_drv = codec->driver;
        /* binary search over the sorted reg_access_default table */
        min = 0;
        max = codec_drv->reg_access_size - 1;
        do {
                index = (min + max) / 2;
                if (codec_drv->reg_access_default[index].reg == reg)
                        return index;
                if (codec_drv->reg_access_default[index].reg < reg)
                        min = index + 1;
                else
                        max = index - 1;
        } while (min <= max);
        return -1;
}
int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
                                      unsigned int reg)
{
        int index;

        if (reg >= codec->driver->reg_cache_size)
                return 1;
        index = snd_soc_get_reg_access_index(codec, reg);
        if (index < 0)
                return 0;
        return codec->driver->reg_access_default[index].vol;
}
EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
int snd_soc_default_readable_register(struct snd_soc_codec *codec,
                                      unsigned int reg)
{
        int index;

        if (reg >= codec->driver->reg_cache_size)
                return 1;
        index = snd_soc_get_reg_access_index(codec, reg);
        if (index < 0)
                return 0;
        return codec->driver->reg_access_default[index].read;
}
EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
int snd_soc_default_writable_register(struct snd_soc_codec *codec,
                                      unsigned int reg)
{
        int index;

        if (reg >= codec->driver->reg_cache_size)
                return 1;
        index = snd_soc_get_reg_access_index(codec, reg);
        if (index < 0)
                return 0;
        return codec->driver->reg_access_default[index].write;
}
EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);