/*
 * soc-cache.c  --  ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/bitmap.h>
#include <linux/rbtree.h>
#include <linux/lzo.h>

#include <sound/soc.h>

#include <trace/events/asoc.h>
23 static bool snd_soc_set_cache_val(void *base
, unsigned int idx
,
24 unsigned int val
, unsigned int word_size
)
29 if (cache
[idx
] == val
)
36 if (cache
[idx
] == val
)
47 static unsigned int snd_soc_get_cache_val(const void *base
, unsigned int idx
,
48 unsigned int word_size
)
55 const u8
*cache
= base
;
59 const u16
*cache
= base
;
69 struct snd_soc_rbtree_node
{
70 struct rb_node node
; /* the actual rbtree node holding this block */
71 unsigned int base_reg
; /* base register handled by this block */
72 unsigned int word_size
; /* number of bytes needed to represent the register index */
73 void *block
; /* block of adjacent registers */
74 unsigned int blklen
; /* number of registers available in the block */
75 } __attribute__ ((packed
));
77 struct snd_soc_rbtree_ctx
{
79 struct snd_soc_rbtree_node
*cached_rbnode
;
82 static inline void snd_soc_rbtree_get_base_top_reg(
83 struct snd_soc_rbtree_node
*rbnode
,
84 unsigned int *base
, unsigned int *top
)
86 *base
= rbnode
->base_reg
;
87 *top
= rbnode
->base_reg
+ rbnode
->blklen
- 1;
90 static unsigned int snd_soc_rbtree_get_register(
91 struct snd_soc_rbtree_node
*rbnode
, unsigned int idx
)
95 switch (rbnode
->word_size
) {
97 u8
*p
= rbnode
->block
;
102 u16
*p
= rbnode
->block
;
113 static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node
*rbnode
,
114 unsigned int idx
, unsigned int val
)
116 switch (rbnode
->word_size
) {
118 u8
*p
= rbnode
->block
;
123 u16
*p
= rbnode
->block
;
133 static struct snd_soc_rbtree_node
*snd_soc_rbtree_lookup(
134 struct rb_root
*root
, unsigned int reg
)
136 struct rb_node
*node
;
137 struct snd_soc_rbtree_node
*rbnode
;
138 unsigned int base_reg
, top_reg
;
140 node
= root
->rb_node
;
142 rbnode
= container_of(node
, struct snd_soc_rbtree_node
, node
);
143 snd_soc_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
144 if (reg
>= base_reg
&& reg
<= top_reg
)
146 else if (reg
> top_reg
)
147 node
= node
->rb_right
;
148 else if (reg
< base_reg
)
149 node
= node
->rb_left
;
155 static int snd_soc_rbtree_insert(struct rb_root
*root
,
156 struct snd_soc_rbtree_node
*rbnode
)
158 struct rb_node
**new, *parent
;
159 struct snd_soc_rbtree_node
*rbnode_tmp
;
160 unsigned int base_reg_tmp
, top_reg_tmp
;
161 unsigned int base_reg
;
164 new = &root
->rb_node
;
166 rbnode_tmp
= container_of(*new, struct snd_soc_rbtree_node
,
168 /* base and top registers of the current rbnode */
169 snd_soc_rbtree_get_base_top_reg(rbnode_tmp
, &base_reg_tmp
,
171 /* base register of the rbnode to be added */
172 base_reg
= rbnode
->base_reg
;
174 /* if this register has already been inserted, just return */
175 if (base_reg
>= base_reg_tmp
&&
176 base_reg
<= top_reg_tmp
)
178 else if (base_reg
> top_reg_tmp
)
179 new = &((*new)->rb_right
);
180 else if (base_reg
< base_reg_tmp
)
181 new = &((*new)->rb_left
);
184 /* insert the node into the rbtree */
185 rb_link_node(&rbnode
->node
, parent
, new);
186 rb_insert_color(&rbnode
->node
, root
);
191 static int snd_soc_rbtree_cache_sync(struct snd_soc_codec
*codec
)
193 struct snd_soc_rbtree_ctx
*rbtree_ctx
;
194 struct rb_node
*node
;
195 struct snd_soc_rbtree_node
*rbnode
;
197 unsigned int val
, def
;
201 rbtree_ctx
= codec
->reg_cache
;
202 for (node
= rb_first(&rbtree_ctx
->root
); node
; node
= rb_next(node
)) {
203 rbnode
= rb_entry(node
, struct snd_soc_rbtree_node
, node
);
204 for (i
= 0; i
< rbnode
->blklen
; ++i
) {
205 regtmp
= rbnode
->base_reg
+ i
;
206 WARN_ON(codec
->writable_register
&&
207 codec
->writable_register(codec
, regtmp
));
208 val
= snd_soc_rbtree_get_register(rbnode
, i
);
209 def
= snd_soc_get_cache_val(codec
->reg_def_copy
, i
,
214 codec
->cache_bypass
= 1;
215 ret
= snd_soc_write(codec
, regtmp
, val
);
216 codec
->cache_bypass
= 0;
219 dev_dbg(codec
->dev
, "Synced register %#x, value = %#x\n",
227 static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node
*rbnode
,
228 unsigned int pos
, unsigned int reg
,
233 blk
= krealloc(rbnode
->block
,
234 (rbnode
->blklen
+ 1) * rbnode
->word_size
, GFP_KERNEL
);
238 /* insert the register value in the correct place in the rbnode block */
239 memmove(blk
+ (pos
+ 1) * rbnode
->word_size
,
240 blk
+ pos
* rbnode
->word_size
,
241 (rbnode
->blklen
- pos
) * rbnode
->word_size
);
243 /* update the rbnode block, its size and the base register */
247 rbnode
->base_reg
= reg
;
249 snd_soc_rbtree_set_register(rbnode
, pos
, value
);
253 static int snd_soc_rbtree_cache_write(struct snd_soc_codec
*codec
,
254 unsigned int reg
, unsigned int value
)
256 struct snd_soc_rbtree_ctx
*rbtree_ctx
;
257 struct snd_soc_rbtree_node
*rbnode
, *rbnode_tmp
;
258 struct rb_node
*node
;
260 unsigned int reg_tmp
;
261 unsigned int base_reg
, top_reg
;
266 rbtree_ctx
= codec
->reg_cache
;
267 /* look up the required register in the cached rbnode */
268 rbnode
= rbtree_ctx
->cached_rbnode
;
270 snd_soc_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
271 if (reg
>= base_reg
&& reg
<= top_reg
) {
272 reg_tmp
= reg
- base_reg
;
273 val
= snd_soc_rbtree_get_register(rbnode
, reg_tmp
);
276 snd_soc_rbtree_set_register(rbnode
, reg_tmp
, value
);
280 /* if we can't locate it in the cached rbnode we'll have
281 * to traverse the rbtree looking for it.
283 rbnode
= snd_soc_rbtree_lookup(&rbtree_ctx
->root
, reg
);
285 reg_tmp
= reg
- rbnode
->base_reg
;
286 val
= snd_soc_rbtree_get_register(rbnode
, reg_tmp
);
289 snd_soc_rbtree_set_register(rbnode
, reg_tmp
, value
);
290 rbtree_ctx
->cached_rbnode
= rbnode
;
292 /* bail out early, no need to create the rbnode yet */
295 /* look for an adjacent register to the one we are about to add */
296 for (node
= rb_first(&rbtree_ctx
->root
); node
;
297 node
= rb_next(node
)) {
298 rbnode_tmp
= rb_entry(node
, struct snd_soc_rbtree_node
, node
);
299 for (i
= 0; i
< rbnode_tmp
->blklen
; ++i
) {
300 reg_tmp
= rbnode_tmp
->base_reg
+ i
;
301 if (abs(reg_tmp
- reg
) != 1)
303 /* decide where in the block to place our register */
304 if (reg_tmp
+ 1 == reg
)
308 ret
= snd_soc_rbtree_insert_to_block(rbnode_tmp
, pos
,
312 rbtree_ctx
->cached_rbnode
= rbnode_tmp
;
316 /* we did not manage to find a place to insert it in an existing
317 * block so create a new rbnode with a single register in its block.
318 * This block will get populated further if any other adjacent
319 * registers get modified in the future.
321 rbnode
= kzalloc(sizeof *rbnode
, GFP_KERNEL
);
325 rbnode
->base_reg
= reg
;
326 rbnode
->word_size
= codec
->driver
->reg_word_size
;
327 rbnode
->block
= kmalloc(rbnode
->blklen
* rbnode
->word_size
,
329 if (!rbnode
->block
) {
333 snd_soc_rbtree_set_register(rbnode
, 0, value
);
334 snd_soc_rbtree_insert(&rbtree_ctx
->root
, rbnode
);
335 rbtree_ctx
->cached_rbnode
= rbnode
;
341 static int snd_soc_rbtree_cache_read(struct snd_soc_codec
*codec
,
342 unsigned int reg
, unsigned int *value
)
344 struct snd_soc_rbtree_ctx
*rbtree_ctx
;
345 struct snd_soc_rbtree_node
*rbnode
;
346 unsigned int base_reg
, top_reg
;
347 unsigned int reg_tmp
;
349 rbtree_ctx
= codec
->reg_cache
;
350 /* look up the required register in the cached rbnode */
351 rbnode
= rbtree_ctx
->cached_rbnode
;
353 snd_soc_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
354 if (reg
>= base_reg
&& reg
<= top_reg
) {
355 reg_tmp
= reg
- base_reg
;
356 *value
= snd_soc_rbtree_get_register(rbnode
, reg_tmp
);
360 /* if we can't locate it in the cached rbnode we'll have
361 * to traverse the rbtree looking for it.
363 rbnode
= snd_soc_rbtree_lookup(&rbtree_ctx
->root
, reg
);
365 reg_tmp
= reg
- rbnode
->base_reg
;
366 *value
= snd_soc_rbtree_get_register(rbnode
, reg_tmp
);
367 rbtree_ctx
->cached_rbnode
= rbnode
;
369 /* uninitialized registers default to 0 */
376 static int snd_soc_rbtree_cache_exit(struct snd_soc_codec
*codec
)
378 struct rb_node
*next
;
379 struct snd_soc_rbtree_ctx
*rbtree_ctx
;
380 struct snd_soc_rbtree_node
*rbtree_node
;
382 /* if we've already been called then just return */
383 rbtree_ctx
= codec
->reg_cache
;
387 /* free up the rbtree */
388 next
= rb_first(&rbtree_ctx
->root
);
390 rbtree_node
= rb_entry(next
, struct snd_soc_rbtree_node
, node
);
391 next
= rb_next(&rbtree_node
->node
);
392 rb_erase(&rbtree_node
->node
, &rbtree_ctx
->root
);
393 kfree(rbtree_node
->block
);
397 /* release the resources */
398 kfree(codec
->reg_cache
);
399 codec
->reg_cache
= NULL
;
404 static int snd_soc_rbtree_cache_init(struct snd_soc_codec
*codec
)
406 struct snd_soc_rbtree_ctx
*rbtree_ctx
;
407 unsigned int word_size
;
412 codec
->reg_cache
= kmalloc(sizeof *rbtree_ctx
, GFP_KERNEL
);
413 if (!codec
->reg_cache
)
416 rbtree_ctx
= codec
->reg_cache
;
417 rbtree_ctx
->root
= RB_ROOT
;
418 rbtree_ctx
->cached_rbnode
= NULL
;
420 if (!codec
->reg_def_copy
)
423 word_size
= codec
->driver
->reg_word_size
;
424 for (i
= 0; i
< codec
->driver
->reg_cache_size
; ++i
) {
425 val
= snd_soc_get_cache_val(codec
->reg_def_copy
, i
,
429 ret
= snd_soc_rbtree_cache_write(codec
, i
, val
);
437 snd_soc_cache_exit(codec
);
441 #ifdef CONFIG_SND_SOC_CACHE_LZO
442 struct snd_soc_lzo_ctx
{
448 size_t decompressed_size
;
449 unsigned long *sync_bmp
;
#define LZO_BLOCK_NUM 8
/* Number of LZO blocks the register map is split into. */
static int snd_soc_lzo_block_count(void)
{
        return LZO_BLOCK_NUM;
}
459 static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx
*lzo_ctx
)
461 lzo_ctx
->wmem
= kmalloc(LZO1X_MEM_COMPRESS
, GFP_KERNEL
);
467 static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx
*lzo_ctx
)
469 size_t compress_size
;
472 ret
= lzo1x_1_compress(lzo_ctx
->src
, lzo_ctx
->src_len
,
473 lzo_ctx
->dst
, &compress_size
, lzo_ctx
->wmem
);
474 if (ret
!= LZO_E_OK
|| compress_size
> lzo_ctx
->dst_len
)
476 lzo_ctx
->dst_len
= compress_size
;
480 static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx
*lzo_ctx
)
485 dst_len
= lzo_ctx
->dst_len
;
486 ret
= lzo1x_decompress_safe(lzo_ctx
->src
, lzo_ctx
->src_len
,
487 lzo_ctx
->dst
, &dst_len
);
488 if (ret
!= LZO_E_OK
|| dst_len
!= lzo_ctx
->dst_len
)
493 static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec
*codec
,
494 struct snd_soc_lzo_ctx
*lzo_ctx
)
498 lzo_ctx
->dst_len
= lzo1x_worst_compress(PAGE_SIZE
);
499 lzo_ctx
->dst
= kmalloc(lzo_ctx
->dst_len
, GFP_KERNEL
);
501 lzo_ctx
->dst_len
= 0;
505 ret
= snd_soc_lzo_compress(lzo_ctx
);
511 static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec
*codec
,
512 struct snd_soc_lzo_ctx
*lzo_ctx
)
516 lzo_ctx
->dst_len
= lzo_ctx
->decompressed_size
;
517 lzo_ctx
->dst
= kmalloc(lzo_ctx
->dst_len
, GFP_KERNEL
);
519 lzo_ctx
->dst_len
= 0;
523 ret
= snd_soc_lzo_decompress(lzo_ctx
);
529 static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec
*codec
,
532 const struct snd_soc_codec_driver
*codec_drv
;
534 codec_drv
= codec
->driver
;
535 return (reg
* codec_drv
->reg_word_size
) /
536 DIV_ROUND_UP(codec
->reg_size
, snd_soc_lzo_block_count());
539 static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec
*codec
,
542 const struct snd_soc_codec_driver
*codec_drv
;
544 codec_drv
= codec
->driver
;
545 return reg
% (DIV_ROUND_UP(codec
->reg_size
, snd_soc_lzo_block_count()) /
546 codec_drv
->reg_word_size
);
549 static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec
*codec
)
551 const struct snd_soc_codec_driver
*codec_drv
;
553 codec_drv
= codec
->driver
;
554 return DIV_ROUND_UP(codec
->reg_size
, snd_soc_lzo_block_count());
557 static int snd_soc_lzo_cache_sync(struct snd_soc_codec
*codec
)
559 struct snd_soc_lzo_ctx
**lzo_blocks
;
564 lzo_blocks
= codec
->reg_cache
;
565 for_each_set_bit(i
, lzo_blocks
[0]->sync_bmp
, lzo_blocks
[0]->sync_bmp_nbits
) {
566 WARN_ON(codec
->writable_register
&&
567 codec
->writable_register(codec
, i
));
568 ret
= snd_soc_cache_read(codec
, i
, &val
);
571 codec
->cache_bypass
= 1;
572 ret
= snd_soc_write(codec
, i
, val
);
573 codec
->cache_bypass
= 0;
576 dev_dbg(codec
->dev
, "Synced register %#x, value = %#x\n",
583 static int snd_soc_lzo_cache_write(struct snd_soc_codec
*codec
,
584 unsigned int reg
, unsigned int value
)
586 struct snd_soc_lzo_ctx
*lzo_block
, **lzo_blocks
;
587 int ret
, blkindex
, blkpos
;
588 size_t blksize
, tmp_dst_len
;
591 /* index of the compressed lzo block */
592 blkindex
= snd_soc_lzo_get_blkindex(codec
, reg
);
593 /* register index within the decompressed block */
594 blkpos
= snd_soc_lzo_get_blkpos(codec
, reg
);
595 /* size of the compressed block */
596 blksize
= snd_soc_lzo_get_blksize(codec
);
597 lzo_blocks
= codec
->reg_cache
;
598 lzo_block
= lzo_blocks
[blkindex
];
600 /* save the pointer and length of the compressed block */
601 tmp_dst
= lzo_block
->dst
;
602 tmp_dst_len
= lzo_block
->dst_len
;
604 /* prepare the source to be the compressed block */
605 lzo_block
->src
= lzo_block
->dst
;
606 lzo_block
->src_len
= lzo_block
->dst_len
;
608 /* decompress the block */
609 ret
= snd_soc_lzo_decompress_cache_block(codec
, lzo_block
);
611 kfree(lzo_block
->dst
);
615 /* write the new value to the cache */
616 if (snd_soc_set_cache_val(lzo_block
->dst
, blkpos
, value
,
617 codec
->driver
->reg_word_size
)) {
618 kfree(lzo_block
->dst
);
622 /* prepare the source to be the decompressed block */
623 lzo_block
->src
= lzo_block
->dst
;
624 lzo_block
->src_len
= lzo_block
->dst_len
;
626 /* compress the block */
627 ret
= snd_soc_lzo_compress_cache_block(codec
, lzo_block
);
629 kfree(lzo_block
->dst
);
630 kfree(lzo_block
->src
);
634 /* set the bit so we know we have to sync this register */
635 set_bit(reg
, lzo_block
->sync_bmp
);
637 kfree(lzo_block
->src
);
640 lzo_block
->dst
= tmp_dst
;
641 lzo_block
->dst_len
= tmp_dst_len
;
645 static int snd_soc_lzo_cache_read(struct snd_soc_codec
*codec
,
646 unsigned int reg
, unsigned int *value
)
648 struct snd_soc_lzo_ctx
*lzo_block
, **lzo_blocks
;
649 int ret
, blkindex
, blkpos
;
650 size_t blksize
, tmp_dst_len
;
654 /* index of the compressed lzo block */
655 blkindex
= snd_soc_lzo_get_blkindex(codec
, reg
);
656 /* register index within the decompressed block */
657 blkpos
= snd_soc_lzo_get_blkpos(codec
, reg
);
658 /* size of the compressed block */
659 blksize
= snd_soc_lzo_get_blksize(codec
);
660 lzo_blocks
= codec
->reg_cache
;
661 lzo_block
= lzo_blocks
[blkindex
];
663 /* save the pointer and length of the compressed block */
664 tmp_dst
= lzo_block
->dst
;
665 tmp_dst_len
= lzo_block
->dst_len
;
667 /* prepare the source to be the compressed block */
668 lzo_block
->src
= lzo_block
->dst
;
669 lzo_block
->src_len
= lzo_block
->dst_len
;
671 /* decompress the block */
672 ret
= snd_soc_lzo_decompress_cache_block(codec
, lzo_block
);
674 /* fetch the value from the cache */
675 *value
= snd_soc_get_cache_val(lzo_block
->dst
, blkpos
,
676 codec
->driver
->reg_word_size
);
678 kfree(lzo_block
->dst
);
679 /* restore the pointer and length of the compressed block */
680 lzo_block
->dst
= tmp_dst
;
681 lzo_block
->dst_len
= tmp_dst_len
;
685 static int snd_soc_lzo_cache_exit(struct snd_soc_codec
*codec
)
687 struct snd_soc_lzo_ctx
**lzo_blocks
;
690 lzo_blocks
= codec
->reg_cache
;
694 blkcount
= snd_soc_lzo_block_count();
696 * the pointer to the bitmap used for syncing the cache
697 * is shared amongst all lzo_blocks. Ensure it is freed
701 kfree(lzo_blocks
[0]->sync_bmp
);
702 for (i
= 0; i
< blkcount
; ++i
) {
704 kfree(lzo_blocks
[i
]->wmem
);
705 kfree(lzo_blocks
[i
]->dst
);
707 /* each lzo_block is a pointer returned by kmalloc or NULL */
708 kfree(lzo_blocks
[i
]);
711 codec
->reg_cache
= NULL
;
715 static int snd_soc_lzo_cache_init(struct snd_soc_codec
*codec
)
717 struct snd_soc_lzo_ctx
**lzo_blocks
;
719 const struct snd_soc_codec_driver
*codec_drv
;
720 int ret
, tofree
, i
, blksize
, blkcount
;
722 unsigned long *sync_bmp
;
725 codec_drv
= codec
->driver
;
728 * If we have not been given a default register cache
729 * then allocate a dummy zero-ed out region, compress it
730 * and remember to free it afterwards.
733 if (!codec
->reg_def_copy
)
736 if (!codec
->reg_def_copy
) {
737 codec
->reg_def_copy
= kzalloc(codec
->reg_size
, GFP_KERNEL
);
738 if (!codec
->reg_def_copy
)
742 blkcount
= snd_soc_lzo_block_count();
743 codec
->reg_cache
= kzalloc(blkcount
* sizeof *lzo_blocks
,
745 if (!codec
->reg_cache
) {
749 lzo_blocks
= codec
->reg_cache
;
752 * allocate a bitmap to be used when syncing the cache with
753 * the hardware. Each time a register is modified, the corresponding
754 * bit is set in the bitmap, so we know that we have to sync
757 bmp_size
= codec_drv
->reg_cache_size
;
758 sync_bmp
= kmalloc(BITS_TO_LONGS(bmp_size
) * sizeof(long),
764 bitmap_zero(sync_bmp
, bmp_size
);
766 /* allocate the lzo blocks and initialize them */
767 for (i
= 0; i
< blkcount
; ++i
) {
768 lzo_blocks
[i
] = kzalloc(sizeof **lzo_blocks
,
770 if (!lzo_blocks
[i
]) {
775 lzo_blocks
[i
]->sync_bmp
= sync_bmp
;
776 lzo_blocks
[i
]->sync_bmp_nbits
= bmp_size
;
777 /* alloc the working space for the compressed block */
778 ret
= snd_soc_lzo_prepare(lzo_blocks
[i
]);
783 blksize
= snd_soc_lzo_get_blksize(codec
);
784 p
= codec
->reg_def_copy
;
785 end
= codec
->reg_def_copy
+ codec
->reg_size
;
786 /* compress the register map and fill the lzo blocks */
787 for (i
= 0; i
< blkcount
; ++i
, p
+= blksize
) {
788 lzo_blocks
[i
]->src
= p
;
789 if (p
+ blksize
> end
)
790 lzo_blocks
[i
]->src_len
= end
- p
;
792 lzo_blocks
[i
]->src_len
= blksize
;
793 ret
= snd_soc_lzo_compress_cache_block(codec
,
797 lzo_blocks
[i
]->decompressed_size
=
798 lzo_blocks
[i
]->src_len
;
802 kfree(codec
->reg_def_copy
);
803 codec
->reg_def_copy
= NULL
;
807 snd_soc_cache_exit(codec
);
810 kfree(codec
->reg_def_copy
);
811 codec
->reg_def_copy
= NULL
;
817 static int snd_soc_flat_cache_sync(struct snd_soc_codec
*codec
)
821 const struct snd_soc_codec_driver
*codec_drv
;
824 codec_drv
= codec
->driver
;
825 for (i
= 0; i
< codec_drv
->reg_cache_size
; ++i
) {
826 WARN_ON(codec
->writable_register
&&
827 codec
->writable_register(codec
, i
));
828 ret
= snd_soc_cache_read(codec
, i
, &val
);
831 if (codec
->reg_def_copy
)
832 if (snd_soc_get_cache_val(codec
->reg_def_copy
,
833 i
, codec_drv
->reg_word_size
) == val
)
835 ret
= snd_soc_write(codec
, i
, val
);
838 dev_dbg(codec
->dev
, "Synced register %#x, value = %#x\n",
844 static int snd_soc_flat_cache_write(struct snd_soc_codec
*codec
,
845 unsigned int reg
, unsigned int value
)
847 snd_soc_set_cache_val(codec
->reg_cache
, reg
, value
,
848 codec
->driver
->reg_word_size
);
852 static int snd_soc_flat_cache_read(struct snd_soc_codec
*codec
,
853 unsigned int reg
, unsigned int *value
)
855 *value
= snd_soc_get_cache_val(codec
->reg_cache
, reg
,
856 codec
->driver
->reg_word_size
);
860 static int snd_soc_flat_cache_exit(struct snd_soc_codec
*codec
)
862 if (!codec
->reg_cache
)
864 kfree(codec
->reg_cache
);
865 codec
->reg_cache
= NULL
;
869 static int snd_soc_flat_cache_init(struct snd_soc_codec
*codec
)
871 const struct snd_soc_codec_driver
*codec_drv
;
873 codec_drv
= codec
->driver
;
875 if (codec
->reg_def_copy
)
876 codec
->reg_cache
= kmemdup(codec
->reg_def_copy
,
877 codec
->reg_size
, GFP_KERNEL
);
879 codec
->reg_cache
= kzalloc(codec
->reg_size
, GFP_KERNEL
);
880 if (!codec
->reg_cache
)
886 /* an array of all supported compression types */
887 static const struct snd_soc_cache_ops cache_types
[] = {
888 /* Flat *must* be the first entry for fallback */
890 .id
= SND_SOC_FLAT_COMPRESSION
,
892 .init
= snd_soc_flat_cache_init
,
893 .exit
= snd_soc_flat_cache_exit
,
894 .read
= snd_soc_flat_cache_read
,
895 .write
= snd_soc_flat_cache_write
,
896 .sync
= snd_soc_flat_cache_sync
898 #ifdef CONFIG_SND_SOC_CACHE_LZO
900 .id
= SND_SOC_LZO_COMPRESSION
,
902 .init
= snd_soc_lzo_cache_init
,
903 .exit
= snd_soc_lzo_cache_exit
,
904 .read
= snd_soc_lzo_cache_read
,
905 .write
= snd_soc_lzo_cache_write
,
906 .sync
= snd_soc_lzo_cache_sync
910 .id
= SND_SOC_RBTREE_COMPRESSION
,
912 .init
= snd_soc_rbtree_cache_init
,
913 .exit
= snd_soc_rbtree_cache_exit
,
914 .read
= snd_soc_rbtree_cache_read
,
915 .write
= snd_soc_rbtree_cache_write
,
916 .sync
= snd_soc_rbtree_cache_sync
920 int snd_soc_cache_init(struct snd_soc_codec
*codec
)
924 for (i
= 0; i
< ARRAY_SIZE(cache_types
); ++i
)
925 if (cache_types
[i
].id
== codec
->compress_type
)
928 /* Fall back to flat compression */
929 if (i
== ARRAY_SIZE(cache_types
)) {
930 dev_warn(codec
->dev
, "Could not match compress type: %d\n",
931 codec
->compress_type
);
935 mutex_init(&codec
->cache_rw_mutex
);
936 codec
->cache_ops
= &cache_types
[i
];
938 if (codec
->cache_ops
->init
) {
939 if (codec
->cache_ops
->name
)
940 dev_dbg(codec
->dev
, "Initializing %s cache for %s codec\n",
941 codec
->cache_ops
->name
, codec
->name
);
942 return codec
->cache_ops
->init(codec
);
948 * NOTE: keep in mind that this function might be called
951 int snd_soc_cache_exit(struct snd_soc_codec
*codec
)
953 if (codec
->cache_ops
&& codec
->cache_ops
->exit
) {
954 if (codec
->cache_ops
->name
)
955 dev_dbg(codec
->dev
, "Destroying %s cache for %s codec\n",
956 codec
->cache_ops
->name
, codec
->name
);
957 return codec
->cache_ops
->exit(codec
);
963 * snd_soc_cache_read: Fetch the value of a given register from the cache.
965 * @codec: CODEC to configure.
966 * @reg: The register index.
967 * @value: The value to be returned.
969 int snd_soc_cache_read(struct snd_soc_codec
*codec
,
970 unsigned int reg
, unsigned int *value
)
974 mutex_lock(&codec
->cache_rw_mutex
);
976 if (value
&& codec
->cache_ops
&& codec
->cache_ops
->read
) {
977 ret
= codec
->cache_ops
->read(codec
, reg
, value
);
978 mutex_unlock(&codec
->cache_rw_mutex
);
982 mutex_unlock(&codec
->cache_rw_mutex
);
985 EXPORT_SYMBOL_GPL(snd_soc_cache_read
);
988 * snd_soc_cache_write: Set the value of a given register in the cache.
990 * @codec: CODEC to configure.
991 * @reg: The register index.
992 * @value: The new register value.
994 int snd_soc_cache_write(struct snd_soc_codec
*codec
,
995 unsigned int reg
, unsigned int value
)
999 mutex_lock(&codec
->cache_rw_mutex
);
1001 if (codec
->cache_ops
&& codec
->cache_ops
->write
) {
1002 ret
= codec
->cache_ops
->write(codec
, reg
, value
);
1003 mutex_unlock(&codec
->cache_rw_mutex
);
1007 mutex_unlock(&codec
->cache_rw_mutex
);
1010 EXPORT_SYMBOL_GPL(snd_soc_cache_write
);
1013 * snd_soc_cache_sync: Sync the register cache with the hardware.
1015 * @codec: CODEC to configure.
1017 * Any registers that should not be synced should be marked as
1018 * volatile. In general drivers can choose not to use the provided
1019 * syncing functionality if they so require.
1021 int snd_soc_cache_sync(struct snd_soc_codec
*codec
)
1026 if (!codec
->cache_sync
) {
1030 if (!codec
->cache_ops
|| !codec
->cache_ops
->sync
)
1033 if (codec
->cache_ops
->name
)
1034 name
= codec
->cache_ops
->name
;
1038 if (codec
->cache_ops
->name
)
1039 dev_dbg(codec
->dev
, "Syncing %s cache for %s codec\n",
1040 codec
->cache_ops
->name
, codec
->name
);
1041 trace_snd_soc_cache_sync(codec
, name
, "start");
1042 ret
= codec
->cache_ops
->sync(codec
);
1044 codec
->cache_sync
= 0;
1045 trace_snd_soc_cache_sync(codec
, name
, "end");
1048 EXPORT_SYMBOL_GPL(snd_soc_cache_sync
);
1050 static int snd_soc_get_reg_access_index(struct snd_soc_codec
*codec
,
1053 const struct snd_soc_codec_driver
*codec_drv
;
1054 unsigned int min
, max
, index
;
1056 codec_drv
= codec
->driver
;
1058 max
= codec_drv
->reg_access_size
- 1;
1060 index
= (min
+ max
) / 2;
1061 if (codec_drv
->reg_access_default
[index
].reg
== reg
)
1063 if (codec_drv
->reg_access_default
[index
].reg
< reg
)
1067 } while (min
<= max
);
1071 int snd_soc_default_volatile_register(struct snd_soc_codec
*codec
,
1076 if (reg
>= codec
->driver
->reg_cache_size
)
1078 index
= snd_soc_get_reg_access_index(codec
, reg
);
1081 return codec
->driver
->reg_access_default
[index
].vol
;
1083 EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register
);
1085 int snd_soc_default_readable_register(struct snd_soc_codec
*codec
,
1090 if (reg
>= codec
->driver
->reg_cache_size
)
1092 index
= snd_soc_get_reg_access_index(codec
, reg
);
1095 return codec
->driver
->reg_access_default
[index
].read
;
1097 EXPORT_SYMBOL_GPL(snd_soc_default_readable_register
);
1099 int snd_soc_default_writable_register(struct snd_soc_codec
*codec
,
1104 if (reg
>= codec
->driver
->reg_cache_size
)
1106 index
= snd_soc_get_reg_access_index(codec
, reg
);
1109 return codec
->driver
->reg_access_default
[index
].write
;
1111 EXPORT_SYMBOL_GPL(snd_soc_default_writable_register
);