// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - LZO caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/lzo.h>
#include <linux/slab.h>

#include "internal.h"
static int regcache_lzo_exit(struct regmap *map);
struct regcache_lzo_ctx {
	void *wmem;
	void *dst;
	const void *src;
	size_t src_len;
	size_t dst_len;
	size_t decompressed_size;
	unsigned long *sync_bmp;
	int sync_bmp_nbits;
};
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!lzo_ctx->wmem)
		return -ENOMEM;
	return 0;
}
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t compress_size;
	int ret;

	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
		return -EINVAL;
	lzo_ctx->dst_len = compress_size;
	return 0;
}
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
	size_t dst_len;
	int ret;

	dst_len = lzo_ctx->dst_len;
	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
				    lzo_ctx->dst, &dst_len);
	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
		return -EINVAL;
	return 0;
}
static int regcache_lzo_compress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_compress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}
static int regcache_lzo_decompress_cache_block(struct regmap *map,
		struct regcache_lzo_ctx *lzo_ctx)
{
	int ret;

	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
	if (!lzo_ctx->dst) {
		lzo_ctx->dst_len = 0;
		return -ENOMEM;
	}

	ret = regcache_lzo_decompress(lzo_ctx);
	if (ret < 0)
		return ret;
	return 0;
}
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return ((reg / map->reg_stride) * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}
static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return (reg / map->reg_stride) %
		(DIV_ROUND_UP(map->cache_size_raw,
			      regcache_lzo_block_count(map)) /
		 map->cache_word_size);
}
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}
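
/*
 * Worked example of the block arithmetic above (hypothetical numbers):
 * with cache_size_raw = 256, cache_word_size = 2, reg_stride = 1 and
 * LZO_BLOCK_NUM = 8, each block covers DIV_ROUND_UP(256, 8) = 32 bytes,
 * i.e. 16 registers. Register 37 then lands in block (37 * 2) / 32 = 2
 * at position 37 % 16 = 5 within the decompressed block.
 */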
static int regcache_lzo_init(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	size_t bmp_size;
	int ret, i, blksize, blkcount;
	const char *p, *end;
	unsigned long *sync_bmp;

	ret = 0;

	blkcount = regcache_lzo_block_count(map);
	map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
			     GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;
	lzo_blocks = map->cache;

	/*
	 * allocate a bitmap to be used when syncing the cache with
	 * the hardware. Each time a register is modified, the corresponding
	 * bit is set in the bitmap, so we know that we have to sync
	 * that register.
	 */
	bmp_size = map->num_reg_defaults_raw;
	sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
	if (!sync_bmp) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate the lzo blocks and initialize them */
	for (i = 0; i < blkcount; i++) {
		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
					GFP_KERNEL);
		if (!lzo_blocks[i]) {
			bitmap_free(sync_bmp);
			ret = -ENOMEM;
			goto err;
		}
		lzo_blocks[i]->sync_bmp = sync_bmp;
		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
		/* alloc the working space for the compressed block */
		ret = regcache_lzo_prepare(lzo_blocks[i]);
		if (ret < 0)
			goto err;
	}

	blksize = regcache_lzo_get_blksize(map);
	p = map->reg_defaults_raw;
	end = map->reg_defaults_raw + map->cache_size_raw;
	/* compress the register map and fill the lzo blocks */
	for (i = 0; i < blkcount; i++, p += blksize) {
		lzo_blocks[i]->src = p;
		if (p + blksize > end)
			lzo_blocks[i]->src_len = end - p;
		else
			lzo_blocks[i]->src_len = blksize;
		ret = regcache_lzo_compress_cache_block(map,
							lzo_blocks[i]);
		if (ret < 0)
			goto err;
		lzo_blocks[i]->decompressed_size =
			lzo_blocks[i]->src_len;
	}

	return 0;
err:
	regcache_lzo_exit(map);
	return ret;
}
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks. Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		bitmap_free(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}

	kfree(lzo_blocks);
	map->cache = NULL;

	return 0;
}
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);

	/* drop the stale compressed block and the decompressed scratch copy */
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;

out:
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret = 0;

	lzo_blocks = map->cache;
	i = min;
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = false;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return ret;
}
struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};
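
/*
 * Usage sketch: a driver can opt into this backend by setting
 * cache_type = REGCACHE_COMPRESSED in its regmap_config; the regcache core
 * then selects regcache_lzo_ops and .init above compresses the default
 * register map. The driver name and register values below are hypothetical,
 * for illustration only.
 *
 *	static const struct reg_default foo_reg_defaults[] = {
 *		{ .reg = 0x00, .def = 0x1234 },
 *		{ .reg = 0x01, .def = 0x0000 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x01,
 *		.reg_defaults = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *		.cache_type = REGCACHE_COMPRESSED,
 *	};
 */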