/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/slab.h>
14 #include <linux/debugfs.h>
15 #include <linux/rbtree.h>
16 #include <linux/seq_file.h>
/*
 * Forward declarations: regcache_rbtree_init() needs to write the register
 * defaults through regcache_rbtree_write() and unwind via
 * regcache_rbtree_exit(), both of which are defined further down.
 */
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
24 struct regcache_rbtree_node
{
25 /* the actual rbtree node holding this block */
27 /* base register handled by this block */
28 unsigned int base_reg
;
29 /* block of adjacent registers */
31 /* number of registers available in the block */
33 } __attribute__ ((packed
));
35 struct regcache_rbtree_ctx
{
37 struct regcache_rbtree_node
*cached_rbnode
;
40 static inline void regcache_rbtree_get_base_top_reg(
41 struct regcache_rbtree_node
*rbnode
,
42 unsigned int *base
, unsigned int *top
)
44 *base
= rbnode
->base_reg
;
45 *top
= rbnode
->base_reg
+ rbnode
->blklen
- 1;
48 static unsigned int regcache_rbtree_get_register(
49 struct regcache_rbtree_node
*rbnode
, unsigned int idx
,
50 unsigned int word_size
)
52 return regcache_get_val(rbnode
->block
, idx
, word_size
);
55 static void regcache_rbtree_set_register(struct regcache_rbtree_node
*rbnode
,
56 unsigned int idx
, unsigned int val
,
57 unsigned int word_size
)
59 regcache_set_val(rbnode
->block
, idx
, val
, word_size
);
62 static struct regcache_rbtree_node
*regcache_rbtree_lookup(struct regmap
*map
,
65 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
67 struct regcache_rbtree_node
*rbnode
;
68 unsigned int base_reg
, top_reg
;
70 rbnode
= rbtree_ctx
->cached_rbnode
;
72 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
73 if (reg
>= base_reg
&& reg
<= top_reg
)
77 node
= rbtree_ctx
->root
.rb_node
;
79 rbnode
= container_of(node
, struct regcache_rbtree_node
, node
);
80 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
81 if (reg
>= base_reg
&& reg
<= top_reg
) {
82 rbtree_ctx
->cached_rbnode
= rbnode
;
84 } else if (reg
> top_reg
) {
85 node
= node
->rb_right
;
86 } else if (reg
< base_reg
) {
94 static int regcache_rbtree_insert(struct rb_root
*root
,
95 struct regcache_rbtree_node
*rbnode
)
97 struct rb_node
**new, *parent
;
98 struct regcache_rbtree_node
*rbnode_tmp
;
99 unsigned int base_reg_tmp
, top_reg_tmp
;
100 unsigned int base_reg
;
103 new = &root
->rb_node
;
105 rbnode_tmp
= container_of(*new, struct regcache_rbtree_node
,
107 /* base and top registers of the current rbnode */
108 regcache_rbtree_get_base_top_reg(rbnode_tmp
, &base_reg_tmp
,
110 /* base register of the rbnode to be added */
111 base_reg
= rbnode
->base_reg
;
113 /* if this register has already been inserted, just return */
114 if (base_reg
>= base_reg_tmp
&&
115 base_reg
<= top_reg_tmp
)
117 else if (base_reg
> top_reg_tmp
)
118 new = &((*new)->rb_right
);
119 else if (base_reg
< base_reg_tmp
)
120 new = &((*new)->rb_left
);
123 /* insert the node into the rbtree */
124 rb_link_node(&rbnode
->node
, parent
, new);
125 rb_insert_color(&rbnode
->node
, root
);
130 #ifdef CONFIG_DEBUG_FS
131 static int rbtree_show(struct seq_file
*s
, void *ignored
)
133 struct regmap
*map
= s
->private;
134 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
135 struct regcache_rbtree_node
*n
;
136 struct rb_node
*node
;
137 unsigned int base
, top
;
142 mutex_lock(&map
->lock
);
144 for (node
= rb_first(&rbtree_ctx
->root
); node
!= NULL
;
145 node
= rb_next(node
)) {
146 n
= container_of(node
, struct regcache_rbtree_node
, node
);
148 regcache_rbtree_get_base_top_reg(n
, &base
, &top
);
149 seq_printf(s
, "%x-%x (%d)\n", base
, top
, top
- base
+ 1);
152 registers
+= top
- base
+ 1;
156 average
= registers
/ nodes
;
160 seq_printf(s
, "%d nodes, %d registers, average %d registers\n",
161 nodes
, registers
, average
);
163 mutex_unlock(&map
->lock
);
168 static int rbtree_open(struct inode
*inode
, struct file
*file
)
170 return single_open(file
, rbtree_show
, inode
->i_private
);
173 static const struct file_operations rbtree_fops
= {
177 .release
= single_release
,
180 static void rbtree_debugfs_init(struct regmap
*map
)
182 debugfs_create_file("rbtree", 0400, map
->debugfs
, map
, &rbtree_fops
);
185 static void rbtree_debugfs_init(struct regmap
*map
)
190 static int regcache_rbtree_init(struct regmap
*map
)
192 struct regcache_rbtree_ctx
*rbtree_ctx
;
196 map
->cache
= kmalloc(sizeof *rbtree_ctx
, GFP_KERNEL
);
200 rbtree_ctx
= map
->cache
;
201 rbtree_ctx
->root
= RB_ROOT
;
202 rbtree_ctx
->cached_rbnode
= NULL
;
204 for (i
= 0; i
< map
->num_reg_defaults
; i
++) {
205 ret
= regcache_rbtree_write(map
,
206 map
->reg_defaults
[i
].reg
,
207 map
->reg_defaults
[i
].def
);
212 rbtree_debugfs_init(map
);
217 regcache_rbtree_exit(map
);
221 static int regcache_rbtree_exit(struct regmap
*map
)
223 struct rb_node
*next
;
224 struct regcache_rbtree_ctx
*rbtree_ctx
;
225 struct regcache_rbtree_node
*rbtree_node
;
227 /* if we've already been called then just return */
228 rbtree_ctx
= map
->cache
;
232 /* free up the rbtree */
233 next
= rb_first(&rbtree_ctx
->root
);
235 rbtree_node
= rb_entry(next
, struct regcache_rbtree_node
, node
);
236 next
= rb_next(&rbtree_node
->node
);
237 rb_erase(&rbtree_node
->node
, &rbtree_ctx
->root
);
238 kfree(rbtree_node
->block
);
242 /* release the resources */
249 static int regcache_rbtree_read(struct regmap
*map
,
250 unsigned int reg
, unsigned int *value
)
252 struct regcache_rbtree_node
*rbnode
;
253 unsigned int reg_tmp
;
255 rbnode
= regcache_rbtree_lookup(map
, reg
);
257 reg_tmp
= reg
- rbnode
->base_reg
;
258 *value
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
259 map
->cache_word_size
);
268 static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node
*rbnode
,
269 unsigned int pos
, unsigned int reg
,
270 unsigned int value
, unsigned int word_size
)
274 blk
= krealloc(rbnode
->block
,
275 (rbnode
->blklen
+ 1) * word_size
, GFP_KERNEL
);
279 /* insert the register value in the correct place in the rbnode block */
280 memmove(blk
+ (pos
+ 1) * word_size
,
281 blk
+ pos
* word_size
,
282 (rbnode
->blklen
- pos
) * word_size
);
284 /* update the rbnode block, its size and the base register */
288 rbnode
->base_reg
= reg
;
290 regcache_rbtree_set_register(rbnode
, pos
, value
, word_size
);
294 static int regcache_rbtree_write(struct regmap
*map
, unsigned int reg
,
297 struct regcache_rbtree_ctx
*rbtree_ctx
;
298 struct regcache_rbtree_node
*rbnode
, *rbnode_tmp
;
299 struct rb_node
*node
;
301 unsigned int reg_tmp
;
306 rbtree_ctx
= map
->cache
;
307 /* if we can't locate it in the cached rbnode we'll have
308 * to traverse the rbtree looking for it.
310 rbnode
= regcache_rbtree_lookup(map
, reg
);
312 reg_tmp
= reg
- rbnode
->base_reg
;
313 val
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
314 map
->cache_word_size
);
317 regcache_rbtree_set_register(rbnode
, reg_tmp
, value
,
318 map
->cache_word_size
);
320 /* look for an adjacent register to the one we are about to add */
321 for (node
= rb_first(&rbtree_ctx
->root
); node
;
322 node
= rb_next(node
)) {
323 rbnode_tmp
= rb_entry(node
, struct regcache_rbtree_node
, node
);
324 for (i
= 0; i
< rbnode_tmp
->blklen
; i
++) {
325 reg_tmp
= rbnode_tmp
->base_reg
+ i
;
326 if (abs(reg_tmp
- reg
) != 1)
328 /* decide where in the block to place our register */
329 if (reg_tmp
+ 1 == reg
)
333 ret
= regcache_rbtree_insert_to_block(rbnode_tmp
, pos
,
335 map
->cache_word_size
);
338 rbtree_ctx
->cached_rbnode
= rbnode_tmp
;
342 /* we did not manage to find a place to insert it in an existing
343 * block so create a new rbnode with a single register in its block.
344 * This block will get populated further if any other adjacent
345 * registers get modified in the future.
347 rbnode
= kzalloc(sizeof *rbnode
, GFP_KERNEL
);
351 rbnode
->base_reg
= reg
;
352 rbnode
->block
= kmalloc(rbnode
->blklen
* map
->cache_word_size
,
354 if (!rbnode
->block
) {
358 regcache_rbtree_set_register(rbnode
, 0, value
, map
->cache_word_size
);
359 regcache_rbtree_insert(&rbtree_ctx
->root
, rbnode
);
360 rbtree_ctx
->cached_rbnode
= rbnode
;
366 static int regcache_rbtree_sync(struct regmap
*map
)
368 struct regcache_rbtree_ctx
*rbtree_ctx
;
369 struct rb_node
*node
;
370 struct regcache_rbtree_node
*rbnode
;
376 rbtree_ctx
= map
->cache
;
377 for (node
= rb_first(&rbtree_ctx
->root
); node
; node
= rb_next(node
)) {
378 rbnode
= rb_entry(node
, struct regcache_rbtree_node
, node
);
379 for (i
= 0; i
< rbnode
->blklen
; i
++) {
380 regtmp
= rbnode
->base_reg
+ i
;
381 val
= regcache_rbtree_get_register(rbnode
, i
,
382 map
->cache_word_size
);
384 /* Is this the hardware default? If so skip. */
385 ret
= regcache_lookup_reg(map
, i
);
386 if (ret
> 0 && val
== map
->reg_defaults
[ret
].def
)
389 map
->cache_bypass
= 1;
390 ret
= _regmap_write(map
, regtmp
, val
);
391 map
->cache_bypass
= 0;
394 dev_dbg(map
->dev
, "Synced register %#x, value %#x\n",
402 struct regcache_ops regcache_rbtree_ops
= {
403 .type
= REGCACHE_RBTREE
,
405 .init
= regcache_rbtree_init
,
406 .exit
= regcache_rbtree_exit
,
407 .read
= regcache_rbtree_read
,
408 .write
= regcache_rbtree_write
,
409 .sync
= regcache_rbtree_sync