/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/slab.h>
14 #include <linux/device.h>
15 #include <linux/debugfs.h>
16 #include <linux/rbtree.h>
17 #include <linux/seq_file.h>
/*
 * Forward declarations: regcache_rbtree_init() calls write() to seed the
 * cache and exit() on its error path, both defined later in this file.
 */
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
25 struct regcache_rbtree_node
{
26 /* the actual rbtree node holding this block */
28 /* base register handled by this block */
29 unsigned int base_reg
;
30 /* block of adjacent registers */
32 /* number of registers available in the block */
34 } __attribute__ ((packed
));
36 struct regcache_rbtree_ctx
{
38 struct regcache_rbtree_node
*cached_rbnode
;
41 static inline void regcache_rbtree_get_base_top_reg(
42 struct regcache_rbtree_node
*rbnode
,
43 unsigned int *base
, unsigned int *top
)
45 *base
= rbnode
->base_reg
;
46 *top
= rbnode
->base_reg
+ rbnode
->blklen
- 1;
49 static unsigned int regcache_rbtree_get_register(
50 struct regcache_rbtree_node
*rbnode
, unsigned int idx
,
51 unsigned int word_size
)
53 return regcache_get_val(rbnode
->block
, idx
, word_size
);
56 static void regcache_rbtree_set_register(struct regcache_rbtree_node
*rbnode
,
57 unsigned int idx
, unsigned int val
,
58 unsigned int word_size
)
60 regcache_set_val(rbnode
->block
, idx
, val
, word_size
);
63 static struct regcache_rbtree_node
*regcache_rbtree_lookup(struct regmap
*map
,
66 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
68 struct regcache_rbtree_node
*rbnode
;
69 unsigned int base_reg
, top_reg
;
71 rbnode
= rbtree_ctx
->cached_rbnode
;
73 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
74 if (reg
>= base_reg
&& reg
<= top_reg
)
78 node
= rbtree_ctx
->root
.rb_node
;
80 rbnode
= container_of(node
, struct regcache_rbtree_node
, node
);
81 regcache_rbtree_get_base_top_reg(rbnode
, &base_reg
, &top_reg
);
82 if (reg
>= base_reg
&& reg
<= top_reg
) {
83 rbtree_ctx
->cached_rbnode
= rbnode
;
85 } else if (reg
> top_reg
) {
86 node
= node
->rb_right
;
87 } else if (reg
< base_reg
) {
95 static int regcache_rbtree_insert(struct rb_root
*root
,
96 struct regcache_rbtree_node
*rbnode
)
98 struct rb_node
**new, *parent
;
99 struct regcache_rbtree_node
*rbnode_tmp
;
100 unsigned int base_reg_tmp
, top_reg_tmp
;
101 unsigned int base_reg
;
104 new = &root
->rb_node
;
106 rbnode_tmp
= container_of(*new, struct regcache_rbtree_node
,
108 /* base and top registers of the current rbnode */
109 regcache_rbtree_get_base_top_reg(rbnode_tmp
, &base_reg_tmp
,
111 /* base register of the rbnode to be added */
112 base_reg
= rbnode
->base_reg
;
114 /* if this register has already been inserted, just return */
115 if (base_reg
>= base_reg_tmp
&&
116 base_reg
<= top_reg_tmp
)
118 else if (base_reg
> top_reg_tmp
)
119 new = &((*new)->rb_right
);
120 else if (base_reg
< base_reg_tmp
)
121 new = &((*new)->rb_left
);
124 /* insert the node into the rbtree */
125 rb_link_node(&rbnode
->node
, parent
, new);
126 rb_insert_color(&rbnode
->node
, root
);
131 #ifdef CONFIG_DEBUG_FS
132 static int rbtree_show(struct seq_file
*s
, void *ignored
)
134 struct regmap
*map
= s
->private;
135 struct regcache_rbtree_ctx
*rbtree_ctx
= map
->cache
;
136 struct regcache_rbtree_node
*n
;
137 struct rb_node
*node
;
138 unsigned int base
, top
;
143 mutex_lock(&map
->lock
);
145 for (node
= rb_first(&rbtree_ctx
->root
); node
!= NULL
;
146 node
= rb_next(node
)) {
147 n
= container_of(node
, struct regcache_rbtree_node
, node
);
149 regcache_rbtree_get_base_top_reg(n
, &base
, &top
);
150 seq_printf(s
, "%x-%x (%d)\n", base
, top
, top
- base
+ 1);
153 registers
+= top
- base
+ 1;
157 average
= registers
/ nodes
;
161 seq_printf(s
, "%d nodes, %d registers, average %d registers\n",
162 nodes
, registers
, average
);
164 mutex_unlock(&map
->lock
);
169 static int rbtree_open(struct inode
*inode
, struct file
*file
)
171 return single_open(file
, rbtree_show
, inode
->i_private
);
174 static const struct file_operations rbtree_fops
= {
178 .release
= single_release
,
181 static void rbtree_debugfs_init(struct regmap
*map
)
183 debugfs_create_file("rbtree", 0400, map
->debugfs
, map
, &rbtree_fops
);
186 static void rbtree_debugfs_init(struct regmap
*map
)
191 static int regcache_rbtree_init(struct regmap
*map
)
193 struct regcache_rbtree_ctx
*rbtree_ctx
;
197 map
->cache
= kmalloc(sizeof *rbtree_ctx
, GFP_KERNEL
);
201 rbtree_ctx
= map
->cache
;
202 rbtree_ctx
->root
= RB_ROOT
;
203 rbtree_ctx
->cached_rbnode
= NULL
;
205 for (i
= 0; i
< map
->num_reg_defaults
; i
++) {
206 ret
= regcache_rbtree_write(map
,
207 map
->reg_defaults
[i
].reg
,
208 map
->reg_defaults
[i
].def
);
213 rbtree_debugfs_init(map
);
218 regcache_rbtree_exit(map
);
222 static int regcache_rbtree_exit(struct regmap
*map
)
224 struct rb_node
*next
;
225 struct regcache_rbtree_ctx
*rbtree_ctx
;
226 struct regcache_rbtree_node
*rbtree_node
;
228 /* if we've already been called then just return */
229 rbtree_ctx
= map
->cache
;
233 /* free up the rbtree */
234 next
= rb_first(&rbtree_ctx
->root
);
236 rbtree_node
= rb_entry(next
, struct regcache_rbtree_node
, node
);
237 next
= rb_next(&rbtree_node
->node
);
238 rb_erase(&rbtree_node
->node
, &rbtree_ctx
->root
);
239 kfree(rbtree_node
->block
);
243 /* release the resources */
250 static int regcache_rbtree_read(struct regmap
*map
,
251 unsigned int reg
, unsigned int *value
)
253 struct regcache_rbtree_node
*rbnode
;
254 unsigned int reg_tmp
;
256 rbnode
= regcache_rbtree_lookup(map
, reg
);
258 reg_tmp
= reg
- rbnode
->base_reg
;
259 *value
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
260 map
->cache_word_size
);
269 static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node
*rbnode
,
270 unsigned int pos
, unsigned int reg
,
271 unsigned int value
, unsigned int word_size
)
275 blk
= krealloc(rbnode
->block
,
276 (rbnode
->blklen
+ 1) * word_size
, GFP_KERNEL
);
280 /* insert the register value in the correct place in the rbnode block */
281 memmove(blk
+ (pos
+ 1) * word_size
,
282 blk
+ pos
* word_size
,
283 (rbnode
->blklen
- pos
) * word_size
);
285 /* update the rbnode block, its size and the base register */
289 rbnode
->base_reg
= reg
;
291 regcache_rbtree_set_register(rbnode
, pos
, value
, word_size
);
295 static int regcache_rbtree_write(struct regmap
*map
, unsigned int reg
,
298 struct regcache_rbtree_ctx
*rbtree_ctx
;
299 struct regcache_rbtree_node
*rbnode
, *rbnode_tmp
;
300 struct rb_node
*node
;
302 unsigned int reg_tmp
;
307 rbtree_ctx
= map
->cache
;
308 /* if we can't locate it in the cached rbnode we'll have
309 * to traverse the rbtree looking for it.
311 rbnode
= regcache_rbtree_lookup(map
, reg
);
313 reg_tmp
= reg
- rbnode
->base_reg
;
314 val
= regcache_rbtree_get_register(rbnode
, reg_tmp
,
315 map
->cache_word_size
);
318 regcache_rbtree_set_register(rbnode
, reg_tmp
, value
,
319 map
->cache_word_size
);
321 /* look for an adjacent register to the one we are about to add */
322 for (node
= rb_first(&rbtree_ctx
->root
); node
;
323 node
= rb_next(node
)) {
324 rbnode_tmp
= rb_entry(node
, struct regcache_rbtree_node
, node
);
325 for (i
= 0; i
< rbnode_tmp
->blklen
; i
++) {
326 reg_tmp
= rbnode_tmp
->base_reg
+ i
;
327 if (abs(reg_tmp
- reg
) != 1)
329 /* decide where in the block to place our register */
330 if (reg_tmp
+ 1 == reg
)
334 ret
= regcache_rbtree_insert_to_block(rbnode_tmp
, pos
,
336 map
->cache_word_size
);
339 rbtree_ctx
->cached_rbnode
= rbnode_tmp
;
343 /* we did not manage to find a place to insert it in an existing
344 * block so create a new rbnode with a single register in its block.
345 * This block will get populated further if any other adjacent
346 * registers get modified in the future.
348 rbnode
= kzalloc(sizeof *rbnode
, GFP_KERNEL
);
352 rbnode
->base_reg
= reg
;
353 rbnode
->block
= kmalloc(rbnode
->blklen
* map
->cache_word_size
,
355 if (!rbnode
->block
) {
359 regcache_rbtree_set_register(rbnode
, 0, value
, map
->cache_word_size
);
360 regcache_rbtree_insert(&rbtree_ctx
->root
, rbnode
);
361 rbtree_ctx
->cached_rbnode
= rbnode
;
367 static int regcache_rbtree_sync(struct regmap
*map
, unsigned int min
,
370 struct regcache_rbtree_ctx
*rbtree_ctx
;
371 struct rb_node
*node
;
372 struct regcache_rbtree_node
*rbnode
;
378 rbtree_ctx
= map
->cache
;
379 for (node
= rb_first(&rbtree_ctx
->root
); node
; node
= rb_next(node
)) {
380 rbnode
= rb_entry(node
, struct regcache_rbtree_node
, node
);
382 if (rbnode
->base_reg
< min
)
384 if (rbnode
->base_reg
> max
)
386 if (rbnode
->base_reg
+ rbnode
->blklen
< min
)
389 if (min
> rbnode
->base_reg
)
390 base
= min
- rbnode
->base_reg
;
394 if (max
< rbnode
->base_reg
+ rbnode
->blklen
)
395 end
= rbnode
->base_reg
+ rbnode
->blklen
- max
;
397 end
= rbnode
->blklen
;
399 for (i
= base
; i
< end
; i
++) {
400 regtmp
= rbnode
->base_reg
+ i
;
401 val
= regcache_rbtree_get_register(rbnode
, i
,
402 map
->cache_word_size
);
404 /* Is this the hardware default? If so skip. */
405 ret
= regcache_lookup_reg(map
, regtmp
);
406 if (ret
>= 0 && val
== map
->reg_defaults
[ret
].def
)
409 map
->cache_bypass
= 1;
410 ret
= _regmap_write(map
, regtmp
, val
);
411 map
->cache_bypass
= 0;
414 dev_dbg(map
->dev
, "Synced register %#x, value %#x\n",
422 struct regcache_ops regcache_rbtree_ops
= {
423 .type
= REGCACHE_RBTREE
,
425 .init
= regcache_rbtree_init
,
426 .exit
= regcache_rbtree_exit
,
427 .read
= regcache_rbtree_read
,
428 .write
= regcache_rbtree_write
,
429 .sync
= regcache_rbtree_sync