/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));
struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
static unsigned int regcache_rbtree_get_register(
	struct regcache_rbtree_node *rbnode, unsigned int idx,
	unsigned int word_size)
{
	return regcache_get_val(rbnode->block, idx, word_size);
}
static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val,
					 unsigned int word_size)
{
	regcache_set_val(rbnode->block, idx, val, word_size);
}
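
/*
 * Look up the node whose block contains @reg.  The most recently used
 * node is tried first as a cheap shortcut; otherwise the tree is walked
 * from the root, going left or right depending on how @reg compares with
 * each node's base and top registers.
 */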
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}
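
/*
 * Link a new node into the rbtree.  The tree is ordered by register
 * range; if the new node's base register already falls within an existing
 * block, nothing is inserted and 0 is returned.
 */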
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
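
/*
 * The debugfs "rbtree" file below prints one "base-top (count)" line per
 * block followed by totals, which shows how well adjacent registers are
 * being coalesced into blocks.
 */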
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
		   nodes, registers, average);

	return 0;
}
static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}
static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}
static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
						      map->cache_word_size);
	} else {
		return -ENOENT;
	}

	return 0;
}
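
/*
 * Grow an existing block by one register: reallocate the block, shift the
 * entries from @pos onwards up by one slot to open a hole, then store the
 * new value there.  Inserting at position 0 also pulls the block's base
 * register down to @reg.
 */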
static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value, unsigned int word_size)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * word_size,
		blk + pos * word_size,
		(rbnode->blklen - pos) * word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(rbnode, pos, value, word_size);
	return 0;
}
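
/*
 * Cache a register write.  A register that is already cached is updated in
 * place; otherwise we first try to extend a block that starts or ends one
 * register stride away, and only when no adjacent block exists do we
 * allocate a new single-register node.
 */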
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		val = regcache_rbtree_get_register(rbnode, reg_tmp,
						   map->cache_word_size);
		if (val == value)
			return 0;
		regcache_rbtree_set_register(rbnode, reg_tmp, value,
					     map->cache_word_size);
	} else {
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);
			for (i = 0; i < rbnode_tmp->blklen; i++) {
				reg_tmp = rbnode_tmp->base_reg +
						(i * map->reg_stride);
				if (abs(reg_tmp - reg) != map->reg_stride)
					continue;
				/* decide where in the block to place our register */
				if (reg_tmp + map->reg_stride == reg)
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
								      reg, value,
								      map->cache_word_size);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
				return 0;
			}
		}
		/* we did not manage to find a place to insert it in an existing
		 * block so create a new rbnode with a single register in its block.
		 * This block will get populated further if any other adjacent
		 * registers get modified in the future.
		 */
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
					GFP_KERNEL);
		if (!rbnode->block) {
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
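
/*
 * Write back every cached value in the [min, max] register window,
 * skipping registers that still hold their hardware default.  Cache
 * bypass is enabled around each write so the sync itself does not dirty
 * the cache.
 */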
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i, base, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		if (rbnode->base_reg < min)
			continue;
		if (rbnode->base_reg > max)
			break;
		if (rbnode->base_reg + rbnode->blklen < min)
			continue;

		if (min > rbnode->base_reg)
			base = min - rbnode->base_reg;
		else
			base = 0;

		/* clamp the end index to the requested range */
		if (max < rbnode->base_reg + rbnode->blklen)
			end = max - rbnode->base_reg + 1;
		else
			end = rbnode->blklen;

		for (i = base; i < end; i++) {
			regtmp = rbnode->base_reg + (i * map->reg_stride);
			val = regcache_rbtree_get_register(rbnode, i,
							   map->cache_word_size);

			/* Is this the hardware default?  If so skip. */
			ret = regcache_lookup_reg(map, regtmp);
			if (ret >= 0 && val == map->reg_defaults[ret].def)
				continue;

			map->cache_bypass = 1;
			ret = _regmap_write(map, regtmp, val);
			map->cache_bypass = 0;
			if (ret)
				return ret;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				regtmp, val);
		}
	}

	return 0;
}
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};