/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
        /* block of adjacent registers */
        void *block;
        /* Which registers are present */
        unsigned long *cache_present;
        /* base register handled by this block */
        unsigned int base_reg;
        /* number of registers available in the block */
        unsigned int blklen;
        /* the actual rbtree node holding this block */
        struct rb_node node;
} __attribute__ ((packed));
struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};
static inline void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
static unsigned int regcache_rbtree_get_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode, unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}
static void regcache_rbtree_set_register(struct regmap *map,
                                         struct regcache_rbtree_node *rbnode,
                                         unsigned int idx, unsigned int val)
{
        set_bit(idx, rbnode->cache_present);
        regcache_set_val(map, rbnode->block, idx, val);
}
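
/*
 * Find the block that caches @reg.  The most recently used node is tried
 * first as a fast path; a hit during the tree walk updates cached_rbnode
 * so that accesses to neighbouring registers stay cheap.
 */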
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                           unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}
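
/*
 * Link a block into the rbtree, ordered by its register range.  Returns 1
 * on insertion, or 0 if the block's base register already falls inside an
 * existing node.
 */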
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
                                  struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}
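
/*
 * debugfs "rbtree" file: dump every block as "base-top (count)" followed
 * by a summary of node count, cached registers and memory used.
 */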
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = rb_entry(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}
static int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}
static const struct file_operations rbtree_fops = {
        .open           = rbtree_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
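
/*
 * Allocate the rbtree context and seed the cache with the driver's
 * register defaults.
 */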
static int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
        rbtree_ctx->root = RB_ROOT;
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}
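
/*
 * Tear the cache down: erase every node from the tree, free its value
 * block and presence bitmap, then release the context itself.
 */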
static int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->cache_present);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}
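
/*
 * Cache read path.  Returns -ENOENT when the register is not covered by
 * any block or has never been written, so the caller can fall back to
 * reading the hardware.
 */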
static int regcache_rbtree_read(struct regmap *map,
                                unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!test_bit(reg_tmp, rbnode->cache_present))
                        return -ENOENT;
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
                return -ENOENT;
        }

        return 0;
}
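
/*
 * Grow an existing block so that it spans base_reg..top_reg, shifting the
 * current data and presence bits to their new offset before storing the
 * value for @reg.
 */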
static int regcache_rbtree_insert_to_block(struct regmap *map,
                                           struct regcache_rbtree_node *rbnode,
                                           unsigned int base_reg,
                                           unsigned int top_reg,
                                           unsigned int reg,
                                           unsigned int value)
{
        unsigned int blklen;
        unsigned int pos, offset;
        unsigned long *present;
        u8 *blk;

        blklen = (top_reg - base_reg) / map->reg_stride + 1;
        pos = (reg - base_reg) / map->reg_stride;
        offset = (rbnode->base_reg - base_reg) / map->reg_stride;

        blk = krealloc(rbnode->block,
                       blklen * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        rbnode->block = blk;

        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
                present = krealloc(rbnode->cache_present,
                                   BITS_TO_LONGS(blklen) * sizeof(*present),
                                   GFP_KERNEL);
                if (!present)
                        return -ENOMEM;

                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
                       * sizeof(*present));
        } else {
                present = rbnode->cache_present;
        }

        /* insert the register value in the correct place in the rbnode block */
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
                bitmap_shift_left(present, present, offset, blklen);
        }

        /* update the rbnode block, its size and the base register */
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = present;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}
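
/*
 * Allocate a new block covering @reg.  If a read table is available, the
 * matching yes-range is used to size the block; otherwise the block
 * starts out holding a single register.
 */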
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;

        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;

        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }

                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = (range->range_max - range->range_min) /
                                map->reg_stride + 1;
                        rbnode->base_reg = range->range_min;
                }
        }

        if (!rbnode->blklen) {
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
                                      GFP_KERNEL);
        if (!rbnode->block)
                goto err_free;

        rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
                                        sizeof(*rbnode->cache_present),
                                        GFP_KERNEL);
        if (!rbnode->cache_present)
                goto err_free_block;

        return rbnode;

err_free_block:
        kfree(rbnode->block);
err_free:
        kfree(rbnode);
        return NULL;
}
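
/*
 * Cache write path.  Update the block containing @reg if one exists,
 * otherwise try to extend a nearby block, and as a last resort allocate
 * and insert a brand new node.
 */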
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        int ret;

        rbtree_ctx = map->cache;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
                unsigned int base_reg, top_reg;
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
                unsigned int dist, best_dist = UINT_MAX;

                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
                if (reg < max_dist)
                        min = 0;
                else
                        min = reg - max_dist;
                max = reg + max_dist;

                /* look for an adjacent register to the one we are about to add */
                node = rbtree_ctx->root.rb_node;
                while (node) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);

                        regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
                                                         &base_reg, &top_reg);

                        if (base_reg <= max && top_reg >= min) {
                                if (reg < base_reg)
                                        dist = base_reg - reg;
                                else if (reg > top_reg)
                                        dist = reg - top_reg;
                                else
                                        dist = 0;
                                if (dist < best_dist) {
                                        rbnode = rbnode_tmp;
                                        best_dist = dist;
                                        new_base_reg = min(reg, base_reg);
                                        new_top_reg = max(reg, top_reg);
                                }
                        }

                        /*
                         * Keep looking, we want to choose the closest block,
                         * otherwise we might end up creating overlapping
                         * blocks, which breaks the rbtree.
                         */
                        if (reg < base_reg)
                                node = node->rb_left;
                        else if (reg > top_reg)
                                node = node->rb_right;
                        else
                                break;
                }

                if (rbnode) {
                        ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
                        rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode)
                        return -ENOMEM;
                regcache_rbtree_set_register(map, rbnode,
                                             reg - rbnode->base_reg, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}
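
/*
 * Write back cached values in the min..max range one block at a time,
 * then wait for any asynchronous I/O to complete.
 */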
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int start, end;
        int ret;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block,
                                          rbnode->cache_present,
                                          rbnode->base_reg, start, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}
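
/*
 * Discard cached values in the min..max range by clearing their presence
 * bits; the blocks themselves stay allocated.
 */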
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
                                unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode;
        struct rb_node *node;
        unsigned int base_reg, top_reg;
        unsigned int start, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                bitmap_clear(rbnode->cache_present, start, end - start);
        }

        return 0;
}
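
/* rbtree cache operations handed to the regcache core. */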
struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rbtree_debugfs_init,
#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
        .drop = regcache_rbtree_drop,
};