// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);
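
/*
 * Each rbtree node caches a contiguous block of registers. Blocks are
 * keyed by their base register, never overlap, and track which
 * registers actually hold a value via the cache_present bitmap.
 */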
struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
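
/*
 * Return the [base, top] register range covered by a block; the top
 * register accounts for the map's register stride.
 */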
static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}
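
/*
 * Find the block covering @reg. The most recently used block is tried
 * first as a cheap fast path; on a miss the tree is walked from the
 * root and the matching block becomes the new cached node.
 */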
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}
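
/*
 * Standard rbtree insertion: walk from the root to find the link to
 * hang the new node on, then rebalance with rb_insert_color().
 * Returns 0 without inserting if the node's base register already
 * falls inside an existing block.
 */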
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
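
/*
 * The debugfs "rbtree" file prints one "base-top (count)" line per
 * block, followed by totals and an estimate of the memory consumed.
 */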
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(rbtree);

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
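
/*
 * Allocate the cache context and seed the tree with the register
 * defaults; any write failure unwinds via regcache_rbtree_exit().
 */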
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}
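
/*
 * Free every block's value array and presence bitmap, then the
 * context itself. map->cache is cleared so a second call is a no-op.
 */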
static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}
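
/*
 * Cache lookup for a single register. -ENOENT means the register is
 * not cached; the regmap core is then expected to fall back to the
 * hardware (an assumption about the caller, not enforced here).
 */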
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}
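
/*
 * Extend an existing block so it spans [base_reg, top_reg] and store
 * @value at @reg. The value array is krealloc()ed; when registers are
 * added below the old base both the values and the presence bitmap
 * are shifted up to make room.
 */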
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       map->alloc_flags);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   map->alloc_flags);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
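
/*
 * Allocate a fresh block for @reg. If a read table is available it is
 * used to guess a block size covering the whole readable range around
 * @reg; otherwise the block starts out holding a single register.
 */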
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      map->alloc_flags);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					map->alloc_flags);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
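
/*
 * Cache a write. If no block covers @reg, the tree is searched for a
 * block within max_dist registers and that block is extended instead
 * of allocating a new node; the heuristic is, roughly, that padding
 * costing less than sizeof(*rbnode_tmp) bytes beats the overhead of
 * another node. Only when no neighbour qualifies is a new block made.
 */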
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
				&base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     (reg - rbnode->base_reg) / map->reg_stride,
					     value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
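
/*
 * Write back the cached registers in [min, max], one
 * regcache_sync_block() call per block, with async I/O enabled for
 * the duration of the walk.
 */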
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	map->async = true;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	map->async = false;

	return regmap_async_complete(map);
}
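
/*
 * Drop [min, max] from the cache by clearing the presence bits; the
 * storage itself stays allocated for later reuse.
 */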
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
			&top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
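
/*
 * Illustrative usage (a sketch, not part of this driver): a regmap
 * user opts into this cache through its regmap_config. The config
 * name and register sizes below are hypothetical.
 *
 *	static const struct regmap_config example_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0x7f,
 *		.cache_type	= REGCACHE_RBTREE,
 *	};
 */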