// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - rbtree caching support
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
};

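/*
 * Per-map cache context: the root of the rbtree plus a pointer to the
 * most recently used block, kept as a one-entry lookup cache so that
 * repeated accesses to the same block can skip the tree walk entirely.
 */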
struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

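/*
 * Look up the block covering @reg: check the cached node first, then
 * fall back to a binary search of the tree.  A hit found by the tree
 * walk is remembered in cached_rbnode for the next lookup.
 */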
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

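/*
 * Link a new block into the tree.  Returns 1 once the node has been
 * inserted and recoloured, or 0 if the node's base register already
 * falls inside an existing block.
 */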
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rbtree);

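/*
 * Reading the resulting debugfs file dumps one "base-top (count)" line
 * per block followed by a summary line, e.g. (illustrative values only):
 *
 *	0-1f (32)
 *	40-4f (16)
 *	2 nodes, 48 registers, average 24 registers, used 344 bytes
 */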
static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

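/*
 * Cache read: returns -ENOENT on a miss (no block covers the register,
 * or the register's present bit is clear) so that the regcache core can
 * fall back to reading the hardware.
 */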
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

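/*
 * Grow an existing block so that it spans [base_reg, top_reg] and store
 * @value at @reg.  For example, extending a block based at 0x10 down to
 * a new base of 0x0c (stride 1) gives offset = 4: the old data is moved
 * up four slots and the present bitmap is shifted to match.
 */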
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       map->alloc_flags);
	if (!blk)
		return -ENOMEM;

	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   map->alloc_flags);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

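/*
 * Allocate a fresh block.  If the register sits inside one of the read
 * table's yes_ranges, size the block to cover that whole range up front;
 * otherwise start with a single-register block.
 */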
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

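/*
 * Cache write.  On a miss, nearby blocks within max_dist registers are
 * considered for extension; max_dist is sized so that the storage spent
 * padding a block out can never exceed the overhead of allocating a
 * separate rbnode instead.
 */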
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

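/*
 * Write back all cached registers in [min, max] that are marked present,
 * one block at a time, then wait for any asynchronous I/O to complete.
 */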
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

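/*
 * Invalidate the cached values in [min, max] by clearing their present
 * bits; the underlying block storage is kept for reuse.
 */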
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};

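/*
 * A minimal sketch (not part of this file) of how a driver opts into this
 * cache: setting cache_type to REGCACHE_RBTREE in its regmap_config is
 * all that is required.  The register layout and defaults below are
 * invented for illustration only.
 */
#if 0	/* illustration only */
static const struct reg_default example_defaults[] = {
	{ .reg = 0x00, .def = 0x1234 },
	{ .reg = 0x01, .def = 0x0000 },
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = 0x7f,
	.reg_defaults = example_defaults,
	.num_reg_defaults = ARRAY_SIZE(example_defaults),
	.cache_type = REGCACHE_RBTREE,	/* selects regcache_rbtree_ops */
};

/* e.g. in probe(): map = devm_regmap_init_i2c(i2c, &example_regmap_config); */
#endif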