drivers/base/regmap/regcache-rbtree.c

/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* the actual rbtree node holding this block */
	struct rb_node node;
	/* base register handled by this block */
	unsigned int base_reg;
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* number of registers available in the block */
	unsigned int blklen;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

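/*
 * Return the first and last register covered by a block, taking the
 * register stride into account.
 */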
static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

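/*
 * Look up the block covering a register.  The most recently used block is
 * tried first as a fast path; otherwise the rbtree is walked and a hit
 * updates the cached node.
 */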
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							    unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}

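/*
 * Link a block into the rbtree, ordered by base register.  Returns 0 if a
 * block already covering the base register exists, 1 after inserting.
 */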
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

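/*
 * The "rbtree" debugfs file lists each block's register range and
 * summarises node count, register count and memory usage.
 */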
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

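/*
 * Allocate the rbtree context and seed the cache with the register
 * defaults provided by the driver.
 */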
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	rbtree_debugfs_init(map);

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

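/* Free every block in the rbtree and then the context itself. */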
static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

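/*
 * Read a register value from the cache.  Returns -ENOENT if no block
 * covers the register or the register has never been cached.
 */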
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}

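/*
 * Grow an existing block so that it covers [base_reg, top_reg] and store
 * the new value at register 'reg'.  Both the data block and the present
 * bitmap are reallocated and, if the base register moves down, shifted up
 * to their new positions.
 */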
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	present = krealloc(rbnode->cache_present,
			   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
	if (!present) {
		kfree(blk);
		return -ENOMEM;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

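/*
 * Allocate a new block for a register.  If a read range table is provided
 * the block is sized to cover the whole range containing the register,
 * otherwise a single register block is created.
 */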
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
				GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}

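/*
 * Store a register value in the cache: in the existing block that covers
 * it, merged into a nearby block when one lies within max_dist registers,
 * or in a newly allocated block.
 */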
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
		     node = rb_next(node)) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				new_base_reg = min(reg, base_reg);
				new_top_reg = max(reg, top_reg);
			} else {
				continue;
			}

			ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode_tmp;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}

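/*
 * Write back all cached registers in [min, max] that are marked present,
 * one block at a time, then wait for outstanding async I/O to complete.
 */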
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}

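/*
 * Drop cached values in [min, max] by clearing their present bits; the
 * blocks themselves remain allocated.
 */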
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
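
/*
 * Illustrative sketch, not part of the original file: a driver opts into
 * this backend through the cache_type field of its regmap_config before
 * creating the regmap, along the lines of
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= FOO_MAX_REG,
 *		.cache_type	= REGCACHE_RBTREE,
 *	};
 *
 * foo_regmap_config and FOO_MAX_REG are hypothetical names; the regcache
 * core then selects regcache_rbtree_ops when the map is initialised.
 */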