/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

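/*
 * Each rbtree node caches one block of adjacent registers.  The tree is
 * keyed by the block's base register, so a lookup walks at most the height
 * of the tree and then indexes directly into the block.
 */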
struct regcache_rbtree_node {
        /* the actual rbtree node holding this block */
        struct rb_node node;
        /* base register handled by this block */
        unsigned int base_reg;
        /* block of adjacent registers */
        void *block;
        /* number of registers available in the block */
        unsigned int blklen;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

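/* Compute the first and last register numbers covered by an rbnode. */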
static inline void regcache_rbtree_get_base_top_reg(
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + rbnode->blklen - 1;
}

static unsigned int regcache_rbtree_get_register(
        struct regcache_rbtree_node *rbnode, unsigned int idx,
        unsigned int word_size)
{
        return regcache_get_val(rbnode->block, idx, word_size);
}

static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
                                         unsigned int idx, unsigned int val,
                                         unsigned int word_size)
{
        regcache_set_val(rbnode->block, idx, val, word_size);
}

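/*
 * Find the rbnode whose block contains reg.  The most recently used node is
 * checked first as a fast path before the tree is walked.
 */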
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                           unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = container_of(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}

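/*
 * Link rbnode into the rbtree, ordered by base register.  Returns 0 if a
 * node already covering that base register exists, 1 once the new node has
 * been inserted.
 */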
static int regcache_rbtree_insert(struct rb_root *root,
                                  struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
                                          node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}

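/*
 * debugfs interface: dump each cached block as "base-top (count)" followed
 * by a summary line with node and register totals.
 */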
#ifdef CONFIG_DEBUG_FS
static int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        int nodes = 0;
        int registers = 0;
        int average;

        mutex_lock(&map->lock);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = container_of(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(n, &base, &top);
                seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);

                nodes++;
                registers += top - base + 1;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers\n",
                   nodes, registers, average);

        mutex_unlock(&map->lock);

        return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
        .open = rbtree_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#else
static void rbtree_debugfs_init(struct regmap *map)
{
}
#endif

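/*
 * Allocate the rbtree context and seed the cache with the register defaults
 * supplied through the regmap configuration.
 */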
static int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
        rbtree_ctx->root = RB_ROOT;
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        rbtree_debugfs_init(map);

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}

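/* Tear down the cache: free every block, every rbnode and the context. */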
static int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

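/*
 * Read a register value from the cache.  Returns -ENOENT if no block
 * contains the register.
 */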
static int regcache_rbtree_read(struct regmap *map,
                                unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = reg - rbnode->base_reg;
                *value = regcache_rbtree_get_register(rbnode, reg_tmp,
                                                      map->cache_word_size);
        } else {
                return -ENOENT;
        }

        return 0;
}

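/*
 * Grow an rbnode's block by one word and shift the tail up so that value
 * can be stored at position pos.  Inserting at position 0 also updates the
 * node's base register.
 */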
static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
                                           unsigned int pos, unsigned int reg,
                                           unsigned int value, unsigned int word_size)
{
        u8 *blk;

        blk = krealloc(rbnode->block,
                       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        /* insert the register value in the correct place in the rbnode block */
        memmove(blk + (pos + 1) * word_size,
                blk + pos * word_size,
                (rbnode->blklen - pos) * word_size);

        /* update the rbnode block, its size and the base register */
        rbnode->block = blk;
        rbnode->blklen++;
        if (!pos)
                rbnode->base_reg = reg;

        regcache_rbtree_set_register(rbnode, pos, value, word_size);
        return 0;
}

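/*
 * Update reg in the cache.  Three cases: the register already lives in a
 * block (overwrite it in place), it is adjacent to an existing block
 * (extend that block), or a new single-register rbnode is created for it.
 */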
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                 unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int val;
        unsigned int reg_tmp;
        unsigned int pos;
        int i;
        int ret;

        rbtree_ctx = map->cache;
        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = reg - rbnode->base_reg;
                val = regcache_rbtree_get_register(rbnode, reg_tmp,
                                                   map->cache_word_size);
                if (val == value)
                        return 0;
                regcache_rbtree_set_register(rbnode, reg_tmp, value,
                                             map->cache_word_size);
        } else {
                /* look for an adjacent register to the one we are about to add */
                for (node = rb_first(&rbtree_ctx->root); node;
                     node = rb_next(node)) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
                        for (i = 0; i < rbnode_tmp->blklen; i++) {
                                reg_tmp = rbnode_tmp->base_reg + i;
                                if (abs(reg_tmp - reg) != 1)
                                        continue;
                                /* decide where in the block to place our register */
                                if (reg_tmp + 1 == reg)
                                        pos = i + 1;
                                else
                                        pos = i;
                                ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
                                                                      reg, value,
                                                                      map->cache_word_size);
                                if (ret)
                                        return ret;
                                rbtree_ctx->cached_rbnode = rbnode_tmp;
                                return 0;
                        }
                }
                /* we did not manage to find a place to insert it in an existing
                 * block so create a new rbnode with a single register in its block.
                 * This block will get populated further if any other adjacent
                 * registers get modified in the future.
                 */
                rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
                if (!rbnode)
                        return -ENOMEM;
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
                rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
                                        GFP_KERNEL);
                if (!rbnode->block) {
                        kfree(rbnode);
                        return -ENOMEM;
                }
                regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
                regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}

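/*
 * Write back every cached value that differs from its hardware default,
 * temporarily bypassing the cache so the writes reach the device.
 */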
static int regcache_rbtree_sync(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int regtmp;
        unsigned int val;
        int ret;
        int i;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                for (i = 0; i < rbnode->blklen; i++) {
                        regtmp = rbnode->base_reg + i;
                        val = regcache_rbtree_get_register(rbnode, i,
                                                           map->cache_word_size);

                        /* Is this the hardware default?  If so skip.
                         * Look the default up by register address, not by
                         * the index within this block.
                         */
                        ret = regcache_lookup_reg(map, regtmp);
                        if (ret >= 0 && val == map->reg_defaults[ret].def)
                                continue;

                        map->cache_bypass = 1;
                        ret = _regmap_write(map, regtmp, val);
                        map->cache_bypass = 0;
                        if (ret)
                                return ret;
                        dev_dbg(map->dev, "Synced register %#x, value %#x\n",
                                regtmp, val);
                }
        }

        return 0;
}

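/* Ops table the regcache core uses when a regmap requests REGCACHE_RBTREE. */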
struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync
};
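
/*
 * Minimal usage sketch (illustrative only; "foo_regmap" is a hypothetical
 * name, and the fields shown assume the struct regmap_config additions that
 * accompany the regcache work):
 *
 *      static const struct regmap_config foo_regmap = {
 *              .reg_bits = 8,
 *              .val_bits = 16,
 *              .cache_type = REGCACHE_RBTREE,
 *      };
 *
 * A driver passing such a config to regmap_init_i2c() or regmap_init_spi()
 * has its cached reads and writes routed through the ops above.
 */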