/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"
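
/*
 * This file implements the debugfs interface for regmap.  Each map gets a
 * directory (named after the device, optionally suffixed with the map name)
 * under the "regmap" directory at the debugfs root, typically mounted at
 * /sys/kernel/debug.  The entries created below expose the register map
 * contents and access flags, plus cache controls for cached maps.
 */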

struct regmap_debugfs_node {
        struct regmap *map;
        const char *name;
        struct list_head link;
};

static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
        return snprintf(NULL, 0, "%x", max_val);
}
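
/*
 * Passing NULL/0 to snprintf() just measures the formatted length, so the
 * helper above returns the number of hex digits needed for the largest
 * register number; a max_val of 0x3ff, for example, yields 3.
 */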

static ssize_t regmap_name_read_file(struct file *file,
                                     char __user *user_buf, size_t count,
                                     loff_t *ppos)
{
        struct regmap *map = file->private_data;
        int ret;
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
        if (ret < 0) {
                kfree(buf);
                return ret;
        }

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
        kfree(buf);
        return ret;
}

static const struct file_operations regmap_name_fops = {
        .open = simple_open,
        .read = regmap_name_read_file,
        .llseek = default_llseek,
};

static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
        struct regmap_debugfs_off_cache *c;

        while (!list_empty(&map->debugfs_off_cache)) {
                c = list_first_entry(&map->debugfs_off_cache,
                                     struct regmap_debugfs_off_cache,
                                     list);
                list_del(&c->list);
                kfree(c);
        }
}
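
/*
 * Each regmap_debugfs_off_cache entry (declared in internal.h) describes one
 * contiguous block of printable registers: base_reg/max_reg give the first
 * and last register numbers, while min/max give the corresponding byte
 * offsets in the fixed-format "registers" file.  The cache lets a read at an
 * arbitrary file offset start from the right register without rescanning.
 */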

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                                                  unsigned int base,
                                                  loff_t from,
                                                  loff_t *pos)
{
        struct regmap_debugfs_off_cache *c = NULL;
        loff_t p = 0;
        unsigned int i, ret;
        unsigned int fpos_offset;
        unsigned int reg_offset;

        /* Suppress the cache if we're using a subrange */
        if (base)
                return base;

        /*
         * If we don't have a cache build one so we don't have to do a
         * linear scan each time.
         */
        mutex_lock(&map->cache_lock);
        i = base;
        if (list_empty(&map->debugfs_off_cache)) {
                for (; i <= map->max_register; i += map->reg_stride) {
                        /* Skip unprinted registers, closing off cache entry */
                        if (!regmap_readable(map, i) ||
                            regmap_precious(map, i)) {
                                if (c) {
                                        c->max = p - 1;
                                        c->max_reg = i - map->reg_stride;
                                        list_add_tail(&c->list,
                                                      &map->debugfs_off_cache);
                                        c = NULL;
                                }

                                continue;
                        }

                        /* No cache entry?  Start a new one */
                        if (!c) {
                                c = kzalloc(sizeof(*c), GFP_KERNEL);
                                if (!c) {
                                        regmap_debugfs_free_dump_cache(map);
                                        mutex_unlock(&map->cache_lock);
                                        return base;
                                }
                                c->min = p;
                                c->base_reg = i;
                        }

                        p += map->debugfs_tot_len;
                }
        }

        /* Close the last entry off if we didn't scan beyond it */
        if (c) {
                c->max = p - 1;
                c->max_reg = i - map->reg_stride;
                list_add_tail(&c->list,
                              &map->debugfs_off_cache);
        }

        /*
         * This should never happen; we return above if we fail to
         * allocate and we should never be in this code if there are
         * no registers at all.
         */
        WARN_ON(list_empty(&map->debugfs_off_cache));
        ret = base;

        /* Find the relevant block:offset */
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
                if (from >= c->min && from <= c->max) {
                        fpos_offset = from - c->min;
                        reg_offset = fpos_offset / map->debugfs_tot_len;
                        *pos = c->min + (reg_offset * map->debugfs_tot_len);
                        mutex_unlock(&map->cache_lock);
                        return c->base_reg + (reg_offset * map->reg_stride);
                }

                *pos = c->max;
                ret = c->max_reg;
        }
        mutex_unlock(&map->cache_lock);

        return ret;
}
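
/*
 * Worked example, assuming a hypothetical map with reg_stride 4 and
 * debugfs_tot_len 9: a cache entry covering registers 0x00-0x1c spans file
 * offsets 0-71.  A read starting at offset 40 falls inside that entry, so
 * reg_offset = 40 / 9 = 4, the dump resumes at register 4 * 4 = 0x10, and
 * *pos is snapped back to the line boundary at 4 * 9 = 36.
 */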

static inline void regmap_calc_tot_len(struct regmap *map,
                                       void *buf, size_t count)
{
        /* Calculate the length of a fixed format */
        if (!map->debugfs_tot_len) {
                map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
                map->debugfs_val_len = 2 * map->format.val_bytes;
                map->debugfs_tot_len = map->debugfs_reg_len +
                        map->debugfs_val_len + 3;      /* : \n */
        }
}
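
/*
 * For example, a map with max_register 0xff and 16-bit values would get
 * debugfs_reg_len = 2, debugfs_val_len = 4 and debugfs_tot_len = 9: each
 * dump line is "rr: vvvv\n", i.e. register, ": ", value and a newline.
 */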

static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
                                   unsigned int to, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        size_t buf_pos = 0;
        loff_t p = *ppos;
        ssize_t ret;
        int i;
        char *buf;
        unsigned int val, start_reg;

        if (*ppos < 0 || !count)
                return -EINVAL;

        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        regmap_calc_tot_len(map, buf, count);

        /* Work out which register we're starting at */
        start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

        for (i = start_reg; i <= to; i += map->reg_stride) {
                if (!regmap_readable(map, i))
                        continue;

                if (regmap_precious(map, i))
                        continue;

                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
                        if (buf_pos + map->debugfs_tot_len > count)
                                break;

                        /* Format the register */
                        snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
                                 map->debugfs_reg_len, i - from);
                        buf_pos += map->debugfs_reg_len + 2;

                        /* Format the value, write all X if we can't read */
                        ret = regmap_read(map, i, &val);
                        if (ret == 0)
                                snprintf(buf + buf_pos, count - buf_pos,
                                         "%.*x", map->debugfs_val_len, val);
                        else
                                memset(buf + buf_pos, 'X',
                                       map->debugfs_val_len);
                        buf_pos += 2 * map->format.val_bytes;

                        buf[buf_pos++] = '\n';
                }
                p += map->debugfs_tot_len;
        }

        ret = buf_pos;

        if (copy_to_user(user_buf, buf, buf_pos)) {
                ret = -EFAULT;
                goto out;
        }

        *ppos += buf_pos;

out:
        kfree(buf);
        return ret;
}
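
/*
 * The resulting "registers" dump is one fixed-width line per register, with
 * addresses relative to 'from' and values in hex, for example:
 *
 *   00: 001a
 *   04: 0000
 *   08: XXXX   <- register is printable but could not be read
 */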

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
                                    size_t count, loff_t *ppos)
{
        struct regmap *map = file->private_data;

        return regmap_read_debugfs(map, 0, map->max_register, user_buf,
                                   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially with clients such as PMICs, so no real
 * compile-time configuration option is provided for this feature; people
 * who want to use it will need to modify the source code directly.
 */
static ssize_t regmap_map_write_file(struct file *file,
                                     const char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        char buf[32];
        size_t buf_size;
        char *start = buf;
        unsigned long reg, value;
        struct regmap *map = file->private_data;
        int ret;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = 0;

        while (*start == ' ')
                start++;
        reg = simple_strtoul(start, &start, 16);
        while (*start == ' ')
                start++;
        if (kstrtoul(start, 16, &value))
                return -EINVAL;

        /* Userspace has been fiddling around behind the kernel's back */
        add_taint(TAINT_USER, LOCKDEP_STILL_OK);

        ret = regmap_write(map, reg, value);
        if (ret < 0)
                return ret;
        return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
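
/*
 * When REGMAP_ALLOW_WRITE_DEBUGFS is defined, the "registers" file also
 * accepts writes of the form "<reg> <value>", both in hex, e.g.:
 *
 *   echo "0x10 0x3" > registers
 *
 * which is parsed above and passed straight to regmap_write().
 */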

static const struct file_operations regmap_map_fops = {
        .open = simple_open,
        .read = regmap_map_read_file,
        .write = regmap_map_write_file,
        .llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct regmap_range_node *range = file->private_data;
        struct regmap *map = range->map;

        return regmap_read_debugfs(map, range->range_min, range->range_max,
                                   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
        .open = simple_open,
        .read = regmap_range_read_file,
        .llseek = default_llseek,
};

static ssize_t regmap_reg_ranges_read_file(struct file *file,
                                           char __user *user_buf, size_t count,
                                           loff_t *ppos)
{
        struct regmap *map = file->private_data;
        struct regmap_debugfs_off_cache *c;
        loff_t p = 0;
        size_t buf_pos = 0;
        char *buf;
        char *entry;
        int ret;
        unsigned int entry_len;

        if (*ppos < 0 || !count)
                return -EINVAL;

        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!entry) {
                kfree(buf);
                return -ENOMEM;
        }

        /* While we are at it, build the register dump cache
         * now so the read() operation on the `registers' file
         * can benefit from using the cache.  We do not care
         * about the file position information that is contained
         * in the cache, just about the actual register blocks */
        regmap_calc_tot_len(map, buf, count);
        regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

        /* Reset file pointer as the fixed-format of the `registers'
         * file is not compatible with the `range' file */
        p = 0;
        mutex_lock(&map->cache_lock);
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
                entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
                                     c->base_reg, c->max_reg);
                if (p >= *ppos) {
                        if (buf_pos + entry_len > count)
                                break;
                        memcpy(buf + buf_pos, entry, entry_len);
                        buf_pos += entry_len;
                }
                p += entry_len;
        }
        mutex_unlock(&map->cache_lock);

        kfree(entry);
        ret = buf_pos;

        if (copy_to_user(user_buf, buf, buf_pos)) {
                ret = -EFAULT;
                goto out_buf;
        }

        *ppos += buf_pos;
out_buf:
        kfree(buf);
        return ret;
}
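
/*
 * The "range" file lists the printable register blocks found by the offset
 * cache, one "first-last" pair per line in hex, for example:
 *
 *   0-1c
 *   30-4c
 */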

static const struct file_operations regmap_reg_ranges_fops = {
        .open = simple_open,
        .read = regmap_reg_ranges_read_file,
        .llseek = default_llseek,
};

static ssize_t regmap_access_read_file(struct file *file,
                                       char __user *user_buf, size_t count,
                                       loff_t *ppos)
{
        int reg_len, tot_len;
        size_t buf_pos = 0;
        loff_t p = 0;
        ssize_t ret;
        int i;
        struct regmap *map = file->private_data;
        char *buf;

        if (*ppos < 0 || !count)
                return -EINVAL;

        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Calculate the length of a fixed format */
        reg_len = regmap_calc_reg_len(map->max_register);
        tot_len = reg_len + 10; /* ': R W V P\n' */

        for (i = 0; i <= map->max_register; i += map->reg_stride) {
                /* Ignore registers which are neither readable nor writable */
                if (!regmap_readable(map, i) && !regmap_writeable(map, i))
                        continue;

                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
                        if (buf_pos + tot_len + 1 >= count)
                                break;

                        /* Format the register */
                        snprintf(buf + buf_pos, count - buf_pos,
                                 "%.*x: %c %c %c %c\n",
                                 reg_len, i,
                                 regmap_readable(map, i) ? 'y' : 'n',
                                 regmap_writeable(map, i) ? 'y' : 'n',
                                 regmap_volatile(map, i) ? 'y' : 'n',
                                 regmap_precious(map, i) ? 'y' : 'n');

                        buf_pos += tot_len;
                }
                p += tot_len;
        }

        ret = buf_pos;

        if (copy_to_user(user_buf, buf, buf_pos)) {
                ret = -EFAULT;
                goto out;
        }

        *ppos += buf_pos;

out:
        kfree(buf);
        return ret;
}
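
/*
 * Each "access" line reports the readable/writeable/volatile/precious flags
 * for one register, for example:
 *
 *   00: y y n n
 *   04: n y n n
 */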

static const struct file_operations regmap_access_fops = {
        .open = simple_open,
        .read = regmap_access_read_file,
        .llseek = default_llseek,
};

static ssize_t regmap_cache_only_write_file(struct file *file,
                                            const char __user *user_buf,
                                            size_t count, loff_t *ppos)
{
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_only);
        ssize_t result;
        bool was_enabled, require_sync = false;
        int err;

        map->lock(map->lock_arg);

        was_enabled = map->cache_only;

        result = debugfs_write_file_bool(file, user_buf, count, ppos);
        if (result < 0) {
                map->unlock(map->lock_arg);
                return result;
        }

        if (map->cache_only && !was_enabled) {
                dev_warn(map->dev, "debugfs cache_only=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
        } else if (!map->cache_only && was_enabled) {
                dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
                require_sync = true;
        }

        map->unlock(map->lock_arg);

        if (require_sync) {
                err = regcache_sync(map);
                if (err)
                        dev_err(map->dev, "Failed to sync cache %d\n", err);
        }

        return result;
}
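
/*
 * Writing a boolean to "cache_only" flips map->cache_only under the map
 * lock; clearing it triggers a regcache_sync() so that values written while
 * the cache was detached from the hardware are pushed out.  Forcing it on
 * taints the kernel, since userspace is changing device state behind the
 * driver's back.
 */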

static const struct file_operations regmap_cache_only_fops = {
        .open = simple_open,
        .read = debugfs_read_file_bool,
        .write = regmap_cache_only_write_file,
};

static ssize_t regmap_cache_bypass_write_file(struct file *file,
                                              const char __user *user_buf,
                                              size_t count, loff_t *ppos)
{
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_bypass);
        ssize_t result;
        bool was_enabled;

        map->lock(map->lock_arg);

        was_enabled = map->cache_bypass;

        result = debugfs_write_file_bool(file, user_buf, count, ppos);
        if (result < 0)
                goto out;

        if (map->cache_bypass && !was_enabled) {
                dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
        } else if (!map->cache_bypass && was_enabled) {
                dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
        }

out:
        map->unlock(map->lock_arg);

        return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
        .open = simple_open,
        .read = debugfs_read_file_bool,
        .write = regmap_cache_bypass_write_file,
};

void regmap_debugfs_init(struct regmap *map, const char *name)
{
        struct rb_node *next;
        struct regmap_range_node *range_node;
        const char *devname = "dummy";

        /* If we don't have the debugfs root yet, postpone init */
        if (!regmap_debugfs_root) {
                struct regmap_debugfs_node *node;

                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return;
                node->map = map;
                node->name = name;
                mutex_lock(&regmap_debugfs_early_lock);
                list_add(&node->link, &regmap_debugfs_early_list);
                mutex_unlock(&regmap_debugfs_early_lock);
                return;
        }

        INIT_LIST_HEAD(&map->debugfs_off_cache);
        mutex_init(&map->cache_lock);

        if (map->dev)
                devname = dev_name(map->dev);

        if (name) {
                map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                              devname, name);
                name = map->debugfs_name;
        } else {
                name = devname;
        }

        map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
        if (!map->debugfs) {
                dev_warn(map->dev, "Failed to create debugfs directory\n");
                return;
        }

        debugfs_create_file("name", 0400, map->debugfs,
                            map, &regmap_name_fops);

        debugfs_create_file("range", 0400, map->debugfs,
                            map, &regmap_reg_ranges_fops);

        if (map->max_register || regmap_readable(map, 0)) {
                umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
                registers_mode = 0600;
#else
                registers_mode = 0400;
#endif

                debugfs_create_file("registers", registers_mode, map->debugfs,
                                    map, &regmap_map_fops);
                debugfs_create_file("access", 0400, map->debugfs,
                                    map, &regmap_access_fops);
        }

        if (map->cache_type) {
                debugfs_create_file("cache_only", 0600, map->debugfs,
                                    &map->cache_only, &regmap_cache_only_fops);
                debugfs_create_bool("cache_dirty", 0400, map->debugfs,
                                    &map->cache_dirty);
                debugfs_create_file("cache_bypass", 0600, map->debugfs,
                                    &map->cache_bypass,
                                    &regmap_cache_bypass_fops);
        }

        next = rb_first(&map->range_tree);
        while (next) {
                range_node = rb_entry(next, struct regmap_range_node, node);

                if (range_node->name)
                        debugfs_create_file(range_node->name, 0400,
                                            map->debugfs, range_node,
                                            &regmap_range_fops);

                next = rb_next(&range_node->node);
        }

        if (map->cache_ops && map->cache_ops->debugfs_init)
                map->cache_ops->debugfs_init(map);
}

void regmap_debugfs_exit(struct regmap *map)
{
        if (map->debugfs) {
                debugfs_remove_recursive(map->debugfs);
                mutex_lock(&map->cache_lock);
                regmap_debugfs_free_dump_cache(map);
                mutex_unlock(&map->cache_lock);
                kfree(map->debugfs_name);
        } else {
                struct regmap_debugfs_node *node, *tmp;

                mutex_lock(&regmap_debugfs_early_lock);
                list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
                                         link) {
                        if (node->map == map) {
                                list_del(&node->link);
                                kfree(node);
                        }
                }
                mutex_unlock(&regmap_debugfs_early_lock);
        }
}

void regmap_debugfs_initcall(void)
{
        struct regmap_debugfs_node *node, *tmp;

        regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
        if (!regmap_debugfs_root) {
                pr_warn("regmap: Failed to create debugfs root\n");
                return;
        }

        mutex_lock(&regmap_debugfs_early_lock);
        list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
                regmap_debugfs_init(node->map, node->name);
                list_del(&node->link);
                kfree(node);
        }
        mutex_unlock(&regmap_debugfs_early_lock);
}