// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - debugfs
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"
18 struct regmap_debugfs_node
{
21 struct list_head link
;
/* Counter used to uniquify debugfs dirs for nameless ("dummy") maps */
static unsigned int dummy_index;
/* Root "regmap" directory in debugfs; NULL until the initcall runs */
static struct dentry *regmap_debugfs_root;
/* Maps registered before debugfs was ready, protected by the mutex below */
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);
/*
 * Calculate the number of hex digits needed to print any register
 * address up to @max_val (snprintf with a NULL buffer only measures).
 */
static size_t regmap_calc_reg_len(int max_val)
{
	return snprintf(NULL, 0, "%x", max_val);
}
35 static ssize_t
regmap_name_read_file(struct file
*file
,
36 char __user
*user_buf
, size_t count
,
39 struct regmap
*map
= file
->private_data
;
40 const char *name
= "nodev";
44 buf
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
48 if (map
->dev
&& map
->dev
->driver
)
49 name
= map
->dev
->driver
->name
;
51 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n", name
);
57 ret
= simple_read_from_buffer(user_buf
, count
, ppos
, buf
, ret
);
62 static const struct file_operations regmap_name_fops
= {
64 .read
= regmap_name_read_file
,
65 .llseek
= default_llseek
,
68 static void regmap_debugfs_free_dump_cache(struct regmap
*map
)
70 struct regmap_debugfs_off_cache
*c
;
72 while (!list_empty(&map
->debugfs_off_cache
)) {
73 c
= list_first_entry(&map
->debugfs_off_cache
,
74 struct regmap_debugfs_off_cache
,
81 static bool regmap_printable(struct regmap
*map
, unsigned int reg
)
83 if (regmap_precious(map
, reg
))
86 if (!regmap_readable(map
, reg
) && !regmap_cached(map
, reg
))
93 * Work out where the start offset maps into register numbers, bearing
94 * in mind that we suppress hidden registers.
96 static unsigned int regmap_debugfs_get_dump_start(struct regmap
*map
,
101 struct regmap_debugfs_off_cache
*c
= NULL
;
104 unsigned int fpos_offset
;
105 unsigned int reg_offset
;
107 /* Suppress the cache if we're using a subrange */
112 * If we don't have a cache build one so we don't have to do a
113 * linear scan each time.
115 mutex_lock(&map
->cache_lock
);
117 if (list_empty(&map
->debugfs_off_cache
)) {
118 for (; i
<= map
->max_register
; i
+= map
->reg_stride
) {
119 /* Skip unprinted registers, closing off cache entry */
120 if (!regmap_printable(map
, i
)) {
123 c
->max_reg
= i
- map
->reg_stride
;
124 list_add_tail(&c
->list
,
125 &map
->debugfs_off_cache
);
132 /* No cache entry? Start a new one */
134 c
= kzalloc(sizeof(*c
), GFP_KERNEL
);
136 regmap_debugfs_free_dump_cache(map
);
137 mutex_unlock(&map
->cache_lock
);
144 p
+= map
->debugfs_tot_len
;
148 /* Close the last entry off if we didn't scan beyond it */
151 c
->max_reg
= i
- map
->reg_stride
;
152 list_add_tail(&c
->list
,
153 &map
->debugfs_off_cache
);
157 * This should never happen; we return above if we fail to
158 * allocate and we should never be in this code if there are
159 * no registers at all.
161 WARN_ON(list_empty(&map
->debugfs_off_cache
));
164 /* Find the relevant block:offset */
165 list_for_each_entry(c
, &map
->debugfs_off_cache
, list
) {
166 if (from
>= c
->min
&& from
<= c
->max
) {
167 fpos_offset
= from
- c
->min
;
168 reg_offset
= fpos_offset
/ map
->debugfs_tot_len
;
169 *pos
= c
->min
+ (reg_offset
* map
->debugfs_tot_len
);
170 mutex_unlock(&map
->cache_lock
);
171 return c
->base_reg
+ (reg_offset
* map
->reg_stride
);
177 mutex_unlock(&map
->cache_lock
);
182 static inline void regmap_calc_tot_len(struct regmap
*map
,
183 void *buf
, size_t count
)
185 /* Calculate the length of a fixed format */
186 if (!map
->debugfs_tot_len
) {
187 map
->debugfs_reg_len
= regmap_calc_reg_len(map
->max_register
),
188 map
->debugfs_val_len
= 2 * map
->format
.val_bytes
;
189 map
->debugfs_tot_len
= map
->debugfs_reg_len
+
190 map
->debugfs_val_len
+ 3; /* : \n */
194 static int regmap_next_readable_reg(struct regmap
*map
, int reg
)
196 struct regmap_debugfs_off_cache
*c
;
199 if (regmap_printable(map
, reg
+ map
->reg_stride
)) {
200 ret
= reg
+ map
->reg_stride
;
202 mutex_lock(&map
->cache_lock
);
203 list_for_each_entry(c
, &map
->debugfs_off_cache
, list
) {
204 if (reg
> c
->max_reg
)
206 if (reg
< c
->base_reg
) {
211 mutex_unlock(&map
->cache_lock
);
216 static ssize_t
regmap_read_debugfs(struct regmap
*map
, unsigned int from
,
217 unsigned int to
, char __user
*user_buf
,
218 size_t count
, loff_t
*ppos
)
225 unsigned int val
, start_reg
;
227 if (*ppos
< 0 || !count
)
230 if (count
> (PAGE_SIZE
<< (MAX_ORDER
- 1)))
231 count
= PAGE_SIZE
<< (MAX_ORDER
- 1);
233 buf
= kmalloc(count
, GFP_KERNEL
);
237 regmap_calc_tot_len(map
, buf
, count
);
239 /* Work out which register we're starting at */
240 start_reg
= regmap_debugfs_get_dump_start(map
, from
, *ppos
, &p
);
242 for (i
= start_reg
; i
>= 0 && i
<= to
;
243 i
= regmap_next_readable_reg(map
, i
)) {
245 /* If we're in the region the user is trying to read */
247 /* ...but not beyond it */
248 if (buf_pos
+ map
->debugfs_tot_len
> count
)
251 /* Format the register */
252 snprintf(buf
+ buf_pos
, count
- buf_pos
, "%.*x: ",
253 map
->debugfs_reg_len
, i
- from
);
254 buf_pos
+= map
->debugfs_reg_len
+ 2;
256 /* Format the value, write all X if we can't read */
257 ret
= regmap_read(map
, i
, &val
);
259 snprintf(buf
+ buf_pos
, count
- buf_pos
,
260 "%.*x", map
->debugfs_val_len
, val
);
262 memset(buf
+ buf_pos
, 'X',
263 map
->debugfs_val_len
);
264 buf_pos
+= 2 * map
->format
.val_bytes
;
266 buf
[buf_pos
++] = '\n';
268 p
+= map
->debugfs_tot_len
;
273 if (copy_to_user(user_buf
, buf
, buf_pos
)) {
285 static ssize_t
regmap_map_read_file(struct file
*file
, char __user
*user_buf
,
286 size_t count
, loff_t
*ppos
)
288 struct regmap
*map
= file
->private_data
;
290 return regmap_read_debugfs(map
, 0, map
->max_register
, user_buf
,
#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous especially when we have clients such as
 * PMICs, therefore don't provide any real compile time configuration option
 * for this feature, people who want to use this will need to modify
 * the source code directly.
 *
 * Parses "reg value" (both hex) from userspace and writes the register.
 */
static ssize_t regmap_map_write_file(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	char *start = buf;
	unsigned long reg, value;
	struct regmap *map = file->private_data;
	int ret;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = 0;

	while (*start == ' ')
		start++;
	reg = simple_strtoul(start, &start, 16);
	while (*start == ' ')
		start++;
	if (kstrtoul(start, 16, &value))
		return -EINVAL;

	/* Userspace has been fiddling around behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = regmap_write(map, reg, value);
	if (ret < 0)
		return ret;
	return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif
338 static const struct file_operations regmap_map_fops
= {
340 .read
= regmap_map_read_file
,
341 .write
= regmap_map_write_file
,
342 .llseek
= default_llseek
,
345 static ssize_t
regmap_range_read_file(struct file
*file
, char __user
*user_buf
,
346 size_t count
, loff_t
*ppos
)
348 struct regmap_range_node
*range
= file
->private_data
;
349 struct regmap
*map
= range
->map
;
351 return regmap_read_debugfs(map
, range
->range_min
, range
->range_max
,
352 user_buf
, count
, ppos
);
355 static const struct file_operations regmap_range_fops
= {
357 .read
= regmap_range_read_file
,
358 .llseek
= default_llseek
,
361 static ssize_t
regmap_reg_ranges_read_file(struct file
*file
,
362 char __user
*user_buf
, size_t count
,
365 struct regmap
*map
= file
->private_data
;
366 struct regmap_debugfs_off_cache
*c
;
374 if (*ppos
< 0 || !count
)
377 if (count
> (PAGE_SIZE
<< (MAX_ORDER
- 1)))
378 count
= PAGE_SIZE
<< (MAX_ORDER
- 1);
380 buf
= kmalloc(count
, GFP_KERNEL
);
384 entry
= kmalloc(PAGE_SIZE
, GFP_KERNEL
);
390 /* While we are at it, build the register dump cache
391 * now so the read() operation on the `registers' file
392 * can benefit from using the cache. We do not care
393 * about the file position information that is contained
394 * in the cache, just about the actual register blocks */
395 regmap_calc_tot_len(map
, buf
, count
);
396 regmap_debugfs_get_dump_start(map
, 0, *ppos
, &p
);
398 /* Reset file pointer as the fixed-format of the `registers'
399 * file is not compatible with the `range' file */
401 mutex_lock(&map
->cache_lock
);
402 list_for_each_entry(c
, &map
->debugfs_off_cache
, list
) {
403 entry_len
= snprintf(entry
, PAGE_SIZE
, "%x-%x\n",
404 c
->base_reg
, c
->max_reg
);
406 if (buf_pos
+ entry_len
> count
)
408 memcpy(buf
+ buf_pos
, entry
, entry_len
);
409 buf_pos
+= entry_len
;
413 mutex_unlock(&map
->cache_lock
);
418 if (copy_to_user(user_buf
, buf
, buf_pos
)) {
429 static const struct file_operations regmap_reg_ranges_fops
= {
431 .read
= regmap_reg_ranges_read_file
,
432 .llseek
= default_llseek
,
435 static int regmap_access_show(struct seq_file
*s
, void *ignored
)
437 struct regmap
*map
= s
->private;
440 reg_len
= regmap_calc_reg_len(map
->max_register
);
442 for (i
= 0; i
<= map
->max_register
; i
+= map
->reg_stride
) {
443 /* Ignore registers which are neither readable nor writable */
444 if (!regmap_readable(map
, i
) && !regmap_writeable(map
, i
))
447 /* Format the register */
448 seq_printf(s
, "%.*x: %c %c %c %c\n", reg_len
, i
,
449 regmap_readable(map
, i
) ? 'y' : 'n',
450 regmap_writeable(map
, i
) ? 'y' : 'n',
451 regmap_volatile(map
, i
) ? 'y' : 'n',
452 regmap_precious(map
, i
) ? 'y' : 'n');
458 DEFINE_SHOW_ATTRIBUTE(regmap_access
);
460 static ssize_t
regmap_cache_only_write_file(struct file
*file
,
461 const char __user
*user_buf
,
462 size_t count
, loff_t
*ppos
)
464 struct regmap
*map
= container_of(file
->private_data
,
465 struct regmap
, cache_only
);
466 bool new_val
, require_sync
= false;
469 err
= kstrtobool_from_user(user_buf
, count
, &new_val
);
470 /* Ignore malforned data like debugfs_write_file_bool() */
474 err
= debugfs_file_get(file
->f_path
.dentry
);
478 map
->lock(map
->lock_arg
);
480 if (new_val
&& !map
->cache_only
) {
481 dev_warn(map
->dev
, "debugfs cache_only=Y forced\n");
482 add_taint(TAINT_USER
, LOCKDEP_STILL_OK
);
483 } else if (!new_val
&& map
->cache_only
) {
484 dev_warn(map
->dev
, "debugfs cache_only=N forced: syncing cache\n");
487 map
->cache_only
= new_val
;
489 map
->unlock(map
->lock_arg
);
490 debugfs_file_put(file
->f_path
.dentry
);
493 err
= regcache_sync(map
);
495 dev_err(map
->dev
, "Failed to sync cache %d\n", err
);
501 static const struct file_operations regmap_cache_only_fops
= {
503 .read
= debugfs_read_file_bool
,
504 .write
= regmap_cache_only_write_file
,
507 static ssize_t
regmap_cache_bypass_write_file(struct file
*file
,
508 const char __user
*user_buf
,
509 size_t count
, loff_t
*ppos
)
511 struct regmap
*map
= container_of(file
->private_data
,
512 struct regmap
, cache_bypass
);
516 err
= kstrtobool_from_user(user_buf
, count
, &new_val
);
517 /* Ignore malforned data like debugfs_write_file_bool() */
521 err
= debugfs_file_get(file
->f_path
.dentry
);
525 map
->lock(map
->lock_arg
);
527 if (new_val
&& !map
->cache_bypass
) {
528 dev_warn(map
->dev
, "debugfs cache_bypass=Y forced\n");
529 add_taint(TAINT_USER
, LOCKDEP_STILL_OK
);
530 } else if (!new_val
&& map
->cache_bypass
) {
531 dev_warn(map
->dev
, "debugfs cache_bypass=N forced\n");
533 map
->cache_bypass
= new_val
;
535 map
->unlock(map
->lock_arg
);
536 debugfs_file_put(file
->f_path
.dentry
);
541 static const struct file_operations regmap_cache_bypass_fops
= {
543 .read
= debugfs_read_file_bool
,
544 .write
= regmap_cache_bypass_write_file
,
547 void regmap_debugfs_init(struct regmap
*map
, const char *name
)
549 struct rb_node
*next
;
550 struct regmap_range_node
*range_node
;
551 const char *devname
= "dummy";
554 * Userspace can initiate reads from the hardware over debugfs.
555 * Normally internal regmap structures and buffers are protected with
556 * a mutex or a spinlock, but if the regmap owner decided to disable
557 * all locking mechanisms, this is no longer the case. For safety:
558 * don't create the debugfs entries if locking is disabled.
560 if (map
->debugfs_disable
) {
561 dev_dbg(map
->dev
, "regmap locking disabled - not creating debugfs entries\n");
565 /* If we don't have the debugfs root yet, postpone init */
566 if (!regmap_debugfs_root
) {
567 struct regmap_debugfs_node
*node
;
568 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
573 mutex_lock(®map_debugfs_early_lock
);
574 list_add(&node
->link
, ®map_debugfs_early_list
);
575 mutex_unlock(®map_debugfs_early_lock
);
579 INIT_LIST_HEAD(&map
->debugfs_off_cache
);
580 mutex_init(&map
->cache_lock
);
583 devname
= dev_name(map
->dev
);
586 map
->debugfs_name
= kasprintf(GFP_KERNEL
, "%s-%s",
588 name
= map
->debugfs_name
;
593 if (!strcmp(name
, "dummy")) {
594 kfree(map
->debugfs_name
);
596 map
->debugfs_name
= kasprintf(GFP_KERNEL
, "dummy%d",
598 name
= map
->debugfs_name
;
602 map
->debugfs
= debugfs_create_dir(name
, regmap_debugfs_root
);
604 debugfs_create_file("name", 0400, map
->debugfs
,
605 map
, ®map_name_fops
);
607 debugfs_create_file("range", 0400, map
->debugfs
,
608 map
, ®map_reg_ranges_fops
);
610 if (map
->max_register
|| regmap_readable(map
, 0)) {
611 umode_t registers_mode
;
613 #if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
614 registers_mode
= 0600;
616 registers_mode
= 0400;
619 debugfs_create_file("registers", registers_mode
, map
->debugfs
,
620 map
, ®map_map_fops
);
621 debugfs_create_file("access", 0400, map
->debugfs
,
622 map
, ®map_access_fops
);
625 if (map
->cache_type
) {
626 debugfs_create_file("cache_only", 0600, map
->debugfs
,
627 &map
->cache_only
, ®map_cache_only_fops
);
628 debugfs_create_bool("cache_dirty", 0400, map
->debugfs
,
630 debugfs_create_file("cache_bypass", 0600, map
->debugfs
,
632 ®map_cache_bypass_fops
);
635 next
= rb_first(&map
->range_tree
);
637 range_node
= rb_entry(next
, struct regmap_range_node
, node
);
639 if (range_node
->name
)
640 debugfs_create_file(range_node
->name
, 0400,
641 map
->debugfs
, range_node
,
644 next
= rb_next(&range_node
->node
);
647 if (map
->cache_ops
&& map
->cache_ops
->debugfs_init
)
648 map
->cache_ops
->debugfs_init(map
);
651 void regmap_debugfs_exit(struct regmap
*map
)
654 debugfs_remove_recursive(map
->debugfs
);
655 mutex_lock(&map
->cache_lock
);
656 regmap_debugfs_free_dump_cache(map
);
657 mutex_unlock(&map
->cache_lock
);
658 kfree(map
->debugfs_name
);
660 struct regmap_debugfs_node
*node
, *tmp
;
662 mutex_lock(®map_debugfs_early_lock
);
663 list_for_each_entry_safe(node
, tmp
, ®map_debugfs_early_list
,
665 if (node
->map
== map
) {
666 list_del(&node
->link
);
670 mutex_unlock(®map_debugfs_early_lock
);
674 void regmap_debugfs_initcall(void)
676 struct regmap_debugfs_node
*node
, *tmp
;
678 regmap_debugfs_root
= debugfs_create_dir("regmap", NULL
);
680 mutex_lock(®map_debugfs_early_lock
);
681 list_for_each_entry_safe(node
, tmp
, ®map_debugfs_early_list
, link
) {
682 regmap_debugfs_init(node
->map
, node
->name
);
683 list_del(&node
->link
);
686 mutex_unlock(®map_debugfs_early_lock
);