// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"
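/*
 * The maple tree stores each block of contiguous cached registers as a
 * single entry: an array of unsigned long values indexed by a register's
 * offset from the start of the range covered by that entry.
 */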
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}
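/*
 * Writes update an existing entry in place where possible; on a cache
 * miss a new entry is built, merging with any adjacent entries so that
 * contiguous registers stay in a single block.
 */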
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}
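	/*
	 * Allocate one block spanning [index, last] so the new value and
	 * any neighbouring entries end up in a single contiguous entry.
	 */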
	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);
	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}
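/*
 * Dropping a register range may leave partial blocks on either side of
 * the dropped span; those remainders are copied out and re-inserted as
 * new entries.
 */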
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);
	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);
		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}
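/*
 * Write one contiguous run of dirty registers, [min, max), back to the
 * device. The maple state is paused and RCU dropped while the bus I/O
 * happens, then RCU is re-taken before returning to the caller's
 * iteration.
 */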
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overhead.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}
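/*
 * Sync walks every cached entry in [min, max], batching consecutive
 * registers that need syncing into ranges and handing each range to
 * regcache_maple_sync_block().
 */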
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}
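/*
 * Tear the cache down: free every entry stored in the tree, destroy the
 * tree itself and drop the allocation hanging off the regmap.
 */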
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned long *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}
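/*
 * Populate one entry covering the registers reg_defaults[first..last],
 * which the caller guarantees are contiguous.
 */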
static int regcache_maple_insert_block(struct regmap *map, int first,
				       int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}
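/*
 * Set up the tree and seed it from reg_defaults, grouping runs of
 * contiguous register addresses into single blocks.
 */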
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), map->alloc_flags);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}
struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};