1 // SPDX-License-Identifier: GPL-2.0
5 // Copyright 2023 Arm Ltd
7 #include <kunit/device.h>
8 #include <kunit/resource.h>
9 #include <kunit/test.h>
12 #define BLOCK_TEST_SIZE 12
14 KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action
, regmap_exit
, struct regmap
*);
/*
 * Per-suite state stashed in test->priv; dev is the KUnit device that
 * backs the regmaps created by gen_regmap() (see regmap_init_ram() call).
 */
struct regmap_test_priv {
	struct device *dev;
};
20 struct regmap_test_param
{
21 enum regcache_type cache
;
22 enum regmap_endian val_endian
;
24 unsigned int from_reg
;
28 static void get_changed_bytes(void *orig
, void *new, size_t size
)
34 get_random_bytes(new, size
);
37 * This could be nicer and more efficient but we shouldn't
40 for (i
= 0; i
< size
; i
++)
42 get_random_bytes(&n
[i
], 1);
45 static const struct regmap_config test_regmap_config
= {
47 .val_bits
= sizeof(unsigned int) * 8,
50 static const char *regcache_type_name(enum regcache_type type
)
66 static const char *regmap_endian_name(enum regmap_endian endian
)
69 case REGMAP_ENDIAN_BIG
:
71 case REGMAP_ENDIAN_LITTLE
:
73 case REGMAP_ENDIAN_DEFAULT
:
75 case REGMAP_ENDIAN_NATIVE
:
82 static void param_to_desc(const struct regmap_test_param
*param
, char *desc
)
84 snprintf(desc
, KUNIT_PARAM_DESC_SIZE
, "%s-%s%s @%#x",
85 regcache_type_name(param
->cache
),
86 regmap_endian_name(param
->val_endian
),
87 param
->fast_io
? " fast I/O" : "",
91 static const struct regmap_test_param regcache_types_list
[] = {
92 { .cache
= REGCACHE_NONE
},
93 { .cache
= REGCACHE_NONE
, .fast_io
= true },
94 { .cache
= REGCACHE_FLAT
},
95 { .cache
= REGCACHE_FLAT
, .fast_io
= true },
96 { .cache
= REGCACHE_RBTREE
},
97 { .cache
= REGCACHE_RBTREE
, .fast_io
= true },
98 { .cache
= REGCACHE_MAPLE
},
99 { .cache
= REGCACHE_MAPLE
, .fast_io
= true },
102 KUNIT_ARRAY_PARAM(regcache_types
, regcache_types_list
, param_to_desc
);
104 static const struct regmap_test_param real_cache_types_only_list
[] = {
105 { .cache
= REGCACHE_FLAT
},
106 { .cache
= REGCACHE_FLAT
, .fast_io
= true },
107 { .cache
= REGCACHE_RBTREE
},
108 { .cache
= REGCACHE_RBTREE
, .fast_io
= true },
109 { .cache
= REGCACHE_MAPLE
},
110 { .cache
= REGCACHE_MAPLE
, .fast_io
= true },
113 KUNIT_ARRAY_PARAM(real_cache_types_only
, real_cache_types_only_list
, param_to_desc
);
115 static const struct regmap_test_param real_cache_types_list
[] = {
116 { .cache
= REGCACHE_FLAT
, .from_reg
= 0 },
117 { .cache
= REGCACHE_FLAT
, .from_reg
= 0, .fast_io
= true },
118 { .cache
= REGCACHE_FLAT
, .from_reg
= 0x2001 },
119 { .cache
= REGCACHE_FLAT
, .from_reg
= 0x2002 },
120 { .cache
= REGCACHE_FLAT
, .from_reg
= 0x2003 },
121 { .cache
= REGCACHE_FLAT
, .from_reg
= 0x2004 },
122 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0 },
123 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0, .fast_io
= true },
124 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2001 },
125 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2002 },
126 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2003 },
127 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2004 },
128 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0 },
129 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0, .fast_io
= true },
130 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2001 },
131 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2002 },
132 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2003 },
133 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2004 },
136 KUNIT_ARRAY_PARAM(real_cache_types
, real_cache_types_list
, param_to_desc
);
138 static const struct regmap_test_param sparse_cache_types_list
[] = {
139 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0 },
140 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0, .fast_io
= true },
141 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2001 },
142 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2002 },
143 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2003 },
144 { .cache
= REGCACHE_RBTREE
, .from_reg
= 0x2004 },
145 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0 },
146 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0, .fast_io
= true },
147 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2001 },
148 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2002 },
149 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2003 },
150 { .cache
= REGCACHE_MAPLE
, .from_reg
= 0x2004 },
153 KUNIT_ARRAY_PARAM(sparse_cache_types
, sparse_cache_types_list
, param_to_desc
);
155 static struct regmap
*gen_regmap(struct kunit
*test
,
156 struct regmap_config
*config
,
157 struct regmap_ram_data
**data
)
159 const struct regmap_test_param
*param
= test
->param_value
;
160 struct regmap_test_priv
*priv
= test
->priv
;
162 struct regmap
*ret
= ERR_PTR(-ENOMEM
);
165 struct reg_default
*defaults
;
167 config
->cache_type
= param
->cache
;
168 config
->fast_io
= param
->fast_io
;
170 if (config
->max_register
== 0) {
171 config
->max_register
= param
->from_reg
;
172 if (config
->num_reg_defaults
)
173 config
->max_register
+= (config
->num_reg_defaults
- 1) *
176 config
->max_register
+= (BLOCK_TEST_SIZE
* config
->reg_stride
);
179 size
= array_size(config
->max_register
+ 1, sizeof(*buf
));
180 buf
= kmalloc(size
, GFP_KERNEL
);
182 return ERR_PTR(-ENOMEM
);
184 get_random_bytes(buf
, size
);
186 *data
= kzalloc(sizeof(**data
), GFP_KERNEL
);
191 if (config
->num_reg_defaults
) {
192 defaults
= kunit_kcalloc(test
,
193 config
->num_reg_defaults
,
194 sizeof(struct reg_default
),
199 config
->reg_defaults
= defaults
;
201 for (i
= 0; i
< config
->num_reg_defaults
; i
++) {
202 defaults
[i
].reg
= param
->from_reg
+ (i
* config
->reg_stride
);
203 defaults
[i
].def
= buf
[param
->from_reg
+ (i
* config
->reg_stride
)];
207 ret
= regmap_init_ram(priv
->dev
, config
, *data
);
211 /* This calls regmap_exit() on failure, which frees buf and *data */
212 error
= kunit_add_action_or_reset(test
, regmap_exit_action
, ret
);
214 ret
= ERR_PTR(error
);
225 static bool reg_5_false(struct device
*dev
, unsigned int reg
)
227 struct kunit
*test
= dev_get_drvdata(dev
);
228 const struct regmap_test_param
*param
= test
->param_value
;
230 return reg
!= (param
->from_reg
+ 5);
233 static void basic_read_write(struct kunit
*test
)
236 struct regmap_config config
;
237 struct regmap_ram_data
*data
;
238 unsigned int val
, rval
;
240 config
= test_regmap_config
;
242 map
= gen_regmap(test
, &config
, &data
);
243 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
247 get_random_bytes(&val
, sizeof(val
));
249 /* If we write a value to a register we can read it back */
250 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, 0, val
));
251 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, 0, &rval
));
252 KUNIT_EXPECT_EQ(test
, val
, rval
);
254 /* If using a cache the cache satisfied the read */
255 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[0]);
258 static void bulk_write(struct kunit
*test
)
261 struct regmap_config config
;
262 struct regmap_ram_data
*data
;
263 unsigned int val
[BLOCK_TEST_SIZE
], rval
[BLOCK_TEST_SIZE
];
266 config
= test_regmap_config
;
268 map
= gen_regmap(test
, &config
, &data
);
269 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
273 get_random_bytes(&val
, sizeof(val
));
276 * Data written via the bulk API can be read back with single
279 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, 0, val
,
281 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
282 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
[i
]));
284 KUNIT_EXPECT_MEMEQ(test
, val
, rval
, sizeof(val
));
286 /* If using a cache the cache satisfied the read */
287 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
288 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
291 static void bulk_read(struct kunit
*test
)
294 struct regmap_config config
;
295 struct regmap_ram_data
*data
;
296 unsigned int val
[BLOCK_TEST_SIZE
], rval
[BLOCK_TEST_SIZE
];
299 config
= test_regmap_config
;
301 map
= gen_regmap(test
, &config
, &data
);
302 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
306 get_random_bytes(&val
, sizeof(val
));
308 /* Data written as single writes can be read via the bulk API */
309 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
310 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, val
[i
]));
311 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, 0, rval
,
313 KUNIT_EXPECT_MEMEQ(test
, val
, rval
, sizeof(val
));
315 /* If using a cache the cache satisfied the read */
316 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
317 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
320 static void multi_write(struct kunit
*test
)
323 struct regmap_config config
;
324 struct regmap_ram_data
*data
;
325 struct reg_sequence sequence
[BLOCK_TEST_SIZE
];
326 unsigned int val
[BLOCK_TEST_SIZE
], rval
[BLOCK_TEST_SIZE
];
329 config
= test_regmap_config
;
331 map
= gen_regmap(test
, &config
, &data
);
332 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
336 get_random_bytes(&val
, sizeof(val
));
339 * Data written via the multi API can be read back with single
342 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
344 sequence
[i
].def
= val
[i
];
345 sequence
[i
].delay_us
= 0;
347 KUNIT_EXPECT_EQ(test
, 0,
348 regmap_multi_reg_write(map
, sequence
, BLOCK_TEST_SIZE
));
349 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
350 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
[i
]));
352 KUNIT_EXPECT_MEMEQ(test
, val
, rval
, sizeof(val
));
354 /* If using a cache the cache satisfied the read */
355 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
356 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
359 static void multi_read(struct kunit
*test
)
362 struct regmap_config config
;
363 struct regmap_ram_data
*data
;
364 unsigned int regs
[BLOCK_TEST_SIZE
];
365 unsigned int val
[BLOCK_TEST_SIZE
], rval
[BLOCK_TEST_SIZE
];
368 config
= test_regmap_config
;
370 map
= gen_regmap(test
, &config
, &data
);
371 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
375 get_random_bytes(&val
, sizeof(val
));
377 /* Data written as single writes can be read via the multi API */
378 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
380 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, val
[i
]));
382 KUNIT_EXPECT_EQ(test
, 0,
383 regmap_multi_reg_read(map
, regs
, rval
, BLOCK_TEST_SIZE
));
384 KUNIT_EXPECT_MEMEQ(test
, val
, rval
, sizeof(val
));
386 /* If using a cache the cache satisfied the read */
387 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
388 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
391 static void read_bypassed(struct kunit
*test
)
393 const struct regmap_test_param
*param
= test
->param_value
;
395 struct regmap_config config
;
396 struct regmap_ram_data
*data
;
397 unsigned int val
[BLOCK_TEST_SIZE
], rval
;
400 config
= test_regmap_config
;
402 map
= gen_regmap(test
, &config
, &data
);
403 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
407 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
409 get_random_bytes(&val
, sizeof(val
));
411 /* Write some test values */
412 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, param
->from_reg
, val
, ARRAY_SIZE(val
)));
414 regcache_cache_only(map
, true);
417 * While in cache-only regmap_read_bypassed() should return the register
418 * value and leave the map in cache-only.
420 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
421 /* Put inverted bits in rval to prove we really read the value */
423 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ i
, &rval
));
424 KUNIT_EXPECT_EQ(test
, val
[i
], rval
);
427 KUNIT_EXPECT_EQ(test
, 0, regmap_read_bypassed(map
, param
->from_reg
+ i
, &rval
));
428 KUNIT_EXPECT_EQ(test
, val
[i
], rval
);
429 KUNIT_EXPECT_TRUE(test
, map
->cache_only
);
430 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
434 * Change the underlying register values to prove it is returning
435 * real values not cached values.
437 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
439 data
->vals
[param
->from_reg
+ i
] = val
[i
];
442 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
444 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ i
, &rval
));
445 KUNIT_EXPECT_NE(test
, val
[i
], rval
);
448 KUNIT_EXPECT_EQ(test
, 0, regmap_read_bypassed(map
, param
->from_reg
+ i
, &rval
));
449 KUNIT_EXPECT_EQ(test
, val
[i
], rval
);
450 KUNIT_EXPECT_TRUE(test
, map
->cache_only
);
451 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
455 static void read_bypassed_volatile(struct kunit
*test
)
457 const struct regmap_test_param
*param
= test
->param_value
;
459 struct regmap_config config
;
460 struct regmap_ram_data
*data
;
461 unsigned int val
[BLOCK_TEST_SIZE
], rval
;
464 config
= test_regmap_config
;
465 /* All registers except #5 volatile */
466 config
.volatile_reg
= reg_5_false
;
468 map
= gen_regmap(test
, &config
, &data
);
469 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
473 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
475 get_random_bytes(&val
, sizeof(val
));
477 /* Write some test values */
478 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, param
->from_reg
, val
, ARRAY_SIZE(val
)));
480 regcache_cache_only(map
, true);
483 * While in cache-only regmap_read_bypassed() should return the register
484 * value and leave the map in cache-only.
486 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
487 /* Register #5 is non-volatile so should read from cache */
488 KUNIT_EXPECT_EQ(test
, (i
== 5) ? 0 : -EBUSY
,
489 regmap_read(map
, param
->from_reg
+ i
, &rval
));
491 /* Put inverted bits in rval to prove we really read the value */
493 KUNIT_EXPECT_EQ(test
, 0, regmap_read_bypassed(map
, param
->from_reg
+ i
, &rval
));
494 KUNIT_EXPECT_EQ(test
, val
[i
], rval
);
495 KUNIT_EXPECT_TRUE(test
, map
->cache_only
);
496 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
500 * Change the underlying register values to prove it is returning
501 * real values not cached values.
503 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
505 data
->vals
[param
->from_reg
+ i
] = val
[i
];
508 for (i
= 0; i
< ARRAY_SIZE(val
); i
++) {
513 KUNIT_EXPECT_EQ(test
, 0, regmap_read_bypassed(map
, param
->from_reg
+ i
, &rval
));
514 KUNIT_EXPECT_EQ(test
, val
[i
], rval
);
515 KUNIT_EXPECT_TRUE(test
, map
->cache_only
);
516 KUNIT_EXPECT_FALSE(test
, map
->cache_bypass
);
520 static void write_readonly(struct kunit
*test
)
523 struct regmap_config config
;
524 struct regmap_ram_data
*data
;
528 config
= test_regmap_config
;
529 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
530 config
.writeable_reg
= reg_5_false
;
532 map
= gen_regmap(test
, &config
, &data
);
533 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
537 get_random_bytes(&val
, sizeof(val
));
539 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
540 data
->written
[i
] = false;
542 /* Change the value of all registers, readonly should fail */
543 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
544 KUNIT_EXPECT_EQ(test
, i
!= 5, regmap_write(map
, i
, val
) == 0);
546 /* Did that match what we see on the device? */
547 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
548 KUNIT_EXPECT_EQ(test
, i
!= 5, data
->written
[i
]);
551 static void read_writeonly(struct kunit
*test
)
554 struct regmap_config config
;
555 struct regmap_ram_data
*data
;
559 config
= test_regmap_config
;
560 config
.readable_reg
= reg_5_false
;
562 map
= gen_regmap(test
, &config
, &data
);
563 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
567 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
568 data
->read
[i
] = false;
571 * Try to read all the registers, the writeonly one should
572 * fail if we aren't using the flat cache.
574 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
575 if (config
.cache_type
!= REGCACHE_FLAT
) {
576 KUNIT_EXPECT_EQ(test
, i
!= 5,
577 regmap_read(map
, i
, &val
) == 0);
579 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &val
));
583 /* Did we trigger a hardware access? */
584 KUNIT_EXPECT_FALSE(test
, data
->read
[5]);
587 static void reg_defaults(struct kunit
*test
)
590 struct regmap_config config
;
591 struct regmap_ram_data
*data
;
592 unsigned int rval
[BLOCK_TEST_SIZE
];
595 config
= test_regmap_config
;
596 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
598 map
= gen_regmap(test
, &config
, &data
);
599 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
603 /* Read back the expected default data */
604 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, 0, rval
,
606 KUNIT_EXPECT_MEMEQ(test
, data
->vals
, rval
, sizeof(rval
));
608 /* The data should have been read from cache if there was one */
609 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
610 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
613 static void reg_defaults_read_dev(struct kunit
*test
)
616 struct regmap_config config
;
617 struct regmap_ram_data
*data
;
618 unsigned int rval
[BLOCK_TEST_SIZE
];
621 config
= test_regmap_config
;
622 config
.num_reg_defaults_raw
= BLOCK_TEST_SIZE
;
624 map
= gen_regmap(test
, &config
, &data
);
625 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
629 /* We should have read the cache defaults back from the map */
630 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
631 KUNIT_EXPECT_EQ(test
, config
.cache_type
!= REGCACHE_NONE
, data
->read
[i
]);
632 data
->read
[i
] = false;
635 /* Read back the expected default data */
636 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, 0, rval
,
638 KUNIT_EXPECT_MEMEQ(test
, data
->vals
, rval
, sizeof(rval
));
640 /* The data should have been read from cache if there was one */
641 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
642 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
645 static void register_patch(struct kunit
*test
)
648 struct regmap_config config
;
649 struct regmap_ram_data
*data
;
650 struct reg_sequence patch
[2];
651 unsigned int rval
[BLOCK_TEST_SIZE
];
654 /* We need defaults so readback works */
655 config
= test_regmap_config
;
656 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
658 map
= gen_regmap(test
, &config
, &data
);
659 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
663 /* Stash the original values */
664 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, 0, rval
,
667 /* Patch a couple of values */
669 patch
[0].def
= rval
[2] + 1;
670 patch
[0].delay_us
= 0;
672 patch
[1].def
= rval
[5] + 1;
673 patch
[1].delay_us
= 0;
674 KUNIT_EXPECT_EQ(test
, 0, regmap_register_patch(map
, patch
,
677 /* Only the patched registers are written */
678 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
682 KUNIT_EXPECT_TRUE(test
, data
->written
[i
]);
683 KUNIT_EXPECT_EQ(test
, data
->vals
[i
], rval
[i
] + 1);
686 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
687 KUNIT_EXPECT_EQ(test
, data
->vals
[i
], rval
[i
]);
693 static void stride(struct kunit
*test
)
696 struct regmap_config config
;
697 struct regmap_ram_data
*data
;
701 config
= test_regmap_config
;
702 config
.reg_stride
= 2;
703 config
.num_reg_defaults
= BLOCK_TEST_SIZE
/ 2;
706 * Allow one extra register so that the read/written arrays
707 * are sized big enough to include an entry for the odd
708 * address past the final reg_default register.
710 config
.max_register
= BLOCK_TEST_SIZE
;
712 map
= gen_regmap(test
, &config
, &data
);
713 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
717 /* Only even addresses can be accessed, try both read and write */
718 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
719 data
->read
[i
] = false;
720 data
->written
[i
] = false;
723 KUNIT_EXPECT_NE(test
, 0, regmap_read(map
, i
, &rval
));
724 KUNIT_EXPECT_NE(test
, 0, regmap_write(map
, i
, rval
));
725 KUNIT_EXPECT_FALSE(test
, data
->read
[i
]);
726 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
728 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
));
729 KUNIT_EXPECT_EQ(test
, data
->vals
[i
], rval
);
730 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
,
733 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, rval
));
734 KUNIT_EXPECT_TRUE(test
, data
->written
[i
]);
739 static struct regmap_range_cfg test_range
= {
741 .selector_mask
= 0xff,
750 static bool test_range_window_volatile(struct device
*dev
, unsigned int reg
)
752 if (reg
>= test_range
.window_start
&&
753 reg
<= test_range
.window_start
+ test_range
.window_len
)
759 static bool test_range_all_volatile(struct device
*dev
, unsigned int reg
)
761 if (test_range_window_volatile(dev
, reg
))
764 if (reg
>= test_range
.range_min
&& reg
<= test_range
.range_max
)
770 static void basic_ranges(struct kunit
*test
)
773 struct regmap_config config
;
774 struct regmap_ram_data
*data
;
778 config
= test_regmap_config
;
779 config
.volatile_reg
= test_range_all_volatile
;
780 config
.ranges
= &test_range
;
781 config
.num_ranges
= 1;
782 config
.max_register
= test_range
.range_max
;
784 map
= gen_regmap(test
, &config
, &data
);
785 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
789 for (i
= test_range
.range_min
; i
< test_range
.range_max
; i
++) {
790 data
->read
[i
] = false;
791 data
->written
[i
] = false;
794 /* Reset the page to a non-zero value to trigger a change */
795 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, test_range
.selector_reg
,
796 test_range
.range_max
));
798 /* Check we set the page and use the window for writes */
799 data
->written
[test_range
.selector_reg
] = false;
800 data
->written
[test_range
.window_start
] = false;
801 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, test_range
.range_min
, 0));
802 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
803 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.window_start
]);
805 data
->written
[test_range
.selector_reg
] = false;
806 data
->written
[test_range
.window_start
] = false;
807 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
,
808 test_range
.range_min
+
809 test_range
.window_len
,
811 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
812 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.window_start
]);
815 data
->written
[test_range
.selector_reg
] = false;
816 data
->read
[test_range
.window_start
] = false;
817 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, test_range
.range_min
, &val
));
818 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
819 KUNIT_EXPECT_TRUE(test
, data
->read
[test_range
.window_start
]);
821 data
->written
[test_range
.selector_reg
] = false;
822 data
->read
[test_range
.window_start
] = false;
823 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
,
824 test_range
.range_min
+
825 test_range
.window_len
,
827 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
828 KUNIT_EXPECT_TRUE(test
, data
->read
[test_range
.window_start
]);
830 /* No physical access triggered in the virtual range */
831 for (i
= test_range
.range_min
; i
< test_range
.range_max
; i
++) {
832 KUNIT_EXPECT_FALSE(test
, data
->read
[i
]);
833 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
837 /* Try to stress dynamic creation of cache data structures */
838 static void stress_insert(struct kunit
*test
)
841 struct regmap_config config
;
842 struct regmap_ram_data
*data
;
843 unsigned int rval
, *vals
;
847 config
= test_regmap_config
;
848 config
.max_register
= 300;
850 map
= gen_regmap(test
, &config
, &data
);
851 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
855 buf_sz
= array_size(sizeof(*vals
), config
.max_register
);
856 vals
= kunit_kmalloc(test
, buf_sz
, GFP_KERNEL
);
857 KUNIT_ASSERT_FALSE(test
, vals
== NULL
);
859 get_random_bytes(vals
, buf_sz
);
861 /* Write data into the map/cache in ever decreasing strides */
862 for (i
= 0; i
< config
.max_register
; i
+= 100)
863 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
864 for (i
= 0; i
< config
.max_register
; i
+= 50)
865 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
866 for (i
= 0; i
< config
.max_register
; i
+= 25)
867 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
868 for (i
= 0; i
< config
.max_register
; i
+= 10)
869 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
870 for (i
= 0; i
< config
.max_register
; i
+= 5)
871 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
872 for (i
= 0; i
< config
.max_register
; i
+= 3)
873 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
874 for (i
= 0; i
< config
.max_register
; i
+= 2)
875 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
876 for (i
= 0; i
< config
.max_register
; i
++)
877 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, i
, vals
[i
]));
879 /* Do reads from the cache (if there is one) match? */
880 for (i
= 0; i
< config
.max_register
; i
++) {
881 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
));
882 KUNIT_EXPECT_EQ(test
, rval
, vals
[i
]);
883 KUNIT_EXPECT_EQ(test
, config
.cache_type
== REGCACHE_NONE
, data
->read
[i
]);
887 static void cache_bypass(struct kunit
*test
)
889 const struct regmap_test_param
*param
= test
->param_value
;
891 struct regmap_config config
;
892 struct regmap_ram_data
*data
;
893 unsigned int val
, rval
;
895 config
= test_regmap_config
;
897 map
= gen_regmap(test
, &config
, &data
);
898 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
902 get_random_bytes(&val
, sizeof(val
));
904 /* Ensure the cache has a value in it */
905 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
, val
));
907 /* Bypass then write a different value */
908 regcache_cache_bypass(map
, true);
909 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
, val
+ 1));
911 /* Read the bypassed value */
912 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
, &rval
));
913 KUNIT_EXPECT_EQ(test
, val
+ 1, rval
);
914 KUNIT_EXPECT_EQ(test
, data
->vals
[param
->from_reg
], rval
);
916 /* Disable bypass, the cache should still return the original value */
917 regcache_cache_bypass(map
, false);
918 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
, &rval
));
919 KUNIT_EXPECT_EQ(test
, val
, rval
);
922 static void cache_sync_marked_dirty(struct kunit
*test
)
924 const struct regmap_test_param
*param
= test
->param_value
;
926 struct regmap_config config
;
927 struct regmap_ram_data
*data
;
928 unsigned int val
[BLOCK_TEST_SIZE
];
931 config
= test_regmap_config
;
933 map
= gen_regmap(test
, &config
, &data
);
934 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
938 get_random_bytes(&val
, sizeof(val
));
940 /* Put some data into the cache */
941 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, param
->from_reg
, val
,
943 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
944 data
->written
[param
->from_reg
+ i
] = false;
946 /* Trash the data on the device itself then resync */
947 regcache_mark_dirty(map
);
948 memset(data
->vals
, 0, sizeof(val
));
949 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
951 /* Did we just write the correct data out? */
952 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], val
, sizeof(val
));
953 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
954 KUNIT_EXPECT_EQ(test
, true, data
->written
[param
->from_reg
+ i
]);
957 static void cache_sync_after_cache_only(struct kunit
*test
)
959 const struct regmap_test_param
*param
= test
->param_value
;
961 struct regmap_config config
;
962 struct regmap_ram_data
*data
;
963 unsigned int val
[BLOCK_TEST_SIZE
];
964 unsigned int val_mask
;
967 config
= test_regmap_config
;
969 map
= gen_regmap(test
, &config
, &data
);
970 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
974 val_mask
= GENMASK(config
.val_bits
- 1, 0);
975 get_random_bytes(&val
, sizeof(val
));
977 /* Put some data into the cache */
978 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, param
->from_reg
, val
,
980 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
981 data
->written
[param
->from_reg
+ i
] = false;
983 /* Set cache-only and change the values */
984 regcache_cache_only(map
, true);
985 for (i
= 0; i
< ARRAY_SIZE(val
); ++i
)
986 val
[i
] = ~val
[i
] & val_mask
;
988 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, param
->from_reg
, val
,
990 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
991 KUNIT_EXPECT_FALSE(test
, data
->written
[param
->from_reg
+ i
]);
993 KUNIT_EXPECT_MEMNEQ(test
, &data
->vals
[param
->from_reg
], val
, sizeof(val
));
995 /* Exit cache-only and sync the cache without marking hardware registers dirty */
996 regcache_cache_only(map
, false);
998 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1000 /* Did we just write the correct data out? */
1001 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], val
, sizeof(val
));
1002 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1003 KUNIT_EXPECT_TRUE(test
, data
->written
[param
->from_reg
+ i
]);
1006 static void cache_sync_defaults_marked_dirty(struct kunit
*test
)
1008 const struct regmap_test_param
*param
= test
->param_value
;
1010 struct regmap_config config
;
1011 struct regmap_ram_data
*data
;
1015 config
= test_regmap_config
;
1016 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1018 map
= gen_regmap(test
, &config
, &data
);
1019 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1023 get_random_bytes(&val
, sizeof(val
));
1025 /* Change the value of one register */
1026 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ 2, val
));
1029 regcache_mark_dirty(map
);
1030 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1031 data
->written
[param
->from_reg
+ i
] = false;
1032 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1034 /* Did we just sync the one register we touched? */
1035 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1036 KUNIT_EXPECT_EQ(test
, i
== 2, data
->written
[param
->from_reg
+ i
]);
1038 /* Rewrite registers back to their defaults */
1039 for (i
= 0; i
< config
.num_reg_defaults
; ++i
)
1040 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, config
.reg_defaults
[i
].reg
,
1041 config
.reg_defaults
[i
].def
));
1044 * Resync after regcache_mark_dirty() should not write out registers
1045 * that are at default value
1047 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1048 data
->written
[param
->from_reg
+ i
] = false;
1049 regcache_mark_dirty(map
);
1050 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1051 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1052 KUNIT_EXPECT_FALSE(test
, data
->written
[param
->from_reg
+ i
]);
1055 static void cache_sync_default_after_cache_only(struct kunit
*test
)
1057 const struct regmap_test_param
*param
= test
->param_value
;
1059 struct regmap_config config
;
1060 struct regmap_ram_data
*data
;
1061 unsigned int orig_val
;
1064 config
= test_regmap_config
;
1065 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1067 map
= gen_regmap(test
, &config
, &data
);
1068 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1072 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ 2, &orig_val
));
1074 /* Enter cache-only and change the value of one register */
1075 regcache_cache_only(map
, true);
1076 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ 2, orig_val
+ 1));
1078 /* Exit cache-only and resync, should write out the changed register */
1079 regcache_cache_only(map
, false);
1080 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1081 data
->written
[param
->from_reg
+ i
] = false;
1082 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1084 /* Was the register written out? */
1085 KUNIT_EXPECT_TRUE(test
, data
->written
[param
->from_reg
+ 2]);
1086 KUNIT_EXPECT_EQ(test
, data
->vals
[param
->from_reg
+ 2], orig_val
+ 1);
1088 /* Enter cache-only and write register back to its default value */
1089 regcache_cache_only(map
, true);
1090 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ 2, orig_val
));
1092 /* Resync should write out the new value */
1093 regcache_cache_only(map
, false);
1094 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1095 data
->written
[param
->from_reg
+ i
] = false;
1097 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1098 KUNIT_EXPECT_TRUE(test
, data
->written
[param
->from_reg
+ 2]);
1099 KUNIT_EXPECT_EQ(test
, data
->vals
[param
->from_reg
+ 2], orig_val
);
1102 static void cache_sync_readonly(struct kunit
*test
)
1104 const struct regmap_test_param
*param
= test
->param_value
;
1106 struct regmap_config config
;
1107 struct regmap_ram_data
*data
;
1111 config
= test_regmap_config
;
1112 config
.writeable_reg
= reg_5_false
;
1114 map
= gen_regmap(test
, &config
, &data
);
1115 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1119 /* Read all registers to fill the cache */
1120 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1121 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ i
, &val
));
1123 /* Change the value of all registers, readonly should fail */
1124 get_random_bytes(&val
, sizeof(val
));
1125 regcache_cache_only(map
, true);
1126 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1127 KUNIT_EXPECT_EQ(test
, i
!= 5, regmap_write(map
, param
->from_reg
+ i
, val
) == 0);
1128 regcache_cache_only(map
, false);
1131 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1132 data
->written
[param
->from_reg
+ i
] = false;
1133 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1135 /* Did that match what we see on the device? */
1136 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1137 KUNIT_EXPECT_EQ(test
, i
!= 5, data
->written
[param
->from_reg
+ i
]);
1140 static void cache_sync_patch(struct kunit
*test
)
1142 const struct regmap_test_param
*param
= test
->param_value
;
1144 struct regmap_config config
;
1145 struct regmap_ram_data
*data
;
1146 struct reg_sequence patch
[2];
1147 unsigned int rval
[BLOCK_TEST_SIZE
], val
;
1150 /* We need defaults so readback works */
1151 config
= test_regmap_config
;
1152 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1154 map
= gen_regmap(test
, &config
, &data
);
1155 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1159 /* Stash the original values */
1160 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1163 /* Patch a couple of values */
1164 patch
[0].reg
= param
->from_reg
+ 2;
1165 patch
[0].def
= rval
[2] + 1;
1166 patch
[0].delay_us
= 0;
1167 patch
[1].reg
= param
->from_reg
+ 5;
1168 patch
[1].def
= rval
[5] + 1;
1169 patch
[1].delay_us
= 0;
1170 KUNIT_EXPECT_EQ(test
, 0, regmap_register_patch(map
, patch
,
1171 ARRAY_SIZE(patch
)));
1173 /* Sync the cache */
1174 regcache_mark_dirty(map
);
1175 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1176 data
->written
[param
->from_reg
+ i
] = false;
1177 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1179 /* The patch should be on the device but not in the cache */
1180 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
1181 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ i
, &val
));
1182 KUNIT_EXPECT_EQ(test
, val
, rval
[i
]);
1187 KUNIT_EXPECT_EQ(test
, true, data
->written
[param
->from_reg
+ i
]);
1188 KUNIT_EXPECT_EQ(test
, data
->vals
[param
->from_reg
+ i
], rval
[i
] + 1);
1191 KUNIT_EXPECT_EQ(test
, false, data
->written
[param
->from_reg
+ i
]);
1192 KUNIT_EXPECT_EQ(test
, data
->vals
[param
->from_reg
+ i
], rval
[i
]);
1198 static void cache_drop(struct kunit
*test
)
1200 const struct regmap_test_param
*param
= test
->param_value
;
1202 struct regmap_config config
;
1203 struct regmap_ram_data
*data
;
1204 unsigned int rval
[BLOCK_TEST_SIZE
];
1207 config
= test_regmap_config
;
1208 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1210 map
= gen_regmap(test
, &config
, &data
);
1211 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1215 /* Ensure the data is read from the cache */
1216 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1217 data
->read
[param
->from_reg
+ i
] = false;
1218 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1220 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++) {
1221 KUNIT_EXPECT_FALSE(test
, data
->read
[param
->from_reg
+ i
]);
1222 data
->read
[param
->from_reg
+ i
] = false;
1224 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], rval
, sizeof(rval
));
1226 /* Drop some registers */
1227 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, param
->from_reg
+ 3,
1228 param
->from_reg
+ 5));
1230 /* Reread and check only the dropped registers hit the device. */
1231 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1233 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1234 KUNIT_EXPECT_EQ(test
, data
->read
[param
->from_reg
+ i
], i
>= 3 && i
<= 5);
1235 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], rval
, sizeof(rval
));
1238 static void cache_drop_with_non_contiguous_ranges(struct kunit
*test
)
1240 const struct regmap_test_param
*param
= test
->param_value
;
1242 struct regmap_config config
;
1243 struct regmap_ram_data
*data
;
1244 unsigned int val
[4][BLOCK_TEST_SIZE
];
1246 const int num_ranges
= ARRAY_SIZE(val
) * 2;
1249 static_assert(ARRAY_SIZE(val
) == 4);
1251 config
= test_regmap_config
;
1252 config
.max_register
= param
->from_reg
+ (num_ranges
* BLOCK_TEST_SIZE
);
1254 map
= gen_regmap(test
, &config
, &data
);
1255 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1259 for (i
= 0; i
< config
.max_register
+ 1; i
++)
1260 data
->written
[i
] = false;
1262 /* Create non-contiguous cache blocks by writing every other range */
1263 get_random_bytes(&val
, sizeof(val
));
1264 for (rangeidx
= 0; rangeidx
< num_ranges
; rangeidx
+= 2) {
1265 reg
= param
->from_reg
+ (rangeidx
* BLOCK_TEST_SIZE
);
1266 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_write(map
, reg
,
1269 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[reg
],
1270 &val
[rangeidx
/ 2], sizeof(val
[rangeidx
/ 2]));
1273 /* Check that odd ranges weren't written */
1274 for (rangeidx
= 1; rangeidx
< num_ranges
; rangeidx
+= 2) {
1275 reg
= param
->from_reg
+ (rangeidx
* BLOCK_TEST_SIZE
);
1276 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1277 KUNIT_EXPECT_FALSE(test
, data
->written
[reg
+ i
]);
1281 reg
= param
->from_reg
+ (2 * BLOCK_TEST_SIZE
);
1282 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, reg
, reg
+ BLOCK_TEST_SIZE
- 1));
1284 /* Drop part of range 4 */
1285 reg
= param
->from_reg
+ (4 * BLOCK_TEST_SIZE
);
1286 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, reg
+ 3, reg
+ 5));
1288 /* Mark dirty and reset mock registers to 0 */
1289 regcache_mark_dirty(map
);
1290 for (i
= 0; i
< config
.max_register
+ 1; i
++) {
1292 data
->written
[i
] = false;
1295 /* The registers that were dropped from range 4 should now remain at 0 */
1300 /* Sync and check that the expected register ranges were written */
1301 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1303 /* Check that odd ranges weren't written */
1304 for (rangeidx
= 1; rangeidx
< num_ranges
; rangeidx
+= 2) {
1305 reg
= param
->from_reg
+ (rangeidx
* BLOCK_TEST_SIZE
);
1306 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1307 KUNIT_EXPECT_FALSE(test
, data
->written
[reg
+ i
]);
1310 /* Check that even ranges (except 2 and 4) were written */
1311 for (rangeidx
= 0; rangeidx
< num_ranges
; rangeidx
+= 2) {
1312 if ((rangeidx
== 2) || (rangeidx
== 4))
1315 reg
= param
->from_reg
+ (rangeidx
* BLOCK_TEST_SIZE
);
1316 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1317 KUNIT_EXPECT_TRUE(test
, data
->written
[reg
+ i
]);
1319 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[reg
],
1320 &val
[rangeidx
/ 2], sizeof(val
[rangeidx
/ 2]));
1323 /* Check that range 2 wasn't written */
1324 reg
= param
->from_reg
+ (2 * BLOCK_TEST_SIZE
);
1325 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1326 KUNIT_EXPECT_FALSE(test
, data
->written
[reg
+ i
]);
1328 /* Check that range 4 was partially written */
1329 reg
= param
->from_reg
+ (4 * BLOCK_TEST_SIZE
);
1330 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1331 KUNIT_EXPECT_EQ(test
, data
->written
[reg
+ i
], i
< 3 || i
> 5);
1333 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[reg
], &val
[4 / 2], sizeof(val
[4 / 2]));
1335 /* Nothing before param->from_reg should have been written */
1336 for (i
= 0; i
< param
->from_reg
; i
++)
1337 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
1340 static void cache_drop_all_and_sync_marked_dirty(struct kunit
*test
)
1342 const struct regmap_test_param
*param
= test
->param_value
;
1344 struct regmap_config config
;
1345 struct regmap_ram_data
*data
;
1346 unsigned int rval
[BLOCK_TEST_SIZE
];
1349 config
= test_regmap_config
;
1350 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1352 map
= gen_regmap(test
, &config
, &data
);
1353 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1357 /* Ensure the data is read from the cache */
1358 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1359 data
->read
[param
->from_reg
+ i
] = false;
1360 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1362 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], rval
, sizeof(rval
));
1364 /* Change all values in cache from defaults */
1365 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1366 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ i
, rval
[i
] + 1));
1368 /* Drop all registers */
1369 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, 0, config
.max_register
));
1371 /* Mark dirty and cache sync should not write anything. */
1372 regcache_mark_dirty(map
);
1373 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1374 data
->written
[param
->from_reg
+ i
] = false;
1376 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1377 for (i
= 0; i
<= config
.max_register
; i
++)
1378 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
1381 static void cache_drop_all_and_sync_no_defaults(struct kunit
*test
)
1383 const struct regmap_test_param
*param
= test
->param_value
;
1385 struct regmap_config config
;
1386 struct regmap_ram_data
*data
;
1387 unsigned int rval
[BLOCK_TEST_SIZE
];
1390 config
= test_regmap_config
;
1392 map
= gen_regmap(test
, &config
, &data
);
1393 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1397 /* Ensure the data is read from the cache */
1398 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1399 data
->read
[param
->from_reg
+ i
] = false;
1400 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1402 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], rval
, sizeof(rval
));
1404 /* Change all values in cache */
1405 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1406 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ i
, rval
[i
] + 1));
1408 /* Drop all registers */
1409 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, 0, config
.max_register
));
1412 * Sync cache without marking it dirty. All registers were dropped
1413 * so the cache should not have any entries to write out.
1415 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1416 data
->written
[param
->from_reg
+ i
] = false;
1418 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1419 for (i
= 0; i
<= config
.max_register
; i
++)
1420 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
1423 static void cache_drop_all_and_sync_has_defaults(struct kunit
*test
)
1425 const struct regmap_test_param
*param
= test
->param_value
;
1427 struct regmap_config config
;
1428 struct regmap_ram_data
*data
;
1429 unsigned int rval
[BLOCK_TEST_SIZE
];
1432 config
= test_regmap_config
;
1433 config
.num_reg_defaults
= BLOCK_TEST_SIZE
;
1435 map
= gen_regmap(test
, &config
, &data
);
1436 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1440 /* Ensure the data is read from the cache */
1441 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1442 data
->read
[param
->from_reg
+ i
] = false;
1443 KUNIT_EXPECT_EQ(test
, 0, regmap_bulk_read(map
, param
->from_reg
, rval
,
1445 KUNIT_EXPECT_MEMEQ(test
, &data
->vals
[param
->from_reg
], rval
, sizeof(rval
));
1447 /* Change all values in cache from defaults */
1448 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1449 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, param
->from_reg
+ i
, rval
[i
] + 1));
1451 /* Drop all registers */
1452 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, 0, config
.max_register
));
1455 * Sync cache without marking it dirty. All registers were dropped
1456 * so the cache should not have any entries to write out.
1458 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1459 data
->written
[param
->from_reg
+ i
] = false;
1461 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1462 for (i
= 0; i
<= config
.max_register
; i
++)
1463 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
1466 static void cache_present(struct kunit
*test
)
1468 const struct regmap_test_param
*param
= test
->param_value
;
1470 struct regmap_config config
;
1471 struct regmap_ram_data
*data
;
1475 config
= test_regmap_config
;
1477 map
= gen_regmap(test
, &config
, &data
);
1478 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1482 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1483 data
->read
[param
->from_reg
+ i
] = false;
1485 /* No defaults so no registers cached. */
1486 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1487 KUNIT_ASSERT_FALSE(test
, regcache_reg_cached(map
, param
->from_reg
+ i
));
1489 /* We didn't trigger any reads */
1490 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1491 KUNIT_ASSERT_FALSE(test
, data
->read
[param
->from_reg
+ i
]);
1493 /* Fill the cache */
1494 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1495 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, param
->from_reg
+ i
, &val
));
1497 /* Now everything should be cached */
1498 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1499 KUNIT_ASSERT_TRUE(test
, regcache_reg_cached(map
, param
->from_reg
+ i
));
1502 static void cache_write_zero(struct kunit
*test
)
1504 const struct regmap_test_param
*param
= test
->param_value
;
1506 struct regmap_config config
;
1507 struct regmap_ram_data
*data
;
1511 config
= test_regmap_config
;
1513 map
= gen_regmap(test
, &config
, &data
);
1514 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1518 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1519 data
->read
[param
->from_reg
+ i
] = false;
1521 /* No defaults so no registers cached. */
1522 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1523 KUNIT_ASSERT_FALSE(test
, regcache_reg_cached(map
, param
->from_reg
+ i
));
1525 /* We didn't trigger any reads */
1526 for (i
= 0; i
< BLOCK_TEST_SIZE
; i
++)
1527 KUNIT_ASSERT_FALSE(test
, data
->read
[param
->from_reg
+ i
]);
1529 /* Write a zero value */
1530 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, 1, 0));
1532 /* Read that zero value back */
1533 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, 1, &val
));
1534 KUNIT_EXPECT_EQ(test
, 0, val
);
1536 /* From the cache? */
1537 KUNIT_ASSERT_TRUE(test
, regcache_reg_cached(map
, 1));
1539 /* Try to throw it away */
1540 KUNIT_EXPECT_EQ(test
, 0, regcache_drop_region(map
, 1, 1));
1541 KUNIT_ASSERT_FALSE(test
, regcache_reg_cached(map
, 1));
1544 /* Check that caching the window register works with sync */
1545 static void cache_range_window_reg(struct kunit
*test
)
1548 struct regmap_config config
;
1549 struct regmap_ram_data
*data
;
1553 config
= test_regmap_config
;
1554 config
.volatile_reg
= test_range_window_volatile
;
1555 config
.ranges
= &test_range
;
1556 config
.num_ranges
= 1;
1557 config
.max_register
= test_range
.range_max
;
1559 map
= gen_regmap(test
, &config
, &data
);
1560 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1564 /* Write new values to the entire range */
1565 for (i
= test_range
.range_min
; i
<= test_range
.range_max
; i
++)
1566 KUNIT_ASSERT_EQ(test
, 0, regmap_write(map
, i
, 0));
1568 val
= data
->vals
[test_range
.selector_reg
] & test_range
.selector_mask
;
1569 KUNIT_ASSERT_EQ(test
, val
, 2);
1571 /* Write to the first register in the range to reset the page */
1572 KUNIT_ASSERT_EQ(test
, 0, regmap_write(map
, test_range
.range_min
, 0));
1573 val
= data
->vals
[test_range
.selector_reg
] & test_range
.selector_mask
;
1574 KUNIT_ASSERT_EQ(test
, val
, 0);
1576 /* Trigger a cache sync */
1577 regcache_mark_dirty(map
);
1578 KUNIT_ASSERT_EQ(test
, 0, regcache_sync(map
));
1580 /* Write to the first register again, the page should be reset */
1581 KUNIT_ASSERT_EQ(test
, 0, regmap_write(map
, test_range
.range_min
, 0));
1582 val
= data
->vals
[test_range
.selector_reg
] & test_range
.selector_mask
;
1583 KUNIT_ASSERT_EQ(test
, val
, 0);
1585 /* Trigger another cache sync */
1586 regcache_mark_dirty(map
);
1587 KUNIT_ASSERT_EQ(test
, 0, regcache_sync(map
));
1589 /* Write to the last register again, the page should be reset */
1590 KUNIT_ASSERT_EQ(test
, 0, regmap_write(map
, test_range
.range_max
, 0));
1591 val
= data
->vals
[test_range
.selector_reg
] & test_range
.selector_mask
;
1592 KUNIT_ASSERT_EQ(test
, val
, 2);
1595 static const struct regmap_test_param raw_types_list
[] = {
1596 { .cache
= REGCACHE_NONE
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1597 { .cache
= REGCACHE_NONE
, .val_endian
= REGMAP_ENDIAN_BIG
},
1598 { .cache
= REGCACHE_FLAT
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1599 { .cache
= REGCACHE_FLAT
, .val_endian
= REGMAP_ENDIAN_BIG
},
1600 { .cache
= REGCACHE_RBTREE
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1601 { .cache
= REGCACHE_RBTREE
, .val_endian
= REGMAP_ENDIAN_BIG
},
1602 { .cache
= REGCACHE_MAPLE
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1603 { .cache
= REGCACHE_MAPLE
, .val_endian
= REGMAP_ENDIAN_BIG
},
1606 KUNIT_ARRAY_PARAM(raw_test_types
, raw_types_list
, param_to_desc
);
1608 static const struct regmap_test_param raw_cache_types_list
[] = {
1609 { .cache
= REGCACHE_FLAT
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1610 { .cache
= REGCACHE_FLAT
, .val_endian
= REGMAP_ENDIAN_BIG
},
1611 { .cache
= REGCACHE_RBTREE
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1612 { .cache
= REGCACHE_RBTREE
, .val_endian
= REGMAP_ENDIAN_BIG
},
1613 { .cache
= REGCACHE_MAPLE
, .val_endian
= REGMAP_ENDIAN_LITTLE
},
1614 { .cache
= REGCACHE_MAPLE
, .val_endian
= REGMAP_ENDIAN_BIG
},
1617 KUNIT_ARRAY_PARAM(raw_test_cache_types
, raw_cache_types_list
, param_to_desc
);
1619 static const struct regmap_config raw_regmap_config
= {
1620 .max_register
= BLOCK_TEST_SIZE
,
1622 .reg_format_endian
= REGMAP_ENDIAN_LITTLE
,
1627 static struct regmap
*gen_raw_regmap(struct kunit
*test
,
1628 struct regmap_config
*config
,
1629 struct regmap_ram_data
**data
)
1631 struct regmap_test_priv
*priv
= test
->priv
;
1632 const struct regmap_test_param
*param
= test
->param_value
;
1634 struct regmap
*ret
= ERR_PTR(-ENOMEM
);
1636 struct reg_default
*defaults
;
1639 config
->cache_type
= param
->cache
;
1640 config
->val_format_endian
= param
->val_endian
;
1641 config
->disable_locking
= config
->cache_type
== REGCACHE_RBTREE
||
1642 config
->cache_type
== REGCACHE_MAPLE
;
1644 size
= array_size(config
->max_register
+ 1, BITS_TO_BYTES(config
->reg_bits
));
1645 buf
= kmalloc(size
, GFP_KERNEL
);
1647 return ERR_PTR(-ENOMEM
);
1649 get_random_bytes(buf
, size
);
1651 *data
= kzalloc(sizeof(**data
), GFP_KERNEL
);
1654 (*data
)->vals
= (void *)buf
;
1656 config
->num_reg_defaults
= config
->max_register
+ 1;
1657 defaults
= kunit_kcalloc(test
,
1658 config
->num_reg_defaults
,
1659 sizeof(struct reg_default
),
1663 config
->reg_defaults
= defaults
;
1665 for (i
= 0; i
< config
->num_reg_defaults
; i
++) {
1666 defaults
[i
].reg
= i
;
1667 switch (param
->val_endian
) {
1668 case REGMAP_ENDIAN_LITTLE
:
1669 defaults
[i
].def
= le16_to_cpu(buf
[i
]);
1671 case REGMAP_ENDIAN_BIG
:
1672 defaults
[i
].def
= be16_to_cpu(buf
[i
]);
1675 ret
= ERR_PTR(-EINVAL
);
1681 * We use the defaults in the tests but they don't make sense
1682 * to the core if there's no cache.
1684 if (config
->cache_type
== REGCACHE_NONE
)
1685 config
->num_reg_defaults
= 0;
1687 ret
= regmap_init_raw_ram(priv
->dev
, config
, *data
);
1691 /* This calls regmap_exit() on failure, which frees buf and *data */
1692 error
= kunit_add_action_or_reset(test
, regmap_exit_action
, ret
);
1694 ret
= ERR_PTR(error
);
1705 static void raw_read_defaults_single(struct kunit
*test
)
1708 struct regmap_config config
;
1709 struct regmap_ram_data
*data
;
1713 config
= raw_regmap_config
;
1715 map
= gen_raw_regmap(test
, &config
, &data
);
1716 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1720 /* Check that we can read the defaults via the API */
1721 for (i
= 0; i
< config
.max_register
+ 1; i
++) {
1722 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
));
1723 KUNIT_EXPECT_EQ(test
, config
.reg_defaults
[i
].def
, rval
);
1727 static void raw_read_defaults(struct kunit
*test
)
1730 struct regmap_config config
;
1731 struct regmap_ram_data
*data
;
1737 config
= raw_regmap_config
;
1739 map
= gen_raw_regmap(test
, &config
, &data
);
1740 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1744 val_len
= array_size(sizeof(*rval
), config
.max_register
+ 1);
1745 rval
= kunit_kmalloc(test
, val_len
, GFP_KERNEL
);
1746 KUNIT_ASSERT_TRUE(test
, rval
!= NULL
);
1750 /* Check that we can read the defaults via the API */
1751 KUNIT_EXPECT_EQ(test
, 0, regmap_raw_read(map
, 0, rval
, val_len
));
1752 for (i
= 0; i
< config
.max_register
+ 1; i
++) {
1753 def
= config
.reg_defaults
[i
].def
;
1754 if (config
.val_format_endian
== REGMAP_ENDIAN_BIG
) {
1755 KUNIT_EXPECT_EQ(test
, def
, be16_to_cpu((__force __be16
)rval
[i
]));
1757 KUNIT_EXPECT_EQ(test
, def
, le16_to_cpu((__force __le16
)rval
[i
]));
1762 static void raw_write_read_single(struct kunit
*test
)
1765 struct regmap_config config
;
1766 struct regmap_ram_data
*data
;
1770 config
= raw_regmap_config
;
1772 map
= gen_raw_regmap(test
, &config
, &data
);
1773 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1777 get_random_bytes(&val
, sizeof(val
));
1779 /* If we write a value to a register we can read it back */
1780 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, 0, val
));
1781 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, 0, &rval
));
1782 KUNIT_EXPECT_EQ(test
, val
, rval
);
1785 static void raw_write(struct kunit
*test
)
1788 struct regmap_config config
;
1789 struct regmap_ram_data
*data
;
1795 config
= raw_regmap_config
;
1797 map
= gen_raw_regmap(test
, &config
, &data
);
1798 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1802 hw_buf
= (u16
*)data
->vals
;
1804 get_random_bytes(&val
, sizeof(val
));
1806 /* Do a raw write */
1807 KUNIT_EXPECT_EQ(test
, 0, regmap_raw_write(map
, 2, val
, sizeof(val
)));
1809 /* We should read back the new values, and defaults for the rest */
1810 for (i
= 0; i
< config
.max_register
+ 1; i
++) {
1811 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
));
1816 if (config
.val_format_endian
== REGMAP_ENDIAN_BIG
) {
1817 KUNIT_EXPECT_EQ(test
, rval
,
1818 be16_to_cpu((__force __be16
)val
[i
% 2]));
1820 KUNIT_EXPECT_EQ(test
, rval
,
1821 le16_to_cpu((__force __le16
)val
[i
% 2]));
1825 KUNIT_EXPECT_EQ(test
, config
.reg_defaults
[i
].def
, rval
);
1830 /* The values should appear in the "hardware" */
1831 KUNIT_EXPECT_MEMEQ(test
, &hw_buf
[2], val
, sizeof(val
));
1834 static bool reg_zero(struct device
*dev
, unsigned int reg
)
1839 static bool ram_reg_zero(struct regmap_ram_data
*data
, unsigned int reg
)
1844 static void raw_noinc_write(struct kunit
*test
)
1847 struct regmap_config config
;
1848 struct regmap_ram_data
*data
;
1850 u16 val_test
, val_last
;
1851 u16 val_array
[BLOCK_TEST_SIZE
];
1853 config
= raw_regmap_config
;
1854 config
.volatile_reg
= reg_zero
;
1855 config
.writeable_noinc_reg
= reg_zero
;
1856 config
.readable_noinc_reg
= reg_zero
;
1858 map
= gen_raw_regmap(test
, &config
, &data
);
1859 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1863 data
->noinc_reg
= ram_reg_zero
;
1865 get_random_bytes(&val_array
, sizeof(val_array
));
1867 if (config
.val_format_endian
== REGMAP_ENDIAN_BIG
) {
1868 val_test
= be16_to_cpu(val_array
[1]) + 100;
1869 val_last
= be16_to_cpu(val_array
[BLOCK_TEST_SIZE
- 1]);
1871 val_test
= le16_to_cpu(val_array
[1]) + 100;
1872 val_last
= le16_to_cpu(val_array
[BLOCK_TEST_SIZE
- 1]);
1875 /* Put some data into the register following the noinc register */
1876 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, 1, val_test
));
1878 /* Write some data to the noinc register */
1879 KUNIT_EXPECT_EQ(test
, 0, regmap_noinc_write(map
, 0, val_array
,
1880 sizeof(val_array
)));
1882 /* We should read back the last value written */
1883 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, 0, &val
));
1884 KUNIT_ASSERT_EQ(test
, val_last
, val
);
1886 /* Make sure we didn't touch the register after the noinc register */
1887 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, 1, &val
));
1888 KUNIT_ASSERT_EQ(test
, val_test
, val
);
1891 static void raw_sync(struct kunit
*test
)
1894 struct regmap_config config
;
1895 struct regmap_ram_data
*data
;
1901 config
= raw_regmap_config
;
1903 map
= gen_raw_regmap(test
, &config
, &data
);
1904 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1908 hw_buf
= (u16
*)data
->vals
;
1910 get_changed_bytes(&hw_buf
[2], &val
[0], sizeof(val
));
1912 /* Do a regular write and a raw write in cache only mode */
1913 regcache_cache_only(map
, true);
1914 KUNIT_EXPECT_EQ(test
, 0, regmap_raw_write(map
, 2, val
,
1916 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, 4, val
[2]));
1918 /* We should read back the new values, and defaults for the rest */
1919 for (i
= 0; i
< config
.max_register
+ 1; i
++) {
1920 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, i
, &rval
));
1925 if (config
.val_format_endian
== REGMAP_ENDIAN_BIG
) {
1926 KUNIT_EXPECT_EQ(test
, rval
,
1927 be16_to_cpu((__force __be16
)val
[i
- 2]));
1929 KUNIT_EXPECT_EQ(test
, rval
,
1930 le16_to_cpu((__force __le16
)val
[i
- 2]));
1934 KUNIT_EXPECT_EQ(test
, rval
, val
[i
- 2]);
1937 KUNIT_EXPECT_EQ(test
, config
.reg_defaults
[i
].def
, rval
);
1943 * The value written via _write() was translated by the core,
1944 * translate the original copy for comparison purposes.
1946 if (config
.val_format_endian
== REGMAP_ENDIAN_BIG
)
1947 val
[2] = cpu_to_be16(val
[2]);
1949 val
[2] = cpu_to_le16(val
[2]);
1951 /* The values should not appear in the "hardware" */
1952 KUNIT_EXPECT_MEMNEQ(test
, &hw_buf
[2], &val
[0], sizeof(val
));
1954 for (i
= 0; i
< config
.max_register
+ 1; i
++)
1955 data
->written
[i
] = false;
1958 regcache_cache_only(map
, false);
1959 regcache_mark_dirty(map
);
1960 KUNIT_EXPECT_EQ(test
, 0, regcache_sync(map
));
1962 /* The values should now appear in the "hardware" */
1963 KUNIT_EXPECT_MEMEQ(test
, &hw_buf
[2], &val
[0], sizeof(val
));
1966 static void raw_ranges(struct kunit
*test
)
1969 struct regmap_config config
;
1970 struct regmap_ram_data
*data
;
1974 config
= raw_regmap_config
;
1975 config
.volatile_reg
= test_range_all_volatile
;
1976 config
.ranges
= &test_range
;
1977 config
.num_ranges
= 1;
1978 config
.max_register
= test_range
.range_max
;
1980 map
= gen_raw_regmap(test
, &config
, &data
);
1981 KUNIT_ASSERT_FALSE(test
, IS_ERR(map
));
1985 /* Reset the page to a non-zero value to trigger a change */
1986 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, test_range
.selector_reg
,
1987 test_range
.range_max
));
1989 /* Check we set the page and use the window for writes */
1990 data
->written
[test_range
.selector_reg
] = false;
1991 data
->written
[test_range
.window_start
] = false;
1992 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
, test_range
.range_min
, 0));
1993 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
1994 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.window_start
]);
1996 data
->written
[test_range
.selector_reg
] = false;
1997 data
->written
[test_range
.window_start
] = false;
1998 KUNIT_EXPECT_EQ(test
, 0, regmap_write(map
,
1999 test_range
.range_min
+
2000 test_range
.window_len
,
2002 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
2003 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.window_start
]);
2005 /* Same for reads */
2006 data
->written
[test_range
.selector_reg
] = false;
2007 data
->read
[test_range
.window_start
] = false;
2008 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
, test_range
.range_min
, &val
));
2009 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
2010 KUNIT_EXPECT_TRUE(test
, data
->read
[test_range
.window_start
]);
2012 data
->written
[test_range
.selector_reg
] = false;
2013 data
->read
[test_range
.window_start
] = false;
2014 KUNIT_EXPECT_EQ(test
, 0, regmap_read(map
,
2015 test_range
.range_min
+
2016 test_range
.window_len
,
2018 KUNIT_EXPECT_TRUE(test
, data
->written
[test_range
.selector_reg
]);
2019 KUNIT_EXPECT_TRUE(test
, data
->read
[test_range
.window_start
]);
2021 /* No physical access triggered in the virtual range */
2022 for (i
= test_range
.range_min
; i
< test_range
.range_max
; i
++) {
2023 KUNIT_EXPECT_FALSE(test
, data
->read
[i
]);
2024 KUNIT_EXPECT_FALSE(test
, data
->written
[i
]);
2028 static struct kunit_case regmap_test_cases
[] = {
2029 KUNIT_CASE_PARAM(basic_read_write
, regcache_types_gen_params
),
2030 KUNIT_CASE_PARAM(read_bypassed
, real_cache_types_gen_params
),
2031 KUNIT_CASE_PARAM(read_bypassed_volatile
, real_cache_types_gen_params
),
2032 KUNIT_CASE_PARAM(bulk_write
, regcache_types_gen_params
),
2033 KUNIT_CASE_PARAM(bulk_read
, regcache_types_gen_params
),
2034 KUNIT_CASE_PARAM(multi_write
, regcache_types_gen_params
),
2035 KUNIT_CASE_PARAM(multi_read
, regcache_types_gen_params
),
2036 KUNIT_CASE_PARAM(write_readonly
, regcache_types_gen_params
),
2037 KUNIT_CASE_PARAM(read_writeonly
, regcache_types_gen_params
),
2038 KUNIT_CASE_PARAM(reg_defaults
, regcache_types_gen_params
),
2039 KUNIT_CASE_PARAM(reg_defaults_read_dev
, regcache_types_gen_params
),
2040 KUNIT_CASE_PARAM(register_patch
, regcache_types_gen_params
),
2041 KUNIT_CASE_PARAM(stride
, regcache_types_gen_params
),
2042 KUNIT_CASE_PARAM(basic_ranges
, regcache_types_gen_params
),
2043 KUNIT_CASE_PARAM(stress_insert
, regcache_types_gen_params
),
2044 KUNIT_CASE_PARAM(cache_bypass
, real_cache_types_gen_params
),
2045 KUNIT_CASE_PARAM(cache_sync_marked_dirty
, real_cache_types_gen_params
),
2046 KUNIT_CASE_PARAM(cache_sync_after_cache_only
, real_cache_types_gen_params
),
2047 KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty
, real_cache_types_gen_params
),
2048 KUNIT_CASE_PARAM(cache_sync_default_after_cache_only
, real_cache_types_gen_params
),
2049 KUNIT_CASE_PARAM(cache_sync_readonly
, real_cache_types_gen_params
),
2050 KUNIT_CASE_PARAM(cache_sync_patch
, real_cache_types_gen_params
),
2051 KUNIT_CASE_PARAM(cache_drop
, sparse_cache_types_gen_params
),
2052 KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges
, sparse_cache_types_gen_params
),
2053 KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty
, sparse_cache_types_gen_params
),
2054 KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults
, sparse_cache_types_gen_params
),
2055 KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults
, sparse_cache_types_gen_params
),
2056 KUNIT_CASE_PARAM(cache_present
, sparse_cache_types_gen_params
),
2057 KUNIT_CASE_PARAM(cache_write_zero
, sparse_cache_types_gen_params
),
2058 KUNIT_CASE_PARAM(cache_range_window_reg
, real_cache_types_only_gen_params
),
2060 KUNIT_CASE_PARAM(raw_read_defaults_single
, raw_test_types_gen_params
),
2061 KUNIT_CASE_PARAM(raw_read_defaults
, raw_test_types_gen_params
),
2062 KUNIT_CASE_PARAM(raw_write_read_single
, raw_test_types_gen_params
),
2063 KUNIT_CASE_PARAM(raw_write
, raw_test_types_gen_params
),
2064 KUNIT_CASE_PARAM(raw_noinc_write
, raw_test_types_gen_params
),
2065 KUNIT_CASE_PARAM(raw_sync
, raw_test_cache_types_gen_params
),
2066 KUNIT_CASE_PARAM(raw_ranges
, raw_test_cache_types_gen_params
),
2070 static int regmap_test_init(struct kunit
*test
)
2072 struct regmap_test_priv
*priv
;
2075 priv
= kunit_kzalloc(test
, sizeof(*priv
), GFP_KERNEL
);
2081 dev
= kunit_device_register(test
, "regmap_test");
2083 return PTR_ERR(dev
);
2085 priv
->dev
= get_device(dev
);
2086 dev_set_drvdata(dev
, test
);
2091 static void regmap_test_exit(struct kunit
*test
)
2093 struct regmap_test_priv
*priv
= test
->priv
;
2095 /* Destroy the dummy struct device */
2096 if (priv
&& priv
->dev
)
2097 put_device(priv
->dev
);
2100 static struct kunit_suite regmap_test_suite
= {
2102 .init
= regmap_test_init
,
2103 .exit
= regmap_test_exit
,
2104 .test_cases
= regmap_test_cases
,
2106 kunit_test_suite(regmap_test_suite
);
2108 MODULE_DESCRIPTION("Regmap KUnit tests");
2109 MODULE_LICENSE("GPL v2");