// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

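/*
 * Illustrative usage sketch (not part of the original source; the device
 * name, table length, and block numbers are arbitrary). The table line is
 * <device_path> <offset> <blksz>, and bad blocks are then managed at
 * runtime through the message interface:
 *
 *   dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
 *   dmsetup message dust1 0 addbadblock 60
 *   dmsetup message dust1 0 enable
 */
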
struct badblock {
	struct rb_node node;
	sector_t bb;
	unsigned char wr_fail_cnt;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

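/*
 * The badblock list is an rbtree keyed by block number. The two helpers
 * below do no locking of their own; every caller in this file holds
 * dd->dust_lock around them.
 */
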
static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_add_block(struct dust_device *dd, unsigned long long block,
			  unsigned char wr_fail_cnt)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	bblock->wr_fail_cnt = wr_fail_cnt;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode) {
		DMINFO("%s: badblock added at block %llu with write fail count %hhu",
		       __func__, block, wr_fail_cnt);
	}
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMINFO("%s: block %llu found in badblocklist", __func__, block);
	else
		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

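/*
 * I/O-path lookups operate at "dust block" granularity: the callers
 * below shift the bio sector down by sect_per_block_shift before
 * consulting the badblock list.
 */
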
static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int r = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		r = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return r;
}

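/*
 * Write semantics: while wr_fail_cnt is nonzero, writes to a listed
 * block fail and decrement the count; once it reaches zero, a write
 * "repairs" the block by removing it from the list, similar to a drive
 * reallocating a bad sector when it is rewritten.
 */
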
static int __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk && bblk->wr_fail_cnt > 0) {
		bblk->wr_fail_cnt--;
		return DM_MAPIO_KILL;
	}

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}

	return DM_MAPIO_REMAPPED;
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;
	int ret = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		ret = __dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return ret;
}

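/*
 * Bio map callback: remap every bio to the underlying device, then
 * decide whether to let it through. Returning DM_MAPIO_KILL makes
 * device-mapper complete the bio with an I/O error, which is what
 * emulates the medium error.
 */
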
static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int r;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		r = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		r = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return r;
}

static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMINFO("%s: no badblocks found", __func__);
	else
		DMINFO("%s: badblocks cleared", __func__);

	return 0;
}

/*
 * Target parameters:
 *
 * <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

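/*
 * Message interface. Single-argument messages: enable, disable,
 * countbadblocks, clearbadblocks, quiet. Two arguments: addbadblock,
 * removebadblock, or queryblock plus a block number. Three arguments:
 * addbadblock <block> <write fail count>.
 */
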
static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result_buf, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int r = -EINVAL;
	unsigned long long tmp, block;
	unsigned char wr_fail_cnt;
	unsigned int tmp_ui;
	unsigned long flags;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			r = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			r = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMINFO("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			r = 0;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			r = dust_clear_badblocks(dd);
		} else if (!strcasecmp(argv[0], "quiet")) {
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			r = 0;
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, 0);
		else if (!strcasecmp(argv[0], "removebadblock"))
			r = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			r = dust_query_block(dd, block);
		else
			invalid_msg = true;

	} else if (argc == 3) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return r;

		if (sscanf(argv[2], "%u%c", &tmp_ui, &dummy) != 1)
			return r;

		block = tmp;
		if (tmp_ui > 255) {
			DMERR("selected write fail count out of range");
			return r;
		}
		wr_fail_cnt = tmp_ui;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return r;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			r = dust_add_block(dd, block, wr_fail_cnt);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return r;
}

static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int r = dm_register_target(&dust_target);

	if (r < 0)
		DMERR("dm_register_target failed %d", r);

	return r;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");