// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This is a test "dust" device, which fails reads on specified
 * sectors, emulating the behavior of a hard disk drive sending
 * a "Read Medium Error" sense.
 */

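/*
 * Illustrative usage sketch (not part of the original header; the target
 * name, underlying device, and block numbers below are assumptions):
 *
 *   dmsetup create dust1 --table '0 2097152 dust /dev/sdb1 0 512'
 *   dmsetup message dust1 0 addbadblock 60
 *   dmsetup message dust1 0 enable
 *
 * With "enable" set, reads touching block 60 of dust1 are failed by
 * dust_map_read(); a write to that block removes it from the bad block
 * list again (see __dust_map_write()).
 */
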
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "dust"

struct badblock {
	struct rb_node node;
	sector_t bb;
};

struct dust_device {
	struct dm_dev *dev;
	struct rb_root badblocklist;
	unsigned long long badblock_count;
	spinlock_t dust_lock;
	unsigned int blksz;
	int sect_per_block_shift;
	unsigned int sect_per_block;
	sector_t start;
	bool fail_read_on_bb:1;
	bool quiet_mode:1;
};

/* Look up @blk in the bad block tree; returns NULL when the block is not listed. */
static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct badblock *bblk = rb_entry(node, struct badblock, node);

		if (bblk->bb > blk)
			node = node->rb_left;
		else if (bblk->bb < blk)
			node = node->rb_right;
		else
			return bblk;
	}

	return NULL;
}

static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
{
	struct badblock *bblk;
	struct rb_node **link = &root->rb_node, *parent = NULL;
	sector_t value = new->bb;

	while (*link) {
		parent = *link;
		bblk = rb_entry(parent, struct badblock, node);

		if (bblk->bb > value)
			link = &(*link)->rb_left;
		else if (bblk->bb < value)
			link = &(*link)->rb_right;
		else
			return false;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return true;
}

static int dust_remove_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);

	if (bblock == NULL) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu not found in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		return -EINVAL;
	}

	rb_erase(&bblock->node, &dd->badblocklist);
	dd->badblock_count--;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock removed at block %llu", __func__, block);
	kfree(bblock);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_add_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
	if (bblock == NULL) {
		if (!dd->quiet_mode)
			DMERR("%s: badblock allocation failed", __func__);
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock->bb = block;
	if (!dust_rb_insert(&dd->badblocklist, bblock)) {
		if (!dd->quiet_mode) {
			DMERR("%s: block %llu already in badblocklist",
			      __func__, block);
		}
		spin_unlock_irqrestore(&dd->dust_lock, flags);
		kfree(bblock);
		return -EINVAL;
	}

	dd->badblock_count++;
	if (!dd->quiet_mode)
		DMINFO("%s: badblock added at block %llu", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int dust_query_block(struct dust_device *dd, unsigned long long block)
{
	struct badblock *bblock;
	unsigned long flags;

	spin_lock_irqsave(&dd->dust_lock, flags);
	bblock = dust_rb_search(&dd->badblocklist, block);
	if (bblock != NULL)
		DMINFO("%s: block %llu found in badblocklist", __func__, block);
	else
		DMINFO("%s: block %llu not found in badblocklist", __func__, block);
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	return 0;
}

static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk)
		return DM_MAPIO_KILL;

	return DM_MAPIO_REMAPPED;
}

static int dust_map_read(struct dust_device *dd, sector_t thisblock,
			 bool fail_read_on_bb)
{
	unsigned long flags;
	int ret = DM_MAPIO_REMAPPED;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		ret = __dust_map_read(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return ret;
}

/* A write to a listed bad block "repairs" it: drop the entry from the tree. */
static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
{
	struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);

	if (bblk) {
		rb_erase(&bblk->node, &dd->badblocklist);
		dd->badblock_count--;
		kfree(bblk);
		if (!dd->quiet_mode) {
			sector_div(thisblock, dd->sect_per_block);
			DMINFO("block %llu removed from badblocklist by write",
			       (unsigned long long)thisblock);
		}
	}
}

static int dust_map_write(struct dust_device *dd, sector_t thisblock,
			  bool fail_read_on_bb)
{
	unsigned long flags;

	if (fail_read_on_bb) {
		thisblock >>= dd->sect_per_block_shift;
		spin_lock_irqsave(&dd->dust_lock, flags);
		__dust_map_write(dd, thisblock);
		spin_unlock_irqrestore(&dd->dust_lock, flags);
	}

	return DM_MAPIO_REMAPPED;
}

/* Remap the bio onto the underlying device, failing reads that hit a bad block. */
static int dust_map(struct dm_target *ti, struct bio *bio)
{
	struct dust_device *dd = ti->private;
	int ret;

	bio_set_dev(bio, dd->dev->bdev);
	bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (bio_data_dir(bio) == READ)
		ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
	else
		ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);

	return ret;
}

static bool __dust_clear_badblocks(struct rb_root *tree,
				   unsigned long long count)
{
	struct rb_node *node = NULL, *nnode = NULL;

	nnode = rb_first(tree);
	if (nnode == NULL) {
		BUG_ON(count != 0);
		return false;
	}

	while (nnode) {
		node = nnode;
		nnode = rb_next(node);
		rb_erase(node, tree);
		count--;
		kfree(node);
	}
	BUG_ON(count != 0);
	BUG_ON(tree->rb_node != NULL);

	return true;
}

static int dust_clear_badblocks(struct dust_device *dd)
{
	unsigned long flags;
	struct rb_root badblocklist;
	unsigned long long badblock_count;

	spin_lock_irqsave(&dd->dust_lock, flags);
	badblocklist = dd->badblocklist;
	badblock_count = dd->badblock_count;
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_unlock_irqrestore(&dd->dust_lock, flags);

	if (!__dust_clear_badblocks(&badblocklist, badblock_count))
		DMINFO("%s: no badblocks found", __func__);
	else
		DMINFO("%s: badblocks cleared", __func__);

	return 0;
}

/*
 * Target parameters: <device_path> <offset> <blksz>
 *
 * device_path: path to the block device
 * offset: offset to data area from start of device_path
 * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
 */
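/*
 * For example (illustrative only; the path and sector counts are
 * assumptions, not taken from this file), the dmsetup table line
 *
 *   0 2097152 dust /dev/sdb1 0 512
 *
 * creates a 1 GiB dust target on /dev/sdb1 with a data offset of 0
 * sectors and a 512-byte block size.
 */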
static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dust_device *dd;
	unsigned long long tmp;
	char dummy;
	unsigned int blksz;
	unsigned int sect_per_block;
	sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
	sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);

	if (argc != 3) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
		ti->error = "Invalid block size parameter";
		return -EINVAL;
	}

	if (blksz < 512) {
		ti->error = "Block size must be at least 512";
		return -EINVAL;
	}

	if (!is_power_of_2(blksz)) {
		ti->error = "Block size must be a power of 2";
		return -EINVAL;
	}

	if (to_sector(blksz) > max_block_sectors) {
		ti->error = "Block size is too large";
		return -EINVAL;
	}

	sect_per_block = (blksz >> SECTOR_SHIFT);

	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device offset sector";
		return -EINVAL;
	}

	dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
	if (dd == NULL) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
		ti->error = "Device lookup failed";
		kfree(dd);
		return -EINVAL;
	}

	dd->sect_per_block = sect_per_block;
	dd->blksz = blksz;
	dd->start = tmp;

	dd->sect_per_block_shift = __ffs(sect_per_block);

	/*
	 * Whether to fail a read on a "bad" block.
	 * Defaults to false; enabled later by message.
	 */
	dd->fail_read_on_bb = false;

	/*
	 * Initialize bad block list rbtree.
	 */
	dd->badblocklist = RB_ROOT;
	dd->badblock_count = 0;
	spin_lock_init(&dd->dust_lock);

	dd->quiet_mode = false;

	BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);

	ti->num_discard_bios = 1;
	ti->num_flush_bios = 1;
	ti->private = dd;

	return 0;
}

static void dust_dtr(struct dm_target *ti)
{
	struct dust_device *dd = ti->private;

	__dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
	dm_put_device(ti, dd->dev);
	kfree(dd);
}

static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result_buf, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	bool invalid_msg = false;
	int result = -EINVAL;
	unsigned long long tmp, block;
	unsigned long flags;
	char dummy;

	if (argc == 1) {
		if (!strcasecmp(argv[0], "addbadblock") ||
		    !strcasecmp(argv[0], "removebadblock") ||
		    !strcasecmp(argv[0], "queryblock")) {
			DMERR("%s requires an additional argument", argv[0]);
		} else if (!strcasecmp(argv[0], "disable")) {
			DMINFO("disabling read failures on bad sectors");
			dd->fail_read_on_bb = false;
			result = 0;
		} else if (!strcasecmp(argv[0], "enable")) {
			DMINFO("enabling read failures on bad sectors");
			dd->fail_read_on_bb = true;
			result = 0;
		} else if (!strcasecmp(argv[0], "countbadblocks")) {
			spin_lock_irqsave(&dd->dust_lock, flags);
			DMINFO("countbadblocks: %llu badblock(s) found",
			       dd->badblock_count);
			spin_unlock_irqrestore(&dd->dust_lock, flags);
			result = 0;
		} else if (!strcasecmp(argv[0], "clearbadblocks")) {
			result = dust_clear_badblocks(dd);
		} else if (!strcasecmp(argv[0], "quiet")) {
			if (!dd->quiet_mode)
				dd->quiet_mode = true;
			else
				dd->quiet_mode = false;
			result = 0;
		} else {
			invalid_msg = true;
		}
	} else if (argc == 2) {
		if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
			return result;

		block = tmp;
		sector_div(size, dd->sect_per_block);
		if (block > size) {
			DMERR("selected block value out of range");
			return result;
		}

		if (!strcasecmp(argv[0], "addbadblock"))
			result = dust_add_block(dd, block);
		else if (!strcasecmp(argv[0], "removebadblock"))
			result = dust_remove_block(dd, block);
		else if (!strcasecmp(argv[0], "queryblock"))
			result = dust_query_block(dd, block);
		else
			invalid_msg = true;

	} else
		DMERR("invalid number of arguments '%d'", argc);

	if (invalid_msg)
		DMERR("unrecognized message '%s' received", argv[0]);

	return result;
}

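/*
 * Sketch of how the messages above are typically driven from user space
 * (the target name "dust1" is an assumption used only for illustration):
 *
 *   dmsetup message dust1 0 addbadblock 60
 *   dmsetup message dust1 0 queryblock 60
 *   dmsetup message dust1 0 countbadblocks
 *   dmsetup message dust1 0 removebadblock 60
 *   dmsetup message dust1 0 clearbadblocks
 *   dmsetup message dust1 0 quiet
 */
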
static void dust_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dust_device *dd = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %s %s", dd->dev->name,
		       dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
		       dd->quiet_mode ? "quiet" : "verbose");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %llu %u", dd->dev->name,
		       (unsigned long long)dd->start, dd->blksz);
		break;
	}
}

static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dust_device *dd = ti->private;
	struct dm_dev *dev = dd->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (dd->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;

	return 0;
}

static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
				void *data)
{
	struct dust_device *dd = ti->private;

	return fn(ti, dd->dev, dd->start, ti->len, data);
}

static struct target_type dust_target = {
	.name = "dust",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = dust_ctr,
	.dtr = dust_dtr,
	.iterate_devices = dust_iterate_devices,
	.map = dust_map,
	.message = dust_message,
	.status = dust_status,
	.prepare_ioctl = dust_prepare_ioctl,
};

static int __init dm_dust_init(void)
{
	int result = dm_register_target(&dust_target);

	if (result < 0)
		DMERR("dm_register_target failed %d", result);

	return result;
}

static void __exit dm_dust_exit(void)
{
	dm_unregister_target(&dust_target);
}

module_init(dm_dust_init);
module_exit(dm_dust_exit);

MODULE_DESCRIPTION(DM_NAME " dust test target");
MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");