// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"
static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);
static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}
static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}
static void load_module(const char *name)
{
	request_module("dm-%s", name);
}
struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}
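/*
 * Note (illustrative sketch, not part of the original file): the
 * request_module("dm-%s", name) autoload above can only find a target
 * module that declares a matching alias. A hypothetical out-of-tree
 * "foo" target would therefore carry something like:
 *
 *	MODULE_ALIAS("dm-foo");
 */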
void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}
int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);
void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);
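/*
 * Usage sketch (assumed example, not part of the original file): a
 * target module typically registers itself from its init hook and
 * unregisters on exit. The "foo" target and its foo_ctr/foo_dtr/foo_map
 * hooks below are hypothetical:
 *
 *	static struct target_type foo_target = {
 *		.name    = "foo",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = foo_ctr,
 *		.dtr     = foo_dtr,
 *		.map     = foo_map,
 *	};
 *
 *	static int __init dm_foo_init(void)
 *	{
 *		return dm_register_target(&foo_target);
 *	}
 *
 *	static void __exit dm_foo_exit(void)
 *	{
 *		dm_unregister_target(&foo_target);
 *	}
 *	module_init(dm_foo_init);
 *	module_exit(dm_foo_exit);
 */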
/*
 * io-err: always fails an I/O, useful for bringing
 * up LVs that have holes in them.
 */
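/*
 * Userspace usage sketch (assumed example): a table line of the form
 * "<start> <len> error" maps a range that fails every I/O, e.g. a
 * 1 MiB (2048 sector) device:
 *
 *	dmsetup create failing --table "0 2048 error"
 */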
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};

static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;
	return 0;

bad:
	kfree(ioec);
	return ret;
}
static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * If we have arguments, assume it is the path to the backing
	 * block device and its mapping start sector (same as dm-linear).
	 * In this case, get the device so that we can get its limits.
	 */
	if (argc == 2) {
		int ret = io_err_get_args(tt, argc, args);

		if (ret)
			return ret;
	}

	/*
	 * Return error for discards instead of -EOPNOTSUPP
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}
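/*
 * Usage sketch (assumed example): with the optional arguments handled
 * above, the error target can also name a backing device and start
 * sector (dm-linear style), letting it pick up that device's limits;
 * the device path below is hypothetical:
 *
 *	dmsetup create failing --table "0 2048 error /dev/sdb 0"
 */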
static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}
static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}
static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}
static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}
#ifdef CONFIG_BLK_DEV_ZONED
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}
static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device,
	 * as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif
static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}
static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}
static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}
static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr  = io_err_ctr,
	.dtr  = io_err_dtr,
	.map  = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};
int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}
void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}