// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/bio.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "target"
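
/*
 * All registered target types live on the _targets list; _lock is a
 * read/write semaphore that serializes lookups against registration and
 * unregistration.
 */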
static LIST_HEAD(_targets);
static DECLARE_RWSEM(_lock);

static inline struct target_type *__find_target_type(const char *name)
{
	struct target_type *tt;

	list_for_each_entry(tt, &_targets, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct target_type *get_target_type(const char *name)
{
	struct target_type *tt;

	down_read(&_lock);

	tt = __find_target_type(name);
	if (tt && !try_module_get(tt->module))
		tt = NULL;

	up_read(&_lock);
	return tt;
}

static void load_module(const char *name)
{
	request_module("dm-%s", name);
}
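
/*
 * dm_get_target_type - look up a target type by name, loading its module
 * if necessary.
 *
 * If the target is not yet registered, request_module("dm-<name>") is
 * tried once and the lookup is repeated.  A successful lookup takes a
 * reference on the implementing module; callers drop it again with
 * dm_put_target_type().
 */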
struct target_type *dm_get_target_type(const char *name)
{
	struct target_type *tt = get_target_type(name);

	if (!tt) {
		load_module(name);
		tt = get_target_type(name);
	}

	return tt;
}

void dm_put_target_type(struct target_type *tt)
{
	down_read(&_lock);
	module_put(tt->module);
	up_read(&_lock);
}

int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param)
{
	struct target_type *tt;

	down_read(&_lock);
	list_for_each_entry(tt, &_targets, list)
		iter_func(tt, param);
	up_read(&_lock);

	return 0;
}
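
/*
 * dm_register_target - make a target type available to device-mapper tables.
 *
 * Returns 0 on success or -EEXIST if a target of the same name is already
 * registered.
 */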
int dm_register_target(struct target_type *tt)
{
	int rv = 0;

	down_write(&_lock);
	if (__find_target_type(tt->name)) {
		DMERR("%s: '%s' target already registered",
		      __func__, tt->name);
		rv = -EEXIST;
	} else {
		list_add(&tt->list, &_targets);
	}
	up_write(&_lock);

	return rv;
}
EXPORT_SYMBOL(dm_register_target);
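
/*
 * dm_unregister_target - remove a previously registered target type.
 *
 * Unregistering a name that was never registered is treated as a fatal
 * bug (BUG()) rather than an error return.
 */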
void dm_unregister_target(struct target_type *tt)
{
	down_write(&_lock);
	if (!__find_target_type(tt->name)) {
		DMCRIT("Unregistering unrecognised target: %s", tt->name);
		BUG();
	}

	list_del(&tt->list);

	up_write(&_lock);
}
EXPORT_SYMBOL(dm_unregister_target);
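
/*
 * A minimal sketch of how a target module would typically pair the two
 * exports above.  "mytarget", mytarget_ctr, mytarget_dtr and mytarget_map
 * are hypothetical names used only for illustration:
 *
 *	static struct target_type mytarget_target = {
 *		.name    = "mytarget",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = mytarget_ctr,
 *		.dtr     = mytarget_dtr,
 *		.map     = mytarget_map,
 *	};
 *
 *	static int __init mytarget_init(void)
 *	{
 *		return dm_register_target(&mytarget_target);
 *	}
 *
 *	static void __exit mytarget_exit(void)
 *	{
 *		dm_unregister_target(&mytarget_target);
 *	}
 *
 *	module_init(mytarget_init);
 *	module_exit(mytarget_exit);
 */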

/*
 * io-err: always fails an I/O, useful for bringing
 * up LVs that have holes in them.
 */
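
/*
 * Example table lines (illustrative only): the target may be used with no
 * arguments, or, as io_err_ctr() below shows, with a backing device and
 * start sector in the same format as dm-linear:
 *
 *	dmsetup create err0 --table "0 1048576 error"
 *	dmsetup create err1 --table "0 1048576 error /dev/sdX 0"
 */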
struct io_err_c {
	struct dm_dev *dev;
	sector_t start;
};

static int io_err_get_args(struct dm_target *tt, unsigned int argc, char **args)
{
	unsigned long long start;
	struct io_err_c *ioec;
	char dummy;
	int ret;

	ioec = kmalloc(sizeof(*ioec), GFP_KERNEL);
	if (!ioec) {
		tt->error = "Cannot allocate io_err context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(args[1], "%llu%c", &start, &dummy) != 1 ||
	    start != (sector_t)start) {
		tt->error = "Invalid device sector";
		goto bad;
	}
	ioec->start = start;

	ret = dm_get_device(tt, args[0], dm_table_get_mode(tt->table), &ioec->dev);
	if (ret) {
		tt->error = "Device lookup failed";
		goto bad;
	}

	tt->private = ioec;

	return 0;

bad:
	kfree(ioec);

	return ret;
}

static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
{
	/*
	 * If we have arguments, assume it is the path to the backing
	 * block device and its mapping start sector (same as dm-linear).
	 * In this case, get the device so that we can get its limits.
	 */
	if (argc == 2) {
		int ret = io_err_get_args(tt, argc, args);

		if (ret)
			return ret;
	}

	/*
	 * Return error for discards instead of -EOPNOTSUPP.
	 */
	tt->num_discard_bios = 1;
	tt->discards_supported = true;

	return 0;
}

static void io_err_dtr(struct dm_target *tt)
{
	struct io_err_c *ioec = tt->private;

	if (ioec) {
		dm_put_device(tt, ioec->dev);
		kfree(ioec);
	}
}
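
/*
 * Both the bio-based and the request-based map methods return
 * DM_MAPIO_KILL, so every I/O submitted to the target is completed with
 * an error (BLK_STS_IOERR).
 */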
static int io_err_map(struct dm_target *tt, struct bio *bio)
{
	return DM_MAPIO_KILL;
}

static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
				   union map_info *map_context,
				   struct request **clone)
{
	return DM_MAPIO_KILL;
}

static void io_err_release_clone_rq(struct request *clone,
				    union map_info *map_context)
{
}

#ifdef CONFIG_BLK_DEV_ZONED
static sector_t io_err_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct io_err_c *ioec = ti->private;

	return ioec->start + dm_target_offset(ti, bi_sector);
}

static int io_err_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct io_err_c *ioec = ti->private;

	/*
	 * This should never be called when we do not have a backing device,
	 * as that means the target is not a zoned one.
	 */
	if (WARN_ON_ONCE(!ioec))
		return -EIO;

	return dm_report_zones(ioec->dev->bdev, ioec->start,
			       io_err_map_sector(ti, args->next_sector),
			       args, nr_zones);
}
#else
#define io_err_report_zones NULL
#endif

static int io_err_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct io_err_c *ioec = ti->private;

	if (!ioec)
		return 0;

	return fn(ti, ioec->dev, ioec->start, ti->len, data);
}

static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	limits->max_hw_discard_sectors = UINT_MAX;
	limits->discard_granularity = 512;
}

static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	return -EIO;
}
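
/*
 * The "error" target is built into DM core rather than shipped as a
 * separate module.  DM_TARGET_ZONED_HM advertises support for host-managed
 * zoned devices (report_zones above is only usable when a backing device
 * was supplied); see include/linux/device-mapper.h for the meaning of
 * DM_TARGET_WILDCARD.
 */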
static struct target_type error_target = {
	.name = "error",
	.version = {1, 7, 0},
	.features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
	.ctr = io_err_ctr,
	.dtr = io_err_dtr,
	.map = io_err_map,
	.clone_and_map_rq = io_err_clone_and_map_rq,
	.release_clone_rq = io_err_release_clone_rq,
	.iterate_devices = io_err_iterate_devices,
	.io_hints = io_err_io_hints,
	.direct_access = io_err_dax_direct_access,
	.report_zones = io_err_report_zones,
};
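
/*
 * dm_target_init()/dm_target_exit() are called during DM core
 * initialisation and shutdown so that the "error" target is always
 * registered.
 */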
int __init dm_target_init(void)
{
	return dm_register_target(&error_target);
}

void dm_target_exit(void)
{
	dm_unregister_target(&error_target);
}