drivers/md/dm-ioctl.c  (mach-moxart.git, init from v2.6.32.60)
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>

#include <asm/uaccess.h>

#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
/*-----------------------------------------------------------------
 * The ioctl interface needs to be able to look up devices by
 * name or uuid.
 *---------------------------------------------------------------*/
struct hash_cell {
	struct list_head name_list;
	struct list_head uuid_list;

	char *name;
	char *uuid;
	struct mapped_device *md;
	struct dm_table *new_map;
};

struct vers_iter {
	size_t param_size;
	struct dm_target_versions *vers, *old_vers;
	char *end;
	uint32_t flags;
};


#define NUM_BUCKETS 64
#define MASK_BUCKETS (NUM_BUCKETS - 1)
static struct list_head _name_buckets[NUM_BUCKETS];
static struct list_head _uuid_buckets[NUM_BUCKETS];

static void dm_hash_remove_all(int keep_open_devices);

/*
 * Guards access to both hash tables.
 */
static DECLARE_RWSEM(_hash_lock);

/*
 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
 */
static DEFINE_MUTEX(dm_hash_cells_mutex);
static void init_buckets(struct list_head *buckets)
{
	unsigned int i;

	for (i = 0; i < NUM_BUCKETS; i++)
		INIT_LIST_HEAD(buckets + i);
}

static int dm_hash_init(void)
{
	init_buckets(_name_buckets);
	init_buckets(_uuid_buckets);
	return 0;
}

static void dm_hash_exit(void)
{
	dm_hash_remove_all(0);
}

/*-----------------------------------------------------------------
 * Hash function:
 * We're not really concerned with the str hash function being
 * fast since it's only used by the ioctl interface.
 *---------------------------------------------------------------*/
static unsigned int hash_str(const char *str)
{
	const unsigned int hash_mult = 2654435387U;
	unsigned int h = 0;

	while (*str)
		h = (h + (unsigned int) *str++) * hash_mult;

	return h & MASK_BUCKETS;
}
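/*
 * Note: hash_str() is a simple multiplicative hash. Because NUM_BUCKETS
 * is a power of two, masking with MASK_BUCKETS reduces the result to a
 * valid bucket index (equivalent to h % NUM_BUCKETS).
 */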
/*-----------------------------------------------------------------
 * Code for looking up a device by name
 *---------------------------------------------------------------*/
static struct hash_cell *__get_name_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _name_buckets + h, name_list)
		if (!strcmp(hc->name, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}

static struct hash_cell *__get_uuid_cell(const char *str)
{
	struct hash_cell *hc;
	unsigned int h = hash_str(str);

	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
		if (!strcmp(hc->uuid, str)) {
			dm_get(hc->md);
			return hc;
		}

	return NULL;
}
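/*
 * Note: both lookup helpers above take a reference on the underlying
 * mapped_device (dm_get) before returning the cell; callers are expected
 * to drop that reference with dm_put() when they are done with it.
 */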
/*-----------------------------------------------------------------
 * Inserting, removing and renaming a device.
 *---------------------------------------------------------------*/
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
				    struct mapped_device *md)
{
	struct hash_cell *hc;

	hc = kmalloc(sizeof(*hc), GFP_KERNEL);
	if (!hc)
		return NULL;

	hc->name = kstrdup(name, GFP_KERNEL);
	if (!hc->name) {
		kfree(hc);
		return NULL;
	}

	if (!uuid)
		hc->uuid = NULL;

	else {
		hc->uuid = kstrdup(uuid, GFP_KERNEL);
		if (!hc->uuid) {
			kfree(hc->name);
			kfree(hc);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&hc->name_list);
	INIT_LIST_HEAD(&hc->uuid_list);
	hc->md = md;
	hc->new_map = NULL;
	return hc;
}

static void free_cell(struct hash_cell *hc)
{
	if (hc) {
		kfree(hc->name);
		kfree(hc->uuid);
		kfree(hc);
	}
}
/*
 * The kdev_t and uuid of a device can never change once it is
 * initially inserted.
 */
static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
{
	struct hash_cell *cell, *hc;

	/*
	 * Allocate the new cells.
	 */
	cell = alloc_cell(name, uuid, md);
	if (!cell)
		return -ENOMEM;

	/*
	 * Insert the cell into both hash tables.
	 */
	down_write(&_hash_lock);
	hc = __get_name_cell(name);
	if (hc) {
		dm_put(hc->md);
		goto bad;
	}

	list_add(&cell->name_list, _name_buckets + hash_str(name));

	if (uuid) {
		hc = __get_uuid_cell(uuid);
		if (hc) {
			list_del(&cell->name_list);
			dm_put(hc->md);
			goto bad;
		}
		list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
	}
	dm_get(md);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(md, cell);
	mutex_unlock(&dm_hash_cells_mutex);
	up_write(&_hash_lock);

	return 0;

 bad:
	up_write(&_hash_lock);
	free_cell(cell);
	return -EBUSY;
}
static void __hash_remove(struct hash_cell *hc)
{
	struct dm_table *table;

	/* remove from the dev hash */
	list_del(&hc->uuid_list);
	list_del(&hc->name_list);
	mutex_lock(&dm_hash_cells_mutex);
	dm_set_mdptr(hc->md, NULL);
	mutex_unlock(&dm_hash_cells_mutex);

	table = dm_get_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	dm_put(hc->md);
	free_cell(hc);
}

static void dm_hash_remove_all(int keep_open_devices)
{
	int i, dev_skipped;
	struct hash_cell *hc;
	struct mapped_device *md;

retry:
	dev_skipped = 0;

	down_write(&_hash_lock);

	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry(hc, _name_buckets + i, name_list) {
			md = hc->md;
			dm_get(md);

			if (keep_open_devices && dm_lock_for_deletion(md)) {
				dm_put(md);
				dev_skipped++;
				continue;
			}

			__hash_remove(hc);

			up_write(&_hash_lock);

			dm_put(md);

			/*
			 * Some mapped devices may be using other mapped
			 * devices, so repeat until we make no further
			 * progress. If a new mapped device is created
			 * here it will also get removed.
			 */
			goto retry;
		}
	}

	up_write(&_hash_lock);

	if (dev_skipped)
		DMWARN("remove_all left %d open device(s)", dev_skipped);
}
static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
{
	char *new_name, *old_name;
	struct hash_cell *hc;
	struct dm_table *table;

	/*
	 * duplicate new.
	 */
	new_name = kstrdup(new, GFP_KERNEL);
	if (!new_name)
		return -ENOMEM;

	down_write(&_hash_lock);

	/*
	 * Is new free ?
	 */
	hc = __get_name_cell(new);
	if (hc) {
		DMWARN("asked to rename to an already existing name %s -> %s",
		       old, new);
		dm_put(hc->md);
		up_write(&_hash_lock);
		kfree(new_name);
		return -EBUSY;
	}

	/*
	 * Is there such a device as 'old' ?
	 */
	hc = __get_name_cell(old);
	if (!hc) {
		DMWARN("asked to rename a non existent device %s -> %s",
		       old, new);
		up_write(&_hash_lock);
		kfree(new_name);
		return -ENXIO;
	}

	/*
	 * rename and move the name cell.
	 */
	list_del(&hc->name_list);
	old_name = hc->name;
	mutex_lock(&dm_hash_cells_mutex);
	hc->name = new_name;
	mutex_unlock(&dm_hash_cells_mutex);
	list_add(&hc->name_list, _name_buckets + hash_str(new_name));

	/*
	 * Wake up any dm event waiters.
	 */
	table = dm_get_table(hc->md);
	if (table) {
		dm_table_event(table);
		dm_table_put(table);
	}

	dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);

	dm_put(hc->md);
	up_write(&_hash_lock);
	kfree(old_name);
	return 0;
}
/*-----------------------------------------------------------------
 * Implementation of the ioctl commands
 *---------------------------------------------------------------*/
/*
 * All the ioctl commands get dispatched to functions with this
 * prototype.
 */
typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);

static int remove_all(struct dm_ioctl *param, size_t param_size)
{
	dm_hash_remove_all(1);
	param->data_size = 0;
	return 0;
}

/*
 * Round up the ptr to an 8-byte boundary.
 */
#define ALIGN_MASK 7
static inline void *align_ptr(void *ptr)
{
	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
}

/*
 * Retrieves the data payload buffer from an already allocated
 * struct dm_ioctl.
 */
static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
			       size_t *len)
{
	param->data_start = align_ptr(param + 1) - (void *) param;

	if (param->data_start < param_size)
		*len = param_size - param->data_start;
	else
		*len = 0;

	return ((void *) param) + param->data_start;
}
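/*
 * The buffer copied in from userspace is laid out as the fixed
 * struct dm_ioctl header followed by a variable payload:
 *
 *	0            data_start                        param_size
 *	+------------+---------------------------------+
 *	| dm_ioctl   | payload (8-byte aligned start)   |
 *	+------------+---------------------------------+
 *
 * get_result_buffer() returns the payload pointer and how much of the
 * user-supplied buffer remains for results.
 */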
static int list_devices(struct dm_ioctl *param, size_t param_size)
{
	unsigned int i;
	struct hash_cell *hc;
	size_t len, needed = 0;
	struct gendisk *disk;
	struct dm_name_list *nl, *old_nl = NULL;

	down_write(&_hash_lock);

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			needed += sizeof(struct dm_name_list);
			needed += strlen(hc->name) + 1;
			needed += ALIGN_MASK;
		}
	}

	/*
	 * Grab our output buffer.
	 */
	nl = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	nl->dev = 0;	/* Flags no data */

	/*
	 * Now loop through filling out the names.
	 */
	for (i = 0; i < NUM_BUCKETS; i++) {
		list_for_each_entry (hc, _name_buckets + i, name_list) {
			if (old_nl)
				old_nl->next = (uint32_t) ((void *) nl -
							   (void *) old_nl);
			disk = dm_disk(hc->md);
			nl->dev = huge_encode_dev(disk_devt(disk));
			nl->next = 0;
			strcpy(nl->name, hc->name);

			old_nl = nl;
			nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
		}
	}

 out:
	up_write(&_hash_lock);
	return 0;
}
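/*
 * Note: the dm_name_list entries written above form a chain: each
 * entry's 'next' field holds the byte offset from that entry to the
 * following one, and 0 marks the final entry.
 */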
static void list_version_get_needed(struct target_type *tt, void *needed_param)
{
	size_t *needed = needed_param;

	*needed += sizeof(struct dm_target_versions);
	*needed += strlen(tt->name);
	*needed += ALIGN_MASK;
}

static void list_version_get_info(struct target_type *tt, void *param)
{
	struct vers_iter *info = param;

	/* Check space - it might have changed since the first iteration */
	if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
	    info->end) {

		info->flags = DM_BUFFER_FULL_FLAG;
		return;
	}

	if (info->old_vers)
		info->old_vers->next = (uint32_t) ((void *)info->vers -
						   (void *)info->old_vers);
	info->vers->version[0] = tt->version[0];
	info->vers->version[1] = tt->version[1];
	info->vers->version[2] = tt->version[2];
	info->vers->next = 0;
	strcpy(info->vers->name, tt->name);

	info->old_vers = info->vers;
	info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}

static int list_versions(struct dm_ioctl *param, size_t param_size)
{
	size_t len, needed = 0;
	struct dm_target_versions *vers;
	struct vers_iter iter_info;

	/*
	 * Loop through all the devices working out how much
	 * space we need.
	 */
	dm_target_iterate(list_version_get_needed, &needed);

	/*
	 * Grab our output buffer.
	 */
	vers = get_result_buffer(param, param_size, &len);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		goto out;
	}
	param->data_size = param->data_start + needed;

	iter_info.param_size = param_size;
	iter_info.old_vers = NULL;
	iter_info.vers = vers;
	iter_info.flags = 0;
	iter_info.end = (char *)vers + len;

	/*
	 * Now loop through filling out the names & versions.
	 */
	dm_target_iterate(list_version_get_info, &iter_info);
	param->flags |= iter_info.flags;

 out:
	return 0;
}
static int check_name(const char *name)
{
	if (strchr(name, '/')) {
		DMWARN("invalid device name");
		return -EINVAL;
	}

	return 0;
}

/*
 * Fills in a dm_ioctl structure, ready for sending back to
 * userland.
 */
static int __dev_status(struct mapped_device *md, struct dm_ioctl *param)
{
	struct gendisk *disk = dm_disk(md);
	struct dm_table *table;

	param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
			  DM_ACTIVE_PRESENT_FLAG);

	if (dm_suspended(md))
		param->flags |= DM_SUSPEND_FLAG;

	param->dev = huge_encode_dev(disk_devt(disk));

	/*
	 * Yes, this will be out of date by the time it gets back
	 * to userland, but it is still very useful for
	 * debugging.
	 */
	param->open_count = dm_open_count(md);

	if (get_disk_ro(disk))
		param->flags |= DM_READONLY_FLAG;

	param->event_nr = dm_get_event_nr(md);

	table = dm_get_table(md);
	if (table) {
		param->flags |= DM_ACTIVE_PRESENT_FLAG;
		param->target_count = dm_table_get_num_targets(table);
		dm_table_put(table);
	} else
		param->target_count = 0;

	return 0;
}
static int dev_create(struct dm_ioctl *param, size_t param_size)
{
	int r, m = DM_ANY_MINOR;
	struct mapped_device *md;

	r = check_name(param->name);
	if (r)
		return r;

	if (param->flags & DM_PERSISTENT_DEV_FLAG)
		m = MINOR(huge_decode_dev(param->dev));

	r = dm_create(m, &md);
	if (r)
		return r;

	r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
	if (r) {
		dm_put(md);
		return r;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	r = __dev_status(md, param);
	dm_put(md);

	return r;
}
/*
 * Always use UUID for lookups if it's present, otherwise use name or dev.
 */
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
	struct mapped_device *md;
	void *mdptr = NULL;

	if (*param->uuid)
		return __get_uuid_cell(param->uuid);

	if (*param->name)
		return __get_name_cell(param->name);

	md = dm_get_md(huge_decode_dev(param->dev));
	if (!md)
		goto out;

	mdptr = dm_get_mdptr(md);
	if (!mdptr)
		dm_put(md);

out:
	return mdptr;
}

static struct mapped_device *find_device(struct dm_ioctl *param)
{
	struct hash_cell *hc;
	struct mapped_device *md = NULL;

	down_read(&_hash_lock);
	hc = __find_device_hash_cell(param);
	if (hc) {
		md = hc->md;

		/*
		 * Sneakily write in both the name and the uuid
		 * while we have the cell.
		 */
		strncpy(param->name, hc->name, sizeof(param->name));
		if (hc->uuid)
			strncpy(param->uuid, hc->uuid, sizeof(param->uuid) - 1);
		else
			param->uuid[0] = '\0';

		if (hc->new_map)
			param->flags |= DM_INACTIVE_PRESENT_FLAG;
		else
			param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
	}
	up_read(&_hash_lock);

	return md;
}
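/*
 * Note: on success find_device() returns the mapped_device with a
 * reference held (taken in __find_device_hash_cell() via the lookup
 * helpers or dm_get_md()); every caller below releases it with dm_put().
 */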
static int dev_remove(struct dm_ioctl *param, size_t param_size)
{
	struct hash_cell *hc;
	struct mapped_device *md;
	int r;

	down_write(&_hash_lock);
	hc = __find_device_hash_cell(param);

	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	/*
	 * Ensure the device is not open and nothing further can open it.
	 */
	r = dm_lock_for_deletion(md);
	if (r) {
		DMWARN("unable to remove open device %s", hc->name);
		up_write(&_hash_lock);
		dm_put(md);
		return r;
	}

	__hash_remove(hc);
	up_write(&_hash_lock);

	dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);

	dm_put(md);
	param->data_size = 0;
	return 0;
}
/*
 * Check a string doesn't overrun the chunk of
 * memory we copied from userland.
 */
static int invalid_str(char *str, void *end)
{
	while ((void *) str < end)
		if (!*str++)
			return 0;

	return -EINVAL;
}

static int dev_rename(struct dm_ioctl *param, size_t param_size)
{
	int r;
	char *new_name = (char *) param + param->data_start;

	if (new_name < param->data ||
	    invalid_str(new_name, (void *) param + param_size) ||
	    strlen(new_name) > DM_NAME_LEN - 1) {
		DMWARN("Invalid new logical volume name supplied.");
		return -EINVAL;
	}

	r = check_name(new_name);
	if (r)
		return r;

	param->data_size = 0;
	return dm_hash_rename(param->event_nr, param->name, new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
{
	int r = -EINVAL, x;
	struct mapped_device *md;
	struct hd_geometry geometry;
	unsigned long indata[4];
	char *geostr = (char *) param + param->data_start;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (geostr < param->data ||
	    invalid_str(geostr, (void *) param + param_size)) {
		DMWARN("Invalid geometry supplied.");
		goto out;
	}

	x = sscanf(geostr, "%lu %lu %lu %lu", indata,
		   indata + 1, indata + 2, indata + 3);

	if (x != 4) {
		DMWARN("Unable to interpret geometry settings.");
		goto out;
	}

	if (indata[0] > 65535 || indata[1] > 255 ||
	    indata[2] > 255 || indata[3] > ULONG_MAX) {
		DMWARN("Geometry exceeds range limits.");
		goto out;
	}

	geometry.cylinders = indata[0];
	geometry.heads = indata[1];
	geometry.sectors = indata[2];
	geometry.start = indata[3];

	r = dm_set_geometry(md, &geometry);
	if (!r)
		r = __dev_status(md, param);

	param->data_size = 0;

out:
	dm_put(md);
	return r;
}
static int do_suspend(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	if (param->flags & DM_SKIP_LOCKFS_FLAG)
		suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
	if (param->flags & DM_NOFLUSH_FLAG)
		suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;

	if (!dm_suspended(md))
		r = dm_suspend(md, suspend_flags);

	if (!r)
		r = __dev_status(md, param);

	dm_put(md);
	return r;
}
static int do_resume(struct dm_ioctl *param)
{
	int r = 0;
	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
	struct hash_cell *hc;
	struct mapped_device *md;
	struct dm_table *new_map;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	md = hc->md;

	new_map = hc->new_map;
	hc->new_map = NULL;
	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	up_write(&_hash_lock);

	/* Do we need to load a new map ? */
	if (new_map) {
		/* Suspend if it isn't already suspended */
		if (param->flags & DM_SKIP_LOCKFS_FLAG)
			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
		if (param->flags & DM_NOFLUSH_FLAG)
			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
		if (!dm_suspended(md))
			dm_suspend(md, suspend_flags);

		r = dm_swap_table(md, new_map);
		if (r) {
			dm_table_destroy(new_map);
			dm_put(md);
			return r;
		}

		if (dm_table_get_mode(new_map) & FMODE_WRITE)
			set_disk_ro(dm_disk(md), 0);
		else
			set_disk_ro(dm_disk(md), 1);
	}

	if (dm_suspended(md))
		r = dm_resume(md);

	if (!r) {
		dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
		r = __dev_status(md, param);
	}

	dm_put(md);
	return r;
}

/*
 * Set or unset the suspension state of a device.
 * If the device already is in the requested state we just return its status.
 */
static int dev_suspend(struct dm_ioctl *param, size_t param_size)
{
	if (param->flags & DM_SUSPEND_FLAG)
		return do_suspend(param);

	return do_resume(param);
}
/*
 * Copies device info back to user space, used by
 * the create and info ioctls.
 */
static int dev_status(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = __dev_status(md, param);
	dm_put(md);
	return r;
}
/*
 * Build up the status struct for each target
 */
static void retrieve_status(struct dm_table *table,
			    struct dm_ioctl *param, size_t param_size)
{
	unsigned int i, num_targets;
	struct dm_target_spec *spec;
	char *outbuf, *outptr;
	status_type_t type;
	size_t remaining, len, used = 0;

	outptr = outbuf = get_result_buffer(param, param_size, &len);

	if (param->flags & DM_STATUS_TABLE_FLAG)
		type = STATUSTYPE_TABLE;
	else
		type = STATUSTYPE_INFO;

	/* Get all the target info */
	num_targets = dm_table_get_num_targets(table);
	for (i = 0; i < num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(table, i);

		remaining = len - (outptr - outbuf);
		if (remaining <= sizeof(struct dm_target_spec)) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		spec = (struct dm_target_spec *) outptr;

		spec->status = 0;
		spec->sector_start = ti->begin;
		spec->length = ti->len;
		strncpy(spec->target_type, ti->type->name,
			sizeof(spec->target_type));

		outptr += sizeof(struct dm_target_spec);
		remaining = len - (outptr - outbuf);
		if (remaining <= 0) {
			param->flags |= DM_BUFFER_FULL_FLAG;
			break;
		}

		/* Get the status/table string from the target driver */
		if (ti->type->status) {
			if (ti->type->status(ti, type, outptr, remaining)) {
				param->flags |= DM_BUFFER_FULL_FLAG;
				break;
			}
		} else
			outptr[0] = '\0';

		outptr += strlen(outptr) + 1;
		used = param->data_start + (outptr - outbuf);

		outptr = align_ptr(outptr);
		spec->next = outptr - outbuf;
	}

	if (used)
		param->data_size = used;

	param->target_count = num_targets;
}
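/*
 * Note: when the supplied buffer is too small, retrieve_status() sets
 * DM_BUFFER_FULL_FLAG rather than failing; userspace (e.g. libdevmapper)
 * is expected to retry the ioctl with a larger data_size.
 */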
/*
 * Wait for a device to report an event
 */
static int dev_wait(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	/*
	 * Wait for a notification event
	 */
	if (dm_wait_event(md, param->event_nr)) {
		r = -ERESTARTSYS;
		goto out;
	}

	/*
	 * The userland program is going to want to know what
	 * changed to trigger the event, so we may as well tell
	 * him and save an ioctl.
	 */
	r = __dev_status(md, param);
	if (r)
		goto out;

	table = dm_get_table(md);
	if (table) {
		retrieve_status(table, param, param_size);
		dm_table_put(table);
	}

 out:
	dm_put(md);
	return r;
}
static inline fmode_t get_mode(struct dm_ioctl *param)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE;

	if (param->flags & DM_READONLY_FLAG)
		mode = FMODE_READ;

	return mode;
}

static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
		       struct dm_target_spec **spec, char **target_params)
{
	*spec = (struct dm_target_spec *) ((unsigned char *) last + next);
	*target_params = (char *) (*spec + 1);

	/* Reject offsets that point back into the previous spec or past
	 * the end of the buffer copied from userland. */
	if (*spec < (last + 1) || ((void *) *spec > end))
		return -EINVAL;

	return invalid_str(*target_params, end);
}
static int populate_table(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	int r;
	unsigned int i = 0;
	struct dm_target_spec *spec = (struct dm_target_spec *) param;
	uint32_t next = param->data_start;
	void *end = (void *) param + param_size;
	char *target_params;

	if (!param->target_count) {
		DMWARN("populate_table: no targets specified");
		return -EINVAL;
	}

	for (i = 0; i < param->target_count; i++) {

		r = next_target(spec, next, end, &spec, &target_params);
		if (r) {
			DMWARN("unable to find target");
			return r;
		}

		r = dm_table_add_target(table, spec->target_type,
					(sector_t) spec->sector_start,
					(sector_t) spec->length,
					target_params);
		if (r) {
			DMWARN("error adding target to table");
			return r;
		}

		next = spec->next;
	}

	r = dm_table_set_type(table);
	if (r) {
		DMWARN("unable to set table type");
		return r;
	}

	return dm_table_complete(table);
}
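/*
 * Note on the table-load wire format handled above: the payload is a
 * sequence of struct dm_target_spec headers, each immediately followed
 * by its nul-terminated parameter string; a spec's 'next' field gives
 * the offset from the start of that spec to the next one.
 */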
static int table_prealloc_integrity(struct dm_table *t,
				    struct mapped_device *md)
{
	struct list_head *devices = dm_table_get_devices(t);
	struct dm_dev_internal *dd;

	list_for_each_entry(dd, devices, list)
		if (bdev_get_integrity(dd->dm_dev.bdev))
			return blk_integrity_register(dm_disk(md), NULL);

	return 0;
}

static int table_load(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct dm_table *t;
	struct mapped_device *md;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = dm_table_create(&t, get_mode(param), param->target_count, md);
	if (r)
		goto out;

	r = populate_table(t, param, param_size);
	if (r) {
		dm_table_destroy(t);
		goto out;
	}

	r = table_prealloc_integrity(t, md);
	if (r) {
		DMERR("%s: could not register integrity profile.",
		      dm_device_name(md));
		dm_table_destroy(t);
		goto out;
	}

	r = dm_table_alloc_md_mempools(t);
	if (r) {
		DMWARN("unable to allocate mempools for this table");
		dm_table_destroy(t);
		goto out;
	}

	down_write(&_hash_lock);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		DMWARN("device has been removed from the dev hash table.");
		dm_table_destroy(t);
		up_write(&_hash_lock);
		r = -ENXIO;
		goto out;
	}

	if (hc->new_map)
		dm_table_destroy(hc->new_map);
	hc->new_map = t;
	up_write(&_hash_lock);

	param->flags |= DM_INACTIVE_PRESENT_FLAG;
	r = __dev_status(md, param);

out:
	dm_put(md);

	return r;
}
static int table_clear(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct hash_cell *hc;
	struct mapped_device *md;

	down_write(&_hash_lock);

	hc = __find_device_hash_cell(param);
	if (!hc) {
		DMWARN("device doesn't appear to be in the dev hash table.");
		up_write(&_hash_lock);
		return -ENXIO;
	}

	if (hc->new_map) {
		dm_table_destroy(hc->new_map);
		hc->new_map = NULL;
	}

	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;

	r = __dev_status(hc->md, param);
	md = hc->md;
	up_write(&_hash_lock);
	dm_put(md);
	return r;
}
/*
 * Retrieves a list of devices used by a particular dm device.
 */
static void retrieve_deps(struct dm_table *table,
			  struct dm_ioctl *param, size_t param_size)
{
	unsigned int count = 0;
	struct list_head *tmp;
	size_t len, needed;
	struct dm_dev_internal *dd;
	struct dm_target_deps *deps;

	deps = get_result_buffer(param, param_size, &len);

	/*
	 * Count the devices.
	 */
	list_for_each (tmp, dm_table_get_devices(table))
		count++;

	/*
	 * Check we have enough space.
	 */
	needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
	if (len < needed) {
		param->flags |= DM_BUFFER_FULL_FLAG;
		return;
	}

	/*
	 * Fill in the devices.
	 */
	deps->count = count;
	count = 0;
	list_for_each_entry (dd, dm_table_get_devices(table), list)
		deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);

	param->data_size = param->data_start + needed;
}

static int table_deps(struct dm_ioctl *param, size_t param_size)
{
	int r = 0;
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = __dev_status(md, param);
	if (r)
		goto out;

	table = dm_get_table(md);
	if (table) {
		retrieve_deps(table, param, param_size);
		dm_table_put(table);
	}

 out:
	dm_put(md);
	return r;
}
/*
 * Return the status of a device as a text string for each
 * target.
 */
static int table_status(struct dm_ioctl *param, size_t param_size)
{
	int r;
	struct mapped_device *md;
	struct dm_table *table;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = __dev_status(md, param);
	if (r)
		goto out;

	table = dm_get_table(md);
	if (table) {
		retrieve_status(table, param, param_size);
		dm_table_put(table);
	}

 out:
	dm_put(md);
	return r;
}
/*
 * Pass a message to the target that's at the supplied device offset.
 */
static int target_message(struct dm_ioctl *param, size_t param_size)
{
	int r, argc;
	char **argv;
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	struct dm_target_msg *tmsg = (void *) param + param->data_start;

	md = find_device(param);
	if (!md)
		return -ENXIO;

	r = __dev_status(md, param);
	if (r)
		goto out;

	if (tmsg < (struct dm_target_msg *) param->data ||
	    invalid_str(tmsg->message, (void *) param + param_size)) {
		DMWARN("Invalid target message parameters.");
		r = -EINVAL;
		goto out;
	}

	r = dm_split_args(&argc, &argv, tmsg->message);
	if (r) {
		DMWARN("Failed to split target message parameters");
		goto out;
	}

	table = dm_get_table(md);
	if (!table)
		goto out_argv;

	ti = dm_table_find_target(table, tmsg->sector);
	if (!dm_target_is_valid(ti)) {
		DMWARN("Target message sector outside device.");
		r = -EINVAL;
	} else if (ti->type->message)
		r = ti->type->message(ti, argc, argv);
	else {
		DMWARN("Target type does not support messages");
		r = -EINVAL;
	}

	dm_table_put(table);
 out_argv:
	kfree(argv);
 out:
	param->data_size = 0;
	dm_put(md);
	return r;
}
/*-----------------------------------------------------------------
 * Implementation of open/close/ioctl on the special char
 * device.
 *---------------------------------------------------------------*/
static ioctl_fn lookup_ioctl(unsigned int cmd)
{
	static struct {
		int cmd;
		ioctl_fn fn;
	} _ioctls[] = {
		{DM_VERSION_CMD, NULL},	/* version is dealt with elsewhere */
		{DM_REMOVE_ALL_CMD, remove_all},
		{DM_LIST_DEVICES_CMD, list_devices},

		{DM_DEV_CREATE_CMD, dev_create},
		{DM_DEV_REMOVE_CMD, dev_remove},
		{DM_DEV_RENAME_CMD, dev_rename},
		{DM_DEV_SUSPEND_CMD, dev_suspend},
		{DM_DEV_STATUS_CMD, dev_status},
		{DM_DEV_WAIT_CMD, dev_wait},

		{DM_TABLE_LOAD_CMD, table_load},
		{DM_TABLE_CLEAR_CMD, table_clear},
		{DM_TABLE_DEPS_CMD, table_deps},
		{DM_TABLE_STATUS_CMD, table_status},

		{DM_LIST_VERSIONS_CMD, list_versions},

		{DM_TARGET_MSG_CMD, target_message},
		{DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
	};

	return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
}
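/*
 * Note: ctl_ioctl() indexes the table above directly with
 * _IOC_NR(command), so the order of the entries must match the
 * DM_*_CMD enumeration in <linux/dm-ioctl.h>.
 */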
/*
 * As well as checking the version compatibility this always
 * copies the kernel interface version out.
 */
static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
{
	uint32_t version[3];
	int r = 0;

	if (copy_from_user(version, user->version, sizeof(version)))
		return -EFAULT;

	if ((DM_VERSION_MAJOR != version[0]) ||
	    (DM_VERSION_MINOR < version[1])) {
		DMWARN("ioctl interface mismatch: "
		       "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
		       DM_VERSION_MAJOR, DM_VERSION_MINOR,
		       DM_VERSION_PATCHLEVEL,
		       version[0], version[1], version[2], cmd);
		r = -EINVAL;
	}

	/*
	 * Fill in the kernel version.
	 */
	version[0] = DM_VERSION_MAJOR;
	version[1] = DM_VERSION_MINOR;
	version[2] = DM_VERSION_PATCHLEVEL;
	if (copy_to_user(user->version, version, sizeof(version)))
		return -EFAULT;

	return r;
}
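/*
 * Note: the check above requires the major versions to match exactly
 * and the kernel's minor version to be at least the one userspace asked
 * for; the kernel's own version is copied back out even when the check
 * fails, so userspace can report the mismatch.
 */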
static void free_params(struct dm_ioctl *param)
{
	vfree(param);
}

static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
{
	struct dm_ioctl tmp, *dmi;

	if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
		return -EFAULT;

	if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
		return -EINVAL;

	dmi = vmalloc(tmp.data_size);
	if (!dmi)
		return -ENOMEM;

	if (copy_from_user(dmi, user, tmp.data_size)) {
		vfree(dmi);
		return -EFAULT;
	}

	*param = dmi;
	return 0;
}

static int validate_params(uint cmd, struct dm_ioctl *param)
{
	/* Always clear this flag */
	param->flags &= ~DM_BUFFER_FULL_FLAG;

	/* Ignores parameters */
	if (cmd == DM_REMOVE_ALL_CMD ||
	    cmd == DM_LIST_DEVICES_CMD ||
	    cmd == DM_LIST_VERSIONS_CMD)
		return 0;

	if ((cmd == DM_DEV_CREATE_CMD)) {
		if (!*param->name) {
			DMWARN("name not supplied when creating device");
			return -EINVAL;
		}
	} else if ((*param->uuid && *param->name)) {
		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
		return -EINVAL;
	}

	/* Ensure strings are terminated */
	param->name[DM_NAME_LEN - 1] = '\0';
	param->uuid[DM_UUID_LEN - 1] = '\0';

	return 0;
}
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
	int r = 0;
	unsigned int cmd;
	struct dm_ioctl *uninitialized_var(param);
	ioctl_fn fn = NULL;
	size_t param_size;

	/* only root can play with this */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (_IOC_TYPE(command) != DM_IOCTL)
		return -ENOTTY;

	cmd = _IOC_NR(command);

	/*
	 * Check the interface version passed in. This also
	 * writes out the kernel's interface version.
	 */
	r = check_version(cmd, user);
	if (r)
		return r;

	/*
	 * Nothing more to do for the version command.
	 */
	if (cmd == DM_VERSION_CMD)
		return 0;

	fn = lookup_ioctl(cmd);
	if (!fn) {
		DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
		return -ENOTTY;
	}

	/*
	 * Trying to avoid low memory issues when a device is
	 * suspended.
	 */
	current->flags |= PF_MEMALLOC;

	/*
	 * Copy the parameters into kernel space.
	 */
	r = copy_params(user, &param);

	current->flags &= ~PF_MEMALLOC;

	if (r)
		return r;

	r = validate_params(cmd, param);
	if (r)
		goto out;

	param_size = param->data_size;
	param->data_size = sizeof(*param);
	r = fn(param, param_size);

	/*
	 * Copy the results back to userland.
	 */
	if (!r && copy_to_user(user, param, param->data_size))
		r = -EFAULT;

 out:
	free_params(param);
	return r;
}

static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
}

#ifdef CONFIG_COMPAT
static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
{
	return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
}
#else
#define dm_compat_ctl_ioctl NULL
#endif
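/*
 * Illustrative userspace sketch (not part of this file): the control
 * device registered below is driven by issuing ioctls on
 * /dev/mapper/control. A minimal DM_VERSION query might look roughly
 * like this (error handling omitted; fields from <linux/dm-ioctl.h>):
 *
 *	struct dm_ioctl dmi;
 *
 *	memset(&dmi, 0, sizeof(dmi));
 *	dmi.version[0] = DM_VERSION_MAJOR;
 *	dmi.version[1] = 0;
 *	dmi.version[2] = 0;
 *	dmi.data_size = sizeof(dmi);
 *
 *	fd = open("/dev/mapper/control", O_RDWR);
 *	ioctl(fd, DM_VERSION, &dmi);
 *	(dmi.version now holds the kernel's interface version)
 */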
static const struct file_operations _ctl_fops = {
	.unlocked_ioctl	= dm_ctl_ioctl,
	.compat_ioctl	= dm_compat_ctl_ioctl,
	.owner		= THIS_MODULE,
};

static struct miscdevice _dm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DM_NAME,
	.nodename	= "mapper/control",
	.fops		= &_ctl_fops
};

/*
 * Create misc character device and link to DM_DIR/control.
 */
int __init dm_interface_init(void)
{
	int r;

	r = dm_hash_init();
	if (r)
		return r;

	r = misc_register(&_dm_misc);
	if (r) {
		DMERR("misc_register failed for control device");
		dm_hash_exit();
		return r;
	}

	DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
	       DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
	       DM_DRIVER_EMAIL);
	return 0;
}

void dm_interface_exit(void)
{
	if (misc_deregister(&_dm_misc) < 0)
		DMERR("misc_deregister failed for control device");

	dm_hash_exit();
}
/**
 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
 * @md: Pointer to mapped_device
 * @name: Buffer (size DM_NAME_LEN) for name
 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
 */
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
{
	int r = 0;
	struct hash_cell *hc;

	if (!md)
		return -ENXIO;

	mutex_lock(&dm_hash_cells_mutex);
	hc = dm_get_mdptr(md);
	if (!hc || hc->md != md) {
		r = -ENXIO;
		goto out;
	}

	if (name)
		strcpy(name, hc->name);
	if (uuid)
		strcpy(uuid, hc->uuid ? : "");

out:
	mutex_unlock(&dm_hash_cells_mutex);

	return r;
}