Sync usage with man page.
[netbsd-mini2440.git] / external / gpl2 / lvm2 / dist / libdm / ioctl / libdm-iface.c
blob44fc1d180b8a23a24f9e8c65f3d8d0785e04cc6f
1 /* $NetBSD$ */
3 /*
4 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
7 * This file is part of the device-mapper userspace tools.
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 #include "dmlib.h"
19 #include "libdm-targets.h"
20 #include "libdm-common.h"
22 #ifdef DM_COMPAT
23 # include "libdm-compat.h"
24 #endif
26 #include <fcntl.h>
27 #include <dirent.h>
28 #include <sys/ioctl.h>
29 #include <sys/utsname.h>
30 #include <limits.h>
32 #ifdef linux
33 # include "kdev_t.h"
34 # include <linux/limits.h>
35 #else
36 # define MAJOR(x) major((x))
37 # define MINOR(x) minor((x))
38 # define MKDEV(x,y) makedev((x),(y))
39 #endif
41 #include "dm-ioctl.h"
44 * Ensure build compatibility.
45 * The hard-coded versions here are the highest present
46 * in the _cmd_data arrays.
49 #if !((DM_VERSION_MAJOR == 1 && DM_VERSION_MINOR >= 0) || \
50 (DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 0))
51 #error The version of dm-ioctl.h included is incompatible.
52 #endif
54 /* FIXME This should be exported in device-mapper.h */
55 #define DM_NAME "device-mapper"
57 #define PROC_MISC "/proc/misc"
58 #define PROC_DEVICES "/proc/devices"
59 #define MISC_NAME "misc"
61 #define NUMBER_OF_MAJORS 4096
63 /* dm major version no for running kernel */
64 static unsigned _dm_version = DM_VERSION_MAJOR;
65 static unsigned _dm_version_minor = 0;
66 static unsigned _dm_version_patchlevel = 0;
67 static int _log_suppress = 0;
70 * If the kernel dm driver only supports one major number
71 * we store it in _dm_device_major. Otherwise we indicate
72 * which major numbers have been claimed by device-mapper
73 * in _dm_bitset.
75 static unsigned _dm_multiple_major_support = 1;
76 static dm_bitset_t _dm_bitset = NULL;
77 static uint32_t _dm_device_major = 0;
79 static int _control_fd = -1;
80 static int _version_checked = 0;
81 static int _version_ok = 1;
82 static unsigned _ioctl_buffer_double_factor = 0;
86 * Support both old and new major numbers to ease the transition.
87 * Clumsy, but only temporary.
89 #if DM_VERSION_MAJOR == 4 && defined(DM_COMPAT)
90 const int _dm_compat = 1;
91 #else
92 const int _dm_compat = 0;
93 #endif
96 /* *INDENT-OFF* */
97 static struct cmd_data _cmd_data_v4[] = {
98 {"create", DM_DEV_CREATE, {4, 0, 0}},
99 {"reload", DM_TABLE_LOAD, {4, 0, 0}},
100 {"remove", DM_DEV_REMOVE, {4, 0, 0}},
101 {"remove_all", DM_REMOVE_ALL, {4, 0, 0}},
102 {"suspend", DM_DEV_SUSPEND, {4, 0, 0}},
103 {"resume", DM_DEV_SUSPEND, {4, 0, 0}},
104 {"info", DM_DEV_STATUS, {4, 0, 0}},
105 {"deps", DM_TABLE_DEPS, {4, 0, 0}},
106 {"rename", DM_DEV_RENAME, {4, 0, 0}},
107 {"version", DM_VERSION, {4, 0, 0}},
108 {"status", DM_TABLE_STATUS, {4, 0, 0}},
109 {"table", DM_TABLE_STATUS, {4, 0, 0}},
110 {"waitevent", DM_DEV_WAIT, {4, 0, 0}},
111 {"names", DM_LIST_DEVICES, {4, 0, 0}},
112 {"clear", DM_TABLE_CLEAR, {4, 0, 0}},
113 {"mknodes", DM_DEV_STATUS, {4, 0, 0}},
114 #ifdef DM_LIST_VERSIONS
115 {"versions", DM_LIST_VERSIONS, {4, 1, 0}},
116 #endif
117 #ifdef DM_TARGET_MSG
118 {"message", DM_TARGET_MSG, {4, 2, 0}},
119 #endif
120 #ifdef DM_DEV_SET_GEOMETRY
121 {"setgeometry", DM_DEV_SET_GEOMETRY, {4, 6, 0}},
122 #endif
124 /* *INDENT-ON* */
126 #define ALIGNMENT_V1 sizeof(int)
127 #define ALIGNMENT 8
129 /* FIXME Rejig library to record & use errno instead */
130 #ifndef DM_EXISTS_FLAG
131 # define DM_EXISTS_FLAG 0x00000004
132 #endif
/* Round ptr up to the next multiple of a.  a must be a power of two. */
static void *_align(void *ptr, unsigned int a)
{
	unsigned long mask = (unsigned long) a - 1;

	return (void *) (((unsigned long) ptr + mask) & ~mask);
}
141 #ifdef DM_IOCTLS
143 * Set number to NULL to populate _dm_bitset - otherwise first
144 * match is returned.
146 static int _get_proc_number(const char *file, const char *name,
147 uint32_t *number)
149 FILE *fl;
150 char nm[256];
151 int c;
152 uint32_t num;
154 if (!(fl = fopen(file, "r"))) {
155 log_sys_error("fopen", file);
156 return 0;
159 while (!feof(fl)) {
160 if (fscanf(fl, "%d %255s\n", &num, &nm[0]) == 2) {
161 if (!strcmp(name, nm)) {
162 if (number) {
163 *number = num;
164 if (fclose(fl))
165 log_sys_error("fclose", file);
166 return 1;
168 dm_bit_set(_dm_bitset, num);
170 } else do {
171 c = fgetc(fl);
172 } while (c != EOF && c != '\n');
174 if (fclose(fl))
175 log_sys_error("fclose", file);
177 if (number) {
178 log_error("%s: No entry for %s found", file, name);
179 return 0;
182 return 1;
185 static int _control_device_number(uint32_t *major, uint32_t *minor)
187 if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major) ||
188 !_get_proc_number(PROC_MISC, DM_NAME, minor)) {
189 *major = 0;
190 return 0;
193 return 1;
197 * Returns 1 if exists; 0 if it doesn't; -1 if it's wrong
/*
 * Returns 1 if the control node exists and is correct; 0 if it doesn't
 * exist (or a wrong one was removed); -1 if it's wrong and couldn't be
 * unlinked.
 */
static int _control_exists(const char *control, uint32_t major, uint32_t minor)
{
	struct stat buf;

	if (stat(control, &buf) < 0) {
		if (errno != ENOENT)
			log_sys_error("stat", control);
		return 0;
	}

	if (!S_ISCHR(buf.st_mode)) {
		log_verbose("%s: Wrong inode type", control);
		if (!unlink(control))
			return 0;
		log_sys_error("unlink", control);
		return -1;
	}

	if (major && buf.st_rdev != MKDEV(major, minor)) {
		/* FIX: report the node's actual device number (st_rdev);
		 * previously st_mode was printed, which is meaningless here. */
		log_verbose("%s: Wrong device number: (%u, %u) instead of "
			    "(%u, %u)", control,
			    MAJOR(buf.st_rdev), MINOR(buf.st_rdev),
			    major, minor);
		if (!unlink(control))
			return 0;
		log_sys_error("unlink", control);
		return -1;
	}

	return 1;
}
231 static int _create_control(const char *control, uint32_t major, uint32_t minor)
233 int ret;
234 mode_t old_umask;
236 if (!major)
237 return 0;
239 old_umask = umask(DM_DEV_DIR_UMASK);
240 ret = dm_create_dir(dm_dir());
241 umask(old_umask);
243 if (!ret)
244 return 0;
246 log_verbose("Creating device %s (%u, %u)", control, major, minor);
248 if (mknod(control, S_IFCHR | S_IRUSR | S_IWUSR,
249 MKDEV(major, minor)) < 0) {
250 log_sys_error("mknod", control);
251 return 0;
254 #ifdef HAVE_SELINUX
255 if (!dm_set_selinux_context(control, S_IFCHR)) {
256 stack;
257 return 0;
259 #endif
261 return 1;
263 #endif
266 * FIXME Update bitset in long-running process if dm claims new major numbers.
/*
 * Initialise either _dm_device_major (single-major kernels) or
 * _dm_bitset (kernels allowing several dm majors) from /proc/devices.
 * FIXME Update bitset in long-running process if dm claims new major numbers.
 */
static int _create_dm_bitset(void)
{
#ifdef DM_IOCTLS
	struct utsname uts;

	/* Already initialised? */
	if (_dm_bitset || _dm_device_major)
		return 1;

	if (uname(&uts))
		return 0;

	/*
	 * 2.6 kernels are limited to one major number.
	 * Assume 2.4 kernels are patched not to.
	 * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this.
	 */
	if (!strncmp(uts.release, "2.6.", 4))
		_dm_multiple_major_support = 0;

	if (!_dm_multiple_major_support)
		return _get_proc_number(PROC_DEVICES, DM_NAME,
					&_dm_device_major) ? 1 : 0;

	/* Multiple major numbers supported */
	if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
		return 0;

	if (!_get_proc_number(PROC_DEVICES, DM_NAME, NULL)) {
		dm_bitset_destroy(_dm_bitset);
		_dm_bitset = NULL;
		return 0;
	}

	return 1;
#else
	return 0;
#endif
}
309 int dm_is_dm_major(uint32_t major)
311 if (!_create_dm_bitset())
312 return 0;
314 if (_dm_multiple_major_support)
315 return dm_bit(_dm_bitset, major) ? 1 : 0;
316 else
317 return (major == _dm_device_major) ? 1 : 0;
/* Open (and create if necessary) the dm control node, caching the fd. */
static int _open_control(void)
{
#ifdef DM_IOCTLS
	char control[PATH_MAX];
	uint32_t major = 0, minor;

	/* Reuse an fd opened earlier. */
	if (_control_fd != -1)
		return 1;

	snprintf(control, sizeof(control), "%s/control", dm_dir());

	if (!_control_device_number(&major, &minor))
		log_error("Is device-mapper driver missing from kernel?");

	if (!_control_exists(control, major, minor) &&
	    !_create_control(control, major, minor))
		goto error;

	if ((_control_fd = open(control, O_RDWR)) < 0) {
		log_sys_error("open", control);
		goto error;
	}

	if (!_create_dm_bitset()) {
		log_error("Failed to set up list of device-mapper major numbers");
		return 0;
	}

	return 1;

      error:
	log_error("Failure to communicate with kernel device-mapper driver.");
	return 0;
#else
	return 1;
#endif
}
358 void dm_task_destroy(struct dm_task *dmt)
360 struct target *t, *n;
362 for (t = dmt->head; t; t = n) {
363 n = t->next;
364 dm_free(t->params);
365 dm_free(t->type);
366 dm_free(t);
369 if (dmt->dev_name)
370 dm_free(dmt->dev_name);
372 if (dmt->newname)
373 dm_free(dmt->newname);
375 if (dmt->message)
376 dm_free(dmt->message);
378 if (dmt->dmi.v4)
379 dm_free(dmt->dmi.v4);
381 if (dmt->uuid)
382 dm_free(dmt->uuid);
384 dm_free(dmt);
388 * Protocol Version 1 compatibility functions.
391 #ifdef DM_COMPAT
393 static int _dm_task_get_driver_version_v1(struct dm_task *dmt, char *version,
394 size_t size)
396 unsigned int *v;
398 if (!dmt->dmi.v1) {
399 version[0] = '\0';
400 return 0;
403 v = dmt->dmi.v1->version;
404 snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
405 return 1;
408 /* Unmarshall the target info returned from a status call */
409 static int _unmarshal_status_v1(struct dm_task *dmt, struct dm_ioctl_v1 *dmi)
411 char *outbuf = (char *) dmi + dmi->data_start;
412 char *outptr = outbuf;
413 int32_t i;
414 struct dm_target_spec_v1 *spec;
416 for (i = 0; i < dmi->target_count; i++) {
417 spec = (struct dm_target_spec_v1 *) outptr;
419 if (!dm_task_add_target(dmt, spec->sector_start,
420 (uint64_t) spec->length,
421 spec->target_type,
422 outptr + sizeof(*spec))) {
423 return 0;
426 outptr = outbuf + spec->next;
429 return 1;
/* Write "MMM:mmm" (3-digit hex) into buf; fails if bufsize is too small. */
static int _dm_format_dev_v1(char *buf, int bufsize, uint32_t dev_major,
			     uint32_t dev_minor)
{
	int n;

	if (bufsize < 8)
		return 0;

	n = snprintf(buf, bufsize, "%03x:%03x", dev_major, dev_minor);

	return (n < 0 || n >= bufsize) ? 0 : 1;
}
447 static int _dm_task_get_info_v1(struct dm_task *dmt, struct dm_info *info)
449 if (!dmt->dmi.v1)
450 return 0;
452 memset(info, 0, sizeof(*info));
454 info->exists = dmt->dmi.v1->flags & DM_EXISTS_FLAG ? 1 : 0;
455 if (!info->exists)
456 return 1;
458 info->suspended = dmt->dmi.v1->flags & DM_SUSPEND_FLAG ? 1 : 0;
459 info->read_only = dmt->dmi.v1->flags & DM_READONLY_FLAG ? 1 : 0;
460 info->target_count = dmt->dmi.v1->target_count;
461 info->open_count = dmt->dmi.v1->open_count;
462 info->event_nr = 0;
463 info->major = MAJOR(dmt->dmi.v1->dev);
464 info->minor = MINOR(dmt->dmi.v1->dev);
465 info->live_table = 1;
466 info->inactive_table = 0;
468 return 1;
471 static const char *_dm_task_get_name_v1(const struct dm_task *dmt)
473 return (dmt->dmi.v1->name);
476 static const char *_dm_task_get_uuid_v1(const struct dm_task *dmt)
478 return (dmt->dmi.v1->uuid);
481 static struct dm_deps *_dm_task_get_deps_v1(struct dm_task *dmt)
483 log_error("deps version 1 no longer supported by libdevmapper");
484 return NULL;
487 static struct dm_names *_dm_task_get_names_v1(struct dm_task *dmt)
489 return (struct dm_names *) (((void *) dmt->dmi.v1) +
490 dmt->dmi.v1->data_start);
493 static void *_add_target_v1(struct target *t, void *out, void *end)
495 void *out_sp = out;
496 struct dm_target_spec_v1 sp;
497 size_t sp_size = sizeof(struct dm_target_spec_v1);
498 int len;
499 const char no_space[] = "Ran out of memory building ioctl parameter";
501 out += sp_size;
502 if (out >= end) {
503 log_error(no_space);
504 return NULL;
507 sp.status = 0;
508 sp.sector_start = t->start;
509 sp.length = t->length;
510 strncpy(sp.target_type, t->type, sizeof(sp.target_type));
512 len = strlen(t->params);
514 if ((out + len + 1) >= end) {
515 log_error(no_space);
517 log_error("t->params= '%s'", t->params);
518 return NULL;
520 strcpy((char *) out, t->params);
521 out += len + 1;
523 /* align next block */
524 out = _align(out, ALIGNMENT_V1);
526 sp.next = out - out_sp;
528 memcpy(out_sp, &sp, sp_size);
530 return out;
533 static struct dm_ioctl_v1 *_flatten_v1(struct dm_task *dmt)
535 const size_t min_size = 16 * 1024;
536 const int (*version)[3];
538 struct dm_ioctl_v1 *dmi;
539 struct target *t;
540 size_t len = sizeof(struct dm_ioctl_v1);
541 void *b, *e;
542 int count = 0;
544 for (t = dmt->head; t; t = t->next) {
545 len += sizeof(struct dm_target_spec_v1);
546 len += strlen(t->params) + 1 + ALIGNMENT_V1;
547 count++;
550 if (count && dmt->newname) {
551 log_error("targets and newname are incompatible");
552 return NULL;
555 if (dmt->newname)
556 len += strlen(dmt->newname) + 1;
559 * Give len a minimum size so that we have space to store
560 * dependencies or status information.
562 if (len < min_size)
563 len = min_size;
565 if (!(dmi = dm_malloc(len)))
566 return NULL;
568 memset(dmi, 0, len);
570 version = &_cmd_data_v1[dmt->type].version;
572 dmi->version[0] = (*version)[0];
573 dmi->version[1] = (*version)[1];
574 dmi->version[2] = (*version)[2];
576 dmi->data_size = len;
577 dmi->data_start = sizeof(struct dm_ioctl_v1);
579 if (dmt->dev_name)
580 strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
582 if (dmt->type == DM_DEVICE_SUSPEND)
583 dmi->flags |= DM_SUSPEND_FLAG;
584 if (dmt->read_only)
585 dmi->flags |= DM_READONLY_FLAG;
587 if (dmt->minor >= 0) {
588 if (dmt->major <= 0) {
589 log_error("Missing major number for persistent device");
590 return NULL;
592 dmi->flags |= DM_PERSISTENT_DEV_FLAG;
593 dmi->dev = MKDEV(dmt->major, dmt->minor);
596 if (dmt->uuid)
597 strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
599 dmi->target_count = count;
601 b = (void *) (dmi + 1);
602 e = (void *) ((char *) dmi + len);
604 for (t = dmt->head; t; t = t->next)
605 if (!(b = _add_target_v1(t, b, e)))
606 goto bad;
608 if (dmt->newname)
609 strcpy(b, dmt->newname);
611 return dmi;
613 bad:
614 dm_free(dmi);
615 return NULL;
618 static int _dm_names_v1(struct dm_ioctl_v1 *dmi)
620 const char *dev_dir = dm_dir();
621 int r = 1, len;
622 const char *name;
623 struct dirent *dirent;
624 DIR *d;
625 struct dm_names *names, *old_names = NULL;
626 void *end = (void *) dmi + dmi->data_size;
627 struct stat buf;
628 char path[PATH_MAX];
630 log_warn("WARNING: Device list may be incomplete with interface "
631 "version 1.");
632 log_warn("Please upgrade your kernel device-mapper driver.");
634 if (!(d = opendir(dev_dir))) {
635 log_sys_error("opendir", dev_dir);
636 return 0;
639 names = (struct dm_names *) ((void *) dmi + dmi->data_start);
641 names->dev = 0; /* Flags no data */
643 while ((dirent = readdir(d))) {
644 name = dirent->d_name;
646 if (name[0] == '.' || !strcmp(name, "control"))
647 continue;
649 if (old_names)
650 old_names->next = (uint32_t) ((void *) names -
651 (void *) old_names);
652 snprintf(path, sizeof(path), "%s/%s", dev_dir, name);
653 if (stat(path, &buf)) {
654 log_sys_error("stat", path);
655 continue;
657 if (!S_ISBLK(buf.st_mode))
658 continue;
659 names->dev = (uint64_t) buf.st_rdev;
660 names->next = 0;
661 len = strlen(name);
662 if (((void *) (names + 1) + len + 1) >= end) {
663 log_error("Insufficient buffer space for device list");
664 r = 0;
665 break;
668 strcpy(names->name, name);
670 old_names = names;
671 names = _align((void *) ++names + len + 1, ALIGNMENT);
674 if (closedir(d))
675 log_sys_error("closedir", dev_dir);
677 return r;
680 static int _dm_task_run_v1(struct dm_task *dmt)
682 struct dm_ioctl_v1 *dmi;
683 unsigned int command;
685 dmi = _flatten_v1(dmt);
686 if (!dmi) {
687 log_error("Couldn't create ioctl argument.");
688 return 0;
691 if (!_open_control())
692 return 0;
694 if ((unsigned) dmt->type >=
695 (sizeof(_cmd_data_v1) / sizeof(*_cmd_data_v1))) {
696 log_error("Internal error: unknown device-mapper task %d",
697 dmt->type);
698 goto bad;
701 command = _cmd_data_v1[dmt->type].cmd;
703 if (dmt->type == DM_DEVICE_TABLE)
704 dmi->flags |= DM_STATUS_TABLE_FLAG;
706 log_debug("dm %s %s %s%s%s [%u]", _cmd_data_v1[dmt->type].name,
707 dmi->name, dmi->uuid, dmt->newname ? " " : "",
708 dmt->newname ? dmt->newname : "",
709 dmi->data_size);
710 if (dmt->type == DM_DEVICE_LIST) {
711 if (!_dm_names_v1(dmi))
712 goto bad;
714 #ifdef DM_IOCTLS
715 else if (ioctl(_control_fd, command, dmi) < 0) {
716 if (_log_suppress)
717 log_verbose("device-mapper: %s ioctl failed: %s",
718 _cmd_data_v1[dmt->type].name,
719 strerror(errno));
720 else
721 log_error("device-mapper: %s ioctl failed: %s",
722 _cmd_data_v1[dmt->type].name,
723 strerror(errno));
724 goto bad;
726 #else /* Userspace alternative for testing */
727 #endif
729 if (dmi->flags & DM_BUFFER_FULL_FLAG)
730 /* FIXME Increase buffer size and retry operation (if query) */
731 log_error("WARNING: libdevmapper buffer too small for data");
733 switch (dmt->type) {
734 case DM_DEVICE_CREATE:
735 add_dev_node(dmt->dev_name, MAJOR(dmi->dev), MINOR(dmi->dev),
736 dmt->uid, dmt->gid, dmt->mode, 0);
737 break;
739 case DM_DEVICE_REMOVE:
740 rm_dev_node(dmt->dev_name, 0);
741 break;
743 case DM_DEVICE_RENAME:
744 rename_dev_node(dmt->dev_name, dmt->newname, 0);
745 break;
747 case DM_DEVICE_MKNODES:
748 if (dmi->flags & DM_EXISTS_FLAG)
749 add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
750 MINOR(dmi->dev), dmt->uid,
751 dmt->gid, dmt->mode, 0);
752 else
753 rm_dev_node(dmt->dev_name, 0);
754 break;
756 case DM_DEVICE_STATUS:
757 case DM_DEVICE_TABLE:
758 if (!_unmarshal_status_v1(dmt, dmi))
759 goto bad;
760 break;
762 case DM_DEVICE_SUSPEND:
763 case DM_DEVICE_RESUME:
764 dmt->type = DM_DEVICE_INFO;
765 if (!dm_task_run(dmt))
766 goto bad;
767 dm_free(dmi); /* We'll use what info returned */
768 return 1;
771 dmt->dmi.v1 = dmi;
772 return 1;
774 bad:
775 dm_free(dmi);
776 return 0;
779 #endif
782 * Protocol Version 4 functions.
785 int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size)
787 unsigned *v;
789 #ifdef DM_COMPAT
790 if (_dm_version == 1)
791 return _dm_task_get_driver_version_v1(dmt, version, size);
792 #endif
794 if (!dmt->dmi.v4) {
795 version[0] = '\0';
796 return 0;
799 v = dmt->dmi.v4->version;
800 snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
801 _dm_version_minor = v[1];
802 _dm_version_patchlevel = v[2];
804 return 1;
807 static int _check_version(char *version, size_t size, int log_suppress)
809 struct dm_task *task;
810 int r;
812 if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
813 log_error("Failed to get device-mapper version");
814 version[0] = '\0';
815 return 0;
818 if (log_suppress)
819 _log_suppress = 1;
821 r = dm_task_run(task);
822 dm_task_get_driver_version(task, version, size);
823 dm_task_destroy(task);
824 _log_suppress = 0;
826 return r;
830 * Find out device-mapper's major version number the first time
831 * this is called and whether or not we support it.
833 int dm_check_version(void)
835 char libversion[64], dmversion[64];
836 const char *compat = "";
838 if (_version_checked)
839 return _version_ok;
841 _version_checked = 1;
843 if (_check_version(dmversion, sizeof(dmversion), _dm_compat))
844 return 1;
846 if (!_dm_compat)
847 goto bad;
849 log_verbose("device-mapper ioctl protocol version %u failed. "
850 "Trying protocol version 1.", _dm_version);
851 _dm_version = 1;
852 if (_check_version(dmversion, sizeof(dmversion), 0)) {
853 log_verbose("Using device-mapper ioctl protocol version 1");
854 return 1;
857 compat = "(compat)";
859 dm_get_library_version(libversion, sizeof(libversion));
861 log_error("Incompatible libdevmapper %s%s and kernel driver %s",
862 libversion, compat, dmversion);
864 bad:
865 _version_ok = 0;
866 return 0;
869 int dm_cookie_supported(void)
871 return (dm_check_version() &&
872 _dm_version >= 4 &&
873 _dm_version_minor >= 15);
876 void *dm_get_next_target(struct dm_task *dmt, void *next,
877 uint64_t *start, uint64_t *length,
878 char **target_type, char **params)
880 struct target *t = (struct target *) next;
882 if (!t)
883 t = dmt->head;
885 if (!t)
886 return NULL;
888 *start = t->start;
889 *length = t->length;
890 *target_type = t->type;
891 *params = t->params;
893 return t->next;
896 /* Unmarshall the target info returned from a status call */
897 static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
899 char *outbuf = (char *) dmi + dmi->data_start;
900 char *outptr = outbuf;
901 uint32_t i;
902 struct dm_target_spec *spec;
904 for (i = 0; i < dmi->target_count; i++) {
905 spec = (struct dm_target_spec *) outptr;
906 if (!dm_task_add_target(dmt, spec->sector_start,
907 spec->length,
908 spec->target_type,
909 outptr + sizeof(*spec))) {
910 return 0;
913 outptr = outbuf + spec->next;
916 return 1;
/* Write "major:minor" (decimal) into buf; fails if bufsize is too small. */
int dm_format_dev(char *buf, int bufsize, uint32_t dev_major,
		  uint32_t dev_minor)
{
	int n;

#ifdef DM_COMPAT
	if (_dm_version == 1)
		return _dm_format_dev_v1(buf, bufsize, dev_major, dev_minor);
#endif

	if (bufsize < 8)
		return 0;

	n = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor);

	return (n < 0 || n >= bufsize) ? 0 : 1;
}
939 int dm_task_get_info(struct dm_task *dmt, struct dm_info *info)
941 #ifdef DM_COMPAT
942 if (_dm_version == 1)
943 return _dm_task_get_info_v1(dmt, info);
944 #endif
946 if (!dmt->dmi.v4)
947 return 0;
949 memset(info, 0, sizeof(*info));
951 info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0;
952 if (!info->exists)
953 return 1;
955 info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0;
956 info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0;
957 info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0;
958 info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ?
959 1 : 0;
960 info->target_count = dmt->dmi.v4->target_count;
961 info->open_count = dmt->dmi.v4->open_count;
962 info->event_nr = dmt->dmi.v4->event_nr;
963 info->major = MAJOR(dmt->dmi.v4->dev);
964 info->minor = MINOR(dmt->dmi.v4->dev);
966 return 1;
969 uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead)
971 const char *dev_name;
973 *read_ahead = 0;
975 #ifdef DM_COMPAT
976 /* Not supporting this */
977 if (_dm_version == 1)
978 return 1;
979 #endif
981 if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG))
982 return 0;
984 if (*dmt->dmi.v4->name)
985 dev_name = dmt->dmi.v4->name;
986 else if (dmt->dev_name)
987 dev_name = dmt->dev_name;
988 else {
989 log_error("Get read ahead request failed: device name unrecorded.");
990 return 0;
993 return get_dev_node_read_ahead(dev_name, read_ahead);
996 const char *dm_task_get_name(const struct dm_task *dmt)
998 #ifdef DM_COMPAT
999 if (_dm_version == 1)
1000 return _dm_task_get_name_v1(dmt);
1001 #endif
1003 return (dmt->dmi.v4->name);
1006 const char *dm_task_get_uuid(const struct dm_task *dmt)
1008 #ifdef DM_COMPAT
1009 if (_dm_version == 1)
1010 return _dm_task_get_uuid_v1(dmt);
1011 #endif
1013 return (dmt->dmi.v4->uuid);
1016 struct dm_deps *dm_task_get_deps(struct dm_task *dmt)
1018 #ifdef DM_COMPAT
1019 if (_dm_version == 1)
1020 return _dm_task_get_deps_v1(dmt);
1021 #endif
1023 return (struct dm_deps *) (((void *) dmt->dmi.v4) +
1024 dmt->dmi.v4->data_start);
1027 struct dm_names *dm_task_get_names(struct dm_task *dmt)
1029 #ifdef DM_COMPAT
1030 if (_dm_version == 1)
1031 return _dm_task_get_names_v1(dmt);
1032 #endif
1034 return (struct dm_names *) (((void *) dmt->dmi.v4) +
1035 dmt->dmi.v4->data_start);
1038 struct dm_versions *dm_task_get_versions(struct dm_task *dmt)
1040 return (struct dm_versions *) (((void *) dmt->dmi.v4) +
1041 dmt->dmi.v4->data_start);
1044 int dm_task_set_ro(struct dm_task *dmt)
1046 dmt->read_only = 1;
1047 return 1;
1050 int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
1051 uint32_t read_ahead_flags)
1053 dmt->read_ahead = read_ahead;
1054 dmt->read_ahead_flags = read_ahead_flags;
1056 return 1;
1059 int dm_task_suppress_identical_reload(struct dm_task *dmt)
1061 dmt->suppress_identical_reload = 1;
1062 return 1;
1065 int dm_task_set_newname(struct dm_task *dmt, const char *newname)
1067 if (strchr(newname, '/')) {
1068 log_error("Name \"%s\" invalid. It contains \"/\".", newname);
1069 return 0;
1072 if (strlen(newname) >= DM_NAME_LEN) {
1073 log_error("Name \"%s\" too long", newname);
1074 return 0;
1077 if (!(dmt->newname = dm_strdup(newname))) {
1078 log_error("dm_task_set_newname: strdup(%s) failed", newname);
1079 return 0;
1082 return 1;
1085 int dm_task_set_message(struct dm_task *dmt, const char *message)
1087 if (!(dmt->message = dm_strdup(message))) {
1088 log_error("dm_task_set_message: strdup(%s) failed", message);
1089 return 0;
1092 return 1;
1095 int dm_task_set_sector(struct dm_task *dmt, uint64_t sector)
1097 dmt->sector = sector;
1099 return 1;
1102 int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start)
1104 size_t len = strlen(cylinders) + 1 + strlen(heads) + 1 + strlen(sectors) + 1 + strlen(start) + 1;
1106 if (!(dmt->geometry = dm_malloc(len))) {
1107 log_error("dm_task_set_geometry: dm_malloc failed");
1108 return 0;
1111 if (sprintf(dmt->geometry, "%s %s %s %s", cylinders, heads, sectors, start) < 0) {
1112 log_error("dm_task_set_geometry: sprintf failed");
1113 return 0;
1116 return 1;
1119 int dm_task_no_flush(struct dm_task *dmt)
1121 dmt->no_flush = 1;
1123 return 1;
1126 int dm_task_no_open_count(struct dm_task *dmt)
1128 dmt->no_open_count = 1;
1130 return 1;
1133 int dm_task_skip_lockfs(struct dm_task *dmt)
1135 dmt->skip_lockfs = 1;
1137 return 1;
1140 int dm_task_query_inactive_table(struct dm_task *dmt)
1142 dmt->query_inactive_table = 1;
1144 return 1;
1147 int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr)
1149 dmt->event_nr = event_nr;
1151 return 1;
1154 struct target *create_target(uint64_t start, uint64_t len, const char *type,
1155 const char *params)
1157 struct target *t = dm_malloc(sizeof(*t));
1159 if (!t) {
1160 log_error("create_target: malloc(%" PRIsize_t ") failed",
1161 sizeof(*t));
1162 return NULL;
1165 memset(t, 0, sizeof(*t));
1167 if (!(t->params = dm_strdup(params))) {
1168 log_error("create_target: strdup(params) failed");
1169 goto bad;
1172 if (!(t->type = dm_strdup(type))) {
1173 log_error("create_target: strdup(type) failed");
1174 goto bad;
1177 t->start = start;
1178 t->length = len;
1179 return t;
1181 bad:
1182 dm_free(t->params);
1183 dm_free(t->type);
1184 dm_free(t);
1185 return NULL;
1188 static void *_add_target(struct target *t, void *out, void *end)
1190 void *out_sp = out;
1191 struct dm_target_spec sp;
1192 size_t sp_size = sizeof(struct dm_target_spec);
1193 int len;
1194 const char no_space[] = "Ran out of memory building ioctl parameter";
1196 out += sp_size;
1197 if (out >= end) {
1198 log_error(no_space);
1199 return NULL;
1202 sp.status = 0;
1203 sp.sector_start = t->start;
1204 sp.length = t->length;
1205 strncpy(sp.target_type, t->type, sizeof(sp.target_type));
1207 len = strlen(t->params);
1209 if ((out + len + 1) >= end) {
1210 log_error(no_space);
1212 log_error("t->params= '%s'", t->params);
1213 return NULL;
1215 strcpy((char *) out, t->params);
1216 out += len + 1;
1218 /* align next block */
1219 out = _align(out, ALIGNMENT);
1221 sp.next = out - out_sp;
1222 memcpy(out_sp, &sp, sp_size);
1224 return out;
1227 static int _lookup_dev_name(uint64_t dev, char *buf, size_t len)
1229 struct dm_names *names;
1230 unsigned next = 0;
1231 struct dm_task *dmt;
1232 int r = 0;
1234 if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1235 return 0;
1237 if (!dm_task_run(dmt))
1238 goto out;
1240 if (!(names = dm_task_get_names(dmt)))
1241 goto out;
1243 if (!names->dev)
1244 goto out;
1246 do {
1247 names = (void *) names + next;
1248 if (names->dev == dev) {
1249 strncpy(buf, names->name, len);
1250 r = 1;
1251 break;
1253 next = names->next;
1254 } while (next);
1256 out:
1257 dm_task_destroy(dmt);
1258 return r;
1261 static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
1263 const size_t min_size = 16 * 1024;
1264 const int (*version)[3];
1266 struct dm_ioctl *dmi;
1267 struct target *t;
1268 struct dm_target_msg *tmsg;
1269 size_t len = sizeof(struct dm_ioctl);
1270 void *b, *e;
1271 int count = 0;
1273 for (t = dmt->head; t; t = t->next) {
1274 len += sizeof(struct dm_target_spec);
1275 len += strlen(t->params) + 1 + ALIGNMENT;
1276 count++;
1279 if (count && (dmt->sector || dmt->message)) {
1280 log_error("targets and message are incompatible");
1281 return NULL;
1284 if (count && dmt->newname) {
1285 log_error("targets and newname are incompatible");
1286 return NULL;
1289 if (count && dmt->geometry) {
1290 log_error("targets and geometry are incompatible");
1291 return NULL;
1294 if (dmt->newname && (dmt->sector || dmt->message)) {
1295 log_error("message and newname are incompatible");
1296 return NULL;
1299 if (dmt->newname && dmt->geometry) {
1300 log_error("geometry and newname are incompatible");
1301 return NULL;
1304 if (dmt->geometry && (dmt->sector || dmt->message)) {
1305 log_error("geometry and message are incompatible");
1306 return NULL;
1309 if (dmt->sector && !dmt->message) {
1310 log_error("message is required with sector");
1311 return NULL;
1314 if (dmt->newname)
1315 len += strlen(dmt->newname) + 1;
1317 if (dmt->message)
1318 len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1;
1320 if (dmt->geometry)
1321 len += strlen(dmt->geometry) + 1;
1324 * Give len a minimum size so that we have space to store
1325 * dependencies or status information.
1327 if (len < min_size)
1328 len = min_size;
1330 /* Increase buffer size if repeating because buffer was too small */
1331 while (repeat_count--)
1332 len *= 2;
1334 if (!(dmi = dm_malloc(len)))
1335 return NULL;
1337 memset(dmi, 0, len);
1339 version = &_cmd_data_v4[dmt->type].version;
1341 dmi->version[0] = (*version)[0];
1342 dmi->version[1] = (*version)[1];
1343 dmi->version[2] = (*version)[2];
1345 dmi->data_size = len;
1346 dmi->data_start = sizeof(struct dm_ioctl);
1348 if (dmt->minor >= 0) {
1349 if (dmt->major <= 0) {
1350 log_error("Missing major number for persistent device.");
1351 goto bad;
1354 if (!_dm_multiple_major_support && dmt->allow_default_major_fallback &&
1355 dmt->major != _dm_device_major) {
1356 log_verbose("Overriding major number of %" PRIu32
1357 " with %" PRIu32 " for persistent device.",
1358 dmt->major, _dm_device_major);
1359 dmt->major = _dm_device_major;
1362 dmi->flags |= DM_PERSISTENT_DEV_FLAG;
1363 dmi->dev = MKDEV(dmt->major, dmt->minor);
1366 /* Does driver support device number referencing? */
1367 if (_dm_version_minor < 3 && !dmt->dev_name && !dmt->uuid && dmi->dev) {
1368 if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
1369 log_error("Unable to find name for device (%" PRIu32
1370 ":%" PRIu32 ")", dmt->major, dmt->minor);
1371 goto bad;
1373 log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
1374 "for compatibility with old kernel",
1375 dmt->major, dmt->minor, dmi->name);
1378 /* FIXME Until resume ioctl supplies name, use dev_name for readahead */
1379 if (dmt->dev_name && (dmt->type != DM_DEVICE_RESUME || dmt->minor < 0 ||
1380 dmt->major < 0))
1381 strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
1383 if (dmt->uuid)
1384 strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
1386 if (dmt->type == DM_DEVICE_SUSPEND)
1387 dmi->flags |= DM_SUSPEND_FLAG;
1388 if (dmt->no_flush)
1389 dmi->flags |= DM_NOFLUSH_FLAG;
1390 if (dmt->read_only)
1391 dmi->flags |= DM_READONLY_FLAG;
1392 if (dmt->skip_lockfs)
1393 dmi->flags |= DM_SKIP_LOCKFS_FLAG;
1394 if (dmt->query_inactive_table) {
1395 if (_dm_version_minor < 16)
1396 log_warn("WARNING: Inactive table query unsupported "
1397 "by kernel. It will use live table.");
1398 dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
1401 dmi->target_count = count;
1402 dmi->event_nr = dmt->event_nr;
1404 b = (void *) (dmi + 1);
1405 e = (void *) ((char *) dmi + len);
1407 for (t = dmt->head; t; t = t->next)
1408 if (!(b = _add_target(t, b, e)))
1409 goto bad;
1411 if (dmt->newname)
1412 strcpy(b, dmt->newname);
1414 if (dmt->message) {
1415 tmsg = (struct dm_target_msg *) b;
1416 tmsg->sector = dmt->sector;
1417 strcpy(tmsg->message, dmt->message);
1420 if (dmt->geometry)
1421 strcpy(b, dmt->geometry);
1423 return dmi;
1425 bad:
1426 dm_free(dmi);
1427 return NULL;
/*
 * Run the given task once for every entry in the device-mapper
 * directory (dm_dir()), skipping ".", ".." and the "control" node.
 *
 * Returns 0 if the directory could not be opened or if any per-entry
 * run failed; 1 if every entry was processed successfully.  Processing
 * continues past individual failures (best effort), matching the
 * original intent.
 *
 * FIX: previously the return values of dm_task_set_name() and
 * dm_task_run() were ignored, so r stayed 1 and per-entry failures
 * were silently lost.
 */
static int _process_mapper_dir(struct dm_task *dmt)
{
	struct dirent *dirent;
	DIR *d;
	const char *dir;
	int r = 1;

	dir = dm_dir();
	if (!(d = opendir(dir))) {
		log_sys_error("opendir", dir);
		return 0;
	}

	while ((dirent = readdir(d))) {
		if (!strcmp(dirent->d_name, ".") ||
		    !strcmp(dirent->d_name, "..") ||
		    !strcmp(dirent->d_name, "control"))
			continue;
		/* Record failures but keep going: stale nodes may fail. */
		if (!dm_task_set_name(dmt, dirent->d_name) ||
		    !dm_task_run(dmt))
			r = 0;
	}

	if (closedir(d))
		log_sys_error("closedir", dir);

	return r;
}
1458 static int _process_all_v4(struct dm_task *dmt)
1460 struct dm_task *task;
1461 struct dm_names *names;
1462 unsigned next = 0;
1463 int r = 1;
1465 if (!(task = dm_task_create(DM_DEVICE_LIST)))
1466 return 0;
1468 if (!dm_task_run(task)) {
1469 r = 0;
1470 goto out;
1473 if (!(names = dm_task_get_names(task))) {
1474 r = 0;
1475 goto out;
1478 if (!names->dev)
1479 goto out;
1481 do {
1482 names = (void *) names + next;
1483 if (!dm_task_set_name(dmt, names->name)) {
1484 r = 0;
1485 goto out;
1487 if (!dm_task_run(dmt))
1488 r = 0;
1489 next = names->next;
1490 } while (next);
1492 out:
1493 dm_task_destroy(task);
1494 return r;
static int _mknodes_v4(struct dm_task *dmt)
{
	/*
	 * Refresh device nodes in two passes: first re-run the task for
	 * every entry already present in the mapper directory (result
	 * deliberately discarded -- stale entries may fail), then for
	 * every device the driver currently reports.
	 */
	(void) _process_mapper_dir(dmt);

	return _process_all_v4(dmt);
}
1505 * If an operation that uses a cookie fails, decrement the
1506 * semaphore instead of udev.
1508 static int _udev_complete(struct dm_task *dmt)
1510 uint32_t cookie;
1512 if (dmt->cookie_set) {
1513 /* strip flags from the cookie and use cookie magic instead */
1514 cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) |
1515 (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
1516 return dm_udev_complete(cookie);
1519 return 1;
/*
 * Old-style creation: a DM_DEVICE_CREATE task arrived with a table
 * already attached.  Split it into three kernel operations:
 *   1. CREATE the (empty) device via a fresh task,
 *   2. RELOAD the table via another fresh task (borrowing dmt's
 *      target list),
 *   3. RESUME using the ORIGINAL dmt so its dmi/info reflect the
 *      final device state.
 * On table-load or resume failure the device is removed again.
 * Every early-exit path calls _udev_complete() so a caller-set udev
 * cookie's semaphore is balanced even though no udev event will fire.
 */
static int _create_and_load_v4(struct dm_task *dmt)
{
	struct dm_task *task;
	int r;

	/* Use new task struct to create the device */
	if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
		log_error("Failed to create device-mapper task struct");
		_udev_complete(dmt);
		return 0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
		dm_task_destroy(task);
		_udev_complete(dmt);
		return 0;
	}

	if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
		dm_task_destroy(task);
		_udev_complete(dmt);
		return 0;
	}

	task->major = dmt->major;
	task->minor = dmt->minor;
	task->uid = dmt->uid;
	task->gid = dmt->gid;
	task->mode = dmt->mode;
	/* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
	task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
	task->cookie_set = dmt->cookie_set;

	r = dm_task_run(task);
	dm_task_destroy(task);
	if (!r) {
		_udev_complete(dmt);
		return 0;
	}

	/* Next load the table */
	if (!(task = dm_task_create(DM_DEVICE_RELOAD))) {
		log_error("Failed to create device-mapper task struct");
		_udev_complete(dmt);
		return 0;
	}

	/* Copy across relevant fields */
	if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
		dm_task_destroy(task);
		_udev_complete(dmt);
		return 0;
	}

	task->read_only = dmt->read_only;
	/* Borrow dmt's target list; dmt still owns it. */
	task->head = dmt->head;
	task->tail = dmt->tail;

	r = dm_task_run(task);

	/* Detach the borrowed list before destroy so it is not freed. */
	task->head = NULL;
	task->tail = NULL;
	dm_task_destroy(task);
	if (!r) {
		_udev_complete(dmt);
		goto revert;
	}

	/* Use the original structure last so the info will be correct */
	dmt->type = DM_DEVICE_RESUME;
	/* Device is addressed by name from here; uuid no longer needed. */
	dm_free(dmt->uuid);
	dmt->uuid = NULL;

	r = dm_task_run(dmt);

	if (r)
		return r;

      revert:
	/* Undo step 1: remove the device we created.  Cookie is cleared
	 * because its semaphore was already decremented above. */
	dmt->type = DM_DEVICE_REMOVE;
	dm_free(dmt->uuid);
	dmt->uuid = NULL;
	dmt->cookie_set = 0;

	if (!dm_task_run(dmt))
		log_error("Failed to revert device creation.");

	return r;
}
/*
 * Return the size of the device's live table as recorded by the last
 * _reload_with_suppression_v4() on this task (last target's
 * start + length, presumably in sectors -- units as carried in
 * struct target); 0 if no table was seen.
 */
uint64_t dm_task_get_existing_table_size(struct dm_task *dmt)
{
	return dmt->existing_table_size;
}
1618 static int _reload_with_suppression_v4(struct dm_task *dmt)
1620 struct dm_task *task;
1621 struct target *t1, *t2;
1622 int r;
1624 /* New task to get existing table information */
1625 if (!(task = dm_task_create(DM_DEVICE_TABLE))) {
1626 log_error("Failed to create device-mapper task struct");
1627 return 0;
1630 /* Copy across relevant fields */
1631 if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1632 dm_task_destroy(task);
1633 return 0;
1636 if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
1637 dm_task_destroy(task);
1638 return 0;
1641 task->major = dmt->major;
1642 task->minor = dmt->minor;
1644 r = dm_task_run(task);
1646 if (!r) {
1647 dm_task_destroy(task);
1648 return r;
1651 /* Store existing table size */
1652 t2 = task->head;
1653 while (t2 && t2->next)
1654 t2 = t2->next;
1655 dmt->existing_table_size = t2 ? t2->start + t2->length : 0;
1657 if ((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0 != dmt->read_only)
1658 goto no_match;
1660 t1 = dmt->head;
1661 t2 = task->head;
1663 while (t1 && t2) {
1664 while (t2->params[strlen(t2->params) - 1] == ' ')
1665 t2->params[strlen(t2->params) - 1] = '\0';
1666 if ((t1->start != t2->start) ||
1667 (t1->length != t2->length) ||
1668 (strcmp(t1->type, t2->type)) ||
1669 (strcmp(t1->params, t2->params)))
1670 goto no_match;
1671 t1 = t1->next;
1672 t2 = t2->next;
1675 if (!t1 && !t2) {
1676 dmt->dmi.v4 = task->dmi.v4;
1677 task->dmi.v4 = NULL;
1678 dm_task_destroy(task);
1679 return 1;
1682 no_match:
1683 dm_task_destroy(task);
1685 /* Now do the original reload */
1686 dmt->suppress_identical_reload = 0;
1687 r = dm_task_run(dmt);
1689 return r;
/*
 * Marshal the task into a struct dm_ioctl (sized by repeat_count --
 * see _flatten) and issue the ioctl on the control device.
 * Returns the kernel-filled dm_ioctl buffer (caller frees via dm_free)
 * or NULL on failure.  ENXIO on INFO/MKNODES/STATUS is not an error:
 * it means "device does not exist" and is signalled by clearing
 * DM_EXISTS_FLAG in the returned buffer.
 */
static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
				     unsigned repeat_count)
{
	struct dm_ioctl *dmi;

	dmi = _flatten(dmt, repeat_count);
	if (!dmi) {
		log_error("Couldn't create ioctl argument.");
		return NULL;
	}

	if (dmt->type == DM_DEVICE_TABLE)
		dmi->flags |= DM_STATUS_TABLE_FLAG;

	dmi->flags |= DM_EXISTS_FLAG;	/* FIXME */

	if (dmt->no_open_count)
		dmi->flags |= DM_SKIP_BDGET_FLAG;

	/*
	 * Prevent udev vs. libdevmapper race when processing nodes and
	 * symlinks. This can happen when the udev rules are installed and
	 * udev synchronisation code is enabled in libdevmapper but the
	 * software using libdevmapper does not make use of it (by not calling
	 * dm_task_set_cookie before). We need to instruct the udev rules not
	 * to be applied at all in this situation so we can gracefully fallback
	 * to libdevmapper's node and symlink creation code.
	 */
	if (dm_udev_get_sync_support() && !dmt->cookie_set &&
	    (dmt->type == DM_DEVICE_RESUME ||
	     dmt->type == DM_DEVICE_REMOVE ||
	     dmt->type == DM_DEVICE_RENAME)) {
		log_debug("Cookie value is not set while trying to call "
			  "DM_DEVICE_RESUME, DM_DEVICE_REMOVE or DM_DEVICE_RENAME "
			  "ioctl. Please, consider using libdevmapper's udev "
			  "synchronisation interface or disable it explicitly "
			  "by calling dm_udev_set_sync_support(0).");
		log_debug("Switching off device-mapper and all subsystem related "
			  "udev rules. Falling back to libdevmapper node creation.");
		/*
		 * Disable general dm and subsystem rules but keep dm disk rules
		 * if not flagged out explicitly before. We need /dev/disk content
		 * for the software that expects it.
		 */
		dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG |
				  DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) <<
				 DM_UDEV_FLAGS_SHIFT;
	}

	/* One-line trace of the ioctl about to be issued: command name,
	 * name/uuid, optional (major:minor), open-count/flush/lockfs/
	 * inactive-table flags, sector, message and payload size. */
	log_debug("dm %s %s %s%s%s %s%.0d%s%.0d%s"
		  "%s%c%c%s%s %.0" PRIu64 " %s [%u]",
		  _cmd_data_v4[dmt->type].name,
		  dmi->name, dmi->uuid, dmt->newname ? " " : "",
		  dmt->newname ? dmt->newname : "",
		  dmt->major > 0 ? "(" : "",
		  dmt->major > 0 ? dmt->major : 0,
		  dmt->major > 0 ? ":" : "",
		  dmt->minor > 0 ? dmt->minor : 0,
		  dmt->major > 0 && dmt->minor == 0 ? "0" : "",
		  dmt->major > 0 ? ") " : "",
		  dmt->no_open_count ? 'N' : 'O',
		  dmt->no_flush ? 'N' : 'F',
		  dmt->skip_lockfs ? "S " : "",
		  dmt->query_inactive_table ? "I " : "",
		  dmt->sector, dmt->message ? dmt->message : "",
		  dmi->data_size);
#ifdef DM_IOCTLS
	if (ioctl(_control_fd, command, dmi) < 0) {
		if (errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) ||
				       (dmt->type == DM_DEVICE_MKNODES) ||
				       (dmt->type == DM_DEVICE_STATUS)))
			/* "No such device" is a valid answer here. */
			dmi->flags &= ~DM_EXISTS_FLAG;	/* FIXME */
		else {
			if (_log_suppress)
				log_verbose("device-mapper: %s ioctl "
					    "failed: %s",
					    _cmd_data_v4[dmt->type].name,
					    strerror(errno));
			else
				log_error("device-mapper: %s ioctl "
					  "failed: %s",
					  _cmd_data_v4[dmt->type].name,
					  strerror(errno));
			dm_free(dmi);
			return NULL;
		}
	}
#else /* Userspace alternative for testing */
#endif
	return dmi;
}
void dm_task_update_nodes(void)
{
	/* Process any queued /dev node operations (add/rename/remove
	 * requested by earlier task runs) -- see update_devs() in
	 * libdm-common. */
	update_devs();
}
/*
 * Execute a device-mapper task: dispatch compound operations
 * (old-style create-with-table, global mknodes, suppressible reload)
 * to their helpers, otherwise issue the ioctl, retrying with a doubled
 * buffer while the kernel reports DM_BUFFER_FULL_FLAG for repeatable
 * commands, then perform the matching /dev node bookkeeping.
 * Returns 1 on success, 0 on failure.  On success ownership of the
 * kernel reply passes to dmt->dmi.v4.
 */
int dm_task_run(struct dm_task *dmt)
{
	struct dm_ioctl *dmi;
	unsigned command;
	int check_udev;

#ifdef DM_COMPAT
	if (_dm_version == 1)
		return _dm_task_run_v1(dmt);
#endif

	/* Reject task types outside the command table. */
	if ((unsigned) dmt->type >=
	    (sizeof(_cmd_data_v4) / sizeof(*_cmd_data_v4))) {
		log_error("Internal error: unknown device-mapper task %d",
			  dmt->type);
		return 0;
	}

	command = _cmd_data_v4[dmt->type].cmd;

	/* Old-style creation had a table supplied */
	if (dmt->type == DM_DEVICE_CREATE && dmt->head)
		return _create_and_load_v4(dmt);

	/* MKNODES with no device selector means "all devices". */
	if (dmt->type == DM_DEVICE_MKNODES && !dmt->dev_name &&
	    !dmt->uuid && dmt->major <= 0)
		return _mknodes_v4(dmt);

	if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload)
		return _reload_with_suppression_v4(dmt);

	if (!_open_control()) {
		/* Balance a caller-set udev cookie before bailing out. */
		_udev_complete(dmt);
		return 0;
	}

	/* FIXME Detect and warn if cookie set but should not be. */
repeat_ioctl:
	if (!(dmi = _do_dm_ioctl(dmt, command, _ioctl_buffer_double_factor))) {
		_udev_complete(dmt);
		return 0;
	}

	if (dmi->flags & DM_BUFFER_FULL_FLAG) {
		switch (dmt->type) {
		/* Idempotent queries: safe to reissue with a bigger
		 * buffer.  Other types fall through with truncated
		 * data and only a warning. */
		case DM_DEVICE_LIST_VERSIONS:
		case DM_DEVICE_LIST:
		case DM_DEVICE_DEPS:
		case DM_DEVICE_STATUS:
		case DM_DEVICE_TABLE:
		case DM_DEVICE_WAITEVENT:
			_ioctl_buffer_double_factor++;
			dm_free(dmi);
			goto repeat_ioctl;
		default:
			log_error("WARNING: libdevmapper buffer too small for data");
		}
	}

	/* Let udev handle the node unless the cookie flags disabled the
	 * dm udev rules for this operation. */
	check_udev = dmt->cookie_set &&
		     !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
		       DM_UDEV_DISABLE_DM_RULES_FLAG);

	switch (dmt->type) {
	case DM_DEVICE_CREATE:
		if (dmt->dev_name && *dmt->dev_name)
			add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
				     MINOR(dmi->dev), dmt->uid, dmt->gid,
				     dmt->mode, check_udev);
		break;
	case DM_DEVICE_REMOVE:
		/* FIXME Kernel needs to fill in dmi->name */
		if (dmt->dev_name)
			rm_dev_node(dmt->dev_name, check_udev);
		break;

	case DM_DEVICE_RENAME:
		/* FIXME Kernel needs to fill in dmi->name */
		if (dmt->dev_name)
			rename_dev_node(dmt->dev_name, dmt->newname,
					check_udev);
		break;

	case DM_DEVICE_RESUME:
		/* FIXME Until resume ioctl supplies name, use dev_name for readahead */
		set_dev_node_read_ahead(dmt->dev_name, dmt->read_ahead,
					dmt->read_ahead_flags);
		break;

	case DM_DEVICE_MKNODES:
		if (dmi->flags & DM_EXISTS_FLAG)
			add_dev_node(dmi->name, MAJOR(dmi->dev),
				     MINOR(dmi->dev), dmt->uid,
				     dmt->gid, dmt->mode, 0);
		else if (dmt->dev_name)
			rm_dev_node(dmt->dev_name, 0);
		break;

	case DM_DEVICE_STATUS:
	case DM_DEVICE_TABLE:
	case DM_DEVICE_WAITEVENT:
		if (!_unmarshal_status(dmt, dmi))
			goto bad;
		break;
	}

	/* Was structure reused? */
	if (dmt->dmi.v4)
		dm_free(dmt->dmi.v4);
	dmt->dmi.v4 = dmi;
	return 1;

      bad:
	dm_free(dmi);
	return 0;
}
void dm_lib_release(void)
{
	/* Close the control device if open, then process any queued
	 * /dev node operations. */
	if (_control_fd != -1) {
		close(_control_fd);
		_control_fd = -1;
	}
	update_devs();
}
1915 void dm_pools_check_leaks(void);
void dm_lib_exit(void)
{
	/* Full library shutdown.  Order matters: release the control fd
	 * and flush node ops first, free remaining allocations, then run
	 * the leak/memory diagnostics, and finally reset the version
	 * check state so the library can be re-initialised. */
	dm_lib_release();
	if (_dm_bitset)
		dm_bitset_destroy(_dm_bitset);
	_dm_bitset = NULL;
	dm_pools_check_leaks();
	dm_dump_memory();
	_version_ok = 1;
	_version_checked = 0;
}