/* $NetBSD: lv_manip.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */

/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "metadata.h"
#include "locking.h"
#include "pv_map.h"
#include "lvm-string.h"
#include "toolcontext.h"
#include "lv_alloc.h"
#include "pv_alloc.h"
#include "display.h"
#include "segtype.h"
#include "archiver.h"
#include "activate.h"
#include "str_list.h"

struct lv_names {
	const char *old;
	const char *new;
};
int add_seg_to_segs_using_this_lv(struct logical_volume *lv,
				  struct lv_segment *seg)
{
	struct seg_list *sl;

	dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
		if (sl->seg == seg) {
			sl->count++;
			return 1;
		}
	}

	log_very_verbose("Adding %s:%" PRIu32 " as a user of %s",
			 seg->lv->name, seg->le, lv->name);

	if (!(sl = dm_pool_zalloc(lv->vg->cmd->mem, sizeof(*sl)))) {
		log_error("Failed to allocate segment list");
		return 0;
	}

	sl->count = 1;
	sl->seg = seg;
	dm_list_add(&lv->segs_using_this_lv, &sl->list);

	return 1;
}
int remove_seg_from_segs_using_this_lv(struct logical_volume *lv,
				       struct lv_segment *seg)
{
	struct seg_list *sl;

	dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
		if (sl->seg != seg)
			continue;
		if (sl->count > 1)
			sl->count--;
		else {
			log_very_verbose("%s:%" PRIu32 " is no longer a user "
					 "of %s", seg->lv->name, seg->le,
					 lv->name);
			dm_list_del(&sl->list);
		}
		return 1;
	}

	return 0;
}

/*
 * This is a function specialized for the common case where there is
 * only one segment which uses the LV.
 * e.g. the LV is a layer inserted by insert_layer_for_lv().
 *
 * In general, walk through lv->segs_using_this_lv.
 */
struct lv_segment *get_only_segment_using_this_lv(struct logical_volume *lv)
{
	struct seg_list *sl;

	if (dm_list_size(&lv->segs_using_this_lv) != 1) {
		log_error("%s is expected to have only one segment using it, "
			  "while it has %d", lv->name,
			  dm_list_size(&lv->segs_using_this_lv));
		return NULL;
	}

	sl = dm_list_item(dm_list_first(&lv->segs_using_this_lv), struct seg_list);

	if (sl->count != 1) {
		log_error("%s is expected to have only one segment using it, "
			  "while %s:%" PRIu32 " uses it %d times",
			  lv->name, sl->seg->lv->name, sl->seg->le, sl->count);
		return NULL;
	}

	return sl->seg;
}

/*
 * PVs used by a segment of an LV
 */
struct seg_pvs {
	struct dm_list list;

	struct dm_list pvs;	/* struct pv_list */

	uint32_t le;
	uint32_t len;
};

static struct seg_pvs *_find_seg_pvs_by_le(struct dm_list *list, uint32_t le)
{
	struct seg_pvs *spvs;

	dm_list_iterate_items(spvs, list)
		if (le >= spvs->le && le < spvs->le + spvs->len)
			return spvs;

	return NULL;
}

/*
 * Find first unused LV number.
 */
uint32_t find_free_lvnum(struct logical_volume *lv)
{
	int lvnum_used[MAX_RESTRICTED_LVS + 1];
	uint32_t i = 0;
	struct lv_list *lvl;
	int lvnum;

	memset(&lvnum_used, 0, sizeof(lvnum_used));

	dm_list_iterate_items(lvl, &lv->vg->lvs) {
		lvnum = lvnum_from_lvid(&lvl->lv->lvid);
		if (lvnum <= MAX_RESTRICTED_LVS)
			lvnum_used[lvnum] = 1;
	}

	while (lvnum_used[i])
		i++;

	/* FIXME What if none are free? */

	return i;
}
/*
 * All lv_segments get created here.
 */
struct lv_segment *alloc_lv_segment(struct dm_pool *mem,
				    const struct segment_type *segtype,
				    struct logical_volume *lv,
				    uint32_t le, uint32_t len,
				    uint32_t status,
				    uint32_t stripe_size,
				    struct logical_volume *log_lv,
				    uint32_t area_count,
				    uint32_t area_len,
				    uint32_t chunk_size,
				    uint32_t region_size,
				    uint32_t extents_copied)
{
	struct lv_segment *seg;
	uint32_t areas_sz = area_count * sizeof(*seg->areas);

	if (!(seg = dm_pool_zalloc(mem, sizeof(*seg))))
		return_NULL;

	if (!(seg->areas = dm_pool_zalloc(mem, areas_sz))) {
		dm_pool_free(mem, seg);
		return_NULL;
	}

	if (!segtype) {
		log_error("alloc_lv_segment: Missing segtype.");
		return NULL;
	}

	seg->segtype = segtype;
	seg->lv = lv;
	seg->le = le;
	seg->len = len;
	seg->status = status;
	seg->stripe_size = stripe_size;
	seg->area_count = area_count;
	seg->area_len = area_len;
	seg->chunk_size = chunk_size;
	seg->region_size = region_size;
	seg->extents_copied = extents_copied;
	seg->log_lv = log_lv;
	dm_list_init(&seg->tags);

	if (log_lv && !attach_mirror_log(seg, log_lv))
		return_NULL;

	return seg;
}

struct lv_segment *alloc_snapshot_seg(struct logical_volume *lv,
				      uint32_t status, uint32_t old_le_count)
{
	struct lv_segment *seg;
	const struct segment_type *segtype;

	segtype = get_segtype_from_string(lv->vg->cmd, "snapshot");
	if (!segtype) {
		log_error("Failed to find snapshot segtype");
		return NULL;
	}

	if (!(seg = alloc_lv_segment(lv->vg->cmd->mem, segtype, lv, old_le_count,
				     lv->le_count - old_le_count, status, 0,
				     NULL, 0, lv->le_count - old_le_count,
				     0, 0, 0))) {
		log_error("Couldn't allocate new snapshot segment.");
		return NULL;
	}

	dm_list_add(&lv->segments, &seg->list);
	lv->status |= VIRTUAL;

	return seg;
}

void release_lv_segment_area(struct lv_segment *seg, uint32_t s,
			     uint32_t area_reduction)
{
	if (seg_type(seg, s) == AREA_UNASSIGNED)
		return;

	if (seg_type(seg, s) == AREA_PV) {
		if (release_pv_segment(seg_pvseg(seg, s), area_reduction) &&
		    seg->area_len == area_reduction)
			seg_type(seg, s) = AREA_UNASSIGNED;
		return;
	}

	if (seg_lv(seg, s)->status & MIRROR_IMAGE) {
		lv_reduce(seg_lv(seg, s), area_reduction);
		return;
	}

	if (area_reduction == seg->area_len) {
		log_very_verbose("Remove %s:%" PRIu32 "[%" PRIu32 "] from "
				 "the top of LV %s:%" PRIu32,
				 seg->lv->name, seg->le, s,
				 seg_lv(seg, s)->name, seg_le(seg, s));

		remove_seg_from_segs_using_this_lv(seg_lv(seg, s), seg);
		seg_lv(seg, s) = NULL;
		seg_le(seg, s) = 0;
		seg_type(seg, s) = AREA_UNASSIGNED;
	}
}
/*
 * Move a segment area from one segment to another
 */
int move_lv_segment_area(struct lv_segment *seg_to, uint32_t area_to,
			 struct lv_segment *seg_from, uint32_t area_from)
{
	struct physical_volume *pv;
	struct logical_volume *lv;
	uint32_t pe, le;

	switch (seg_type(seg_from, area_from)) {
	case AREA_PV:
		pv = seg_pv(seg_from, area_from);
		pe = seg_pe(seg_from, area_from);

		release_lv_segment_area(seg_from, area_from,
					seg_from->area_len);
		release_lv_segment_area(seg_to, area_to, seg_to->area_len);

		if (!set_lv_segment_area_pv(seg_to, area_to, pv, pe))
			return_0;

		break;

	case AREA_LV:
		lv = seg_lv(seg_from, area_from);
		le = seg_le(seg_from, area_from);

		release_lv_segment_area(seg_from, area_from,
					seg_from->area_len);
		release_lv_segment_area(seg_to, area_to, seg_to->area_len);

		if (!set_lv_segment_area_lv(seg_to, area_to, lv, le, 0))
			return_0;

		break;

	case AREA_UNASSIGNED:
		release_lv_segment_area(seg_to, area_to, seg_to->area_len);
	}

	return 1;
}
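/*
 * Note: move_lv_segment_area() releases the destination area (and, for
 * assigned sources, the source area) before the new links are set up, so
 * the pv_segment/lv_segment cross-references never point at a stale
 * segment while the move is in progress.
 */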
/*
 * Link part of a PV to an LV segment.
 */
int set_lv_segment_area_pv(struct lv_segment *seg, uint32_t area_num,
			   struct physical_volume *pv, uint32_t pe)
{
	seg->areas[area_num].type = AREA_PV;

	if (!(seg_pvseg(seg, area_num) =
	      assign_peg_to_lvseg(pv, pe, seg->area_len, seg, area_num)))
		return_0;

	return 1;
}

/*
 * Link one LV segment to another.  Assumes sizes already match.
 */
int set_lv_segment_area_lv(struct lv_segment *seg, uint32_t area_num,
			   struct logical_volume *lv, uint32_t le,
			   uint32_t flags)
{
	log_very_verbose("Stack %s:%" PRIu32 "[%" PRIu32 "] on LV %s:%" PRIu32,
			 seg->lv->name, seg->le, area_num, lv->name, le);

	seg->areas[area_num].type = AREA_LV;
	seg_lv(seg, area_num) = lv;
	seg_le(seg, area_num) = le;
	lv->status |= flags;

	if (!add_seg_to_segs_using_this_lv(lv, seg))
		return_0;

	return 1;
}

/*
 * Prepare for adding parallel areas to an existing segment.
 */
static int _lv_segment_add_areas(struct logical_volume *lv,
				 struct lv_segment *seg,
				 uint32_t new_area_count)
{
	struct lv_segment_area *newareas;
	uint32_t areas_sz = new_area_count * sizeof(*newareas);

	if (!(newareas = dm_pool_zalloc(lv->vg->cmd->mem, areas_sz)))
		return_0;

	memcpy(newareas, seg->areas, seg->area_count * sizeof(*seg->areas));

	seg->areas = newareas;
	seg->area_count = new_area_count;

	return 1;
}
/*
 * Reduce the size of an lv_segment.  New size can be zero.
 */
static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
{
	uint32_t area_reduction, s;

	/* Caller must ensure exact divisibility */
	if (seg_is_striped(seg)) {
		if (reduction % seg->area_count) {
			log_error("Segment extent reduction %" PRIu32
				  " not divisible by #stripes %" PRIu32,
				  reduction, seg->area_count);
			return 0;
		}
		area_reduction = (reduction / seg->area_count);
	} else
		area_reduction = reduction;

	for (s = 0; s < seg->area_count; s++)
		release_lv_segment_area(seg, s, area_reduction);

	seg->len -= reduction;
	seg->area_len -= area_reduction;

	return 1;
}
/*
 * Entry point for all LV reductions in size.
 */
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
{
	struct lv_segment *seg;
	uint32_t count = extents;
	uint32_t reduction;

	dm_list_iterate_back_items(seg, &lv->segments) {
		if (!count)
			break;

		if (seg->len <= count) {
			/* remove this segment completely */
			/* FIXME Check this is safe */
			if (seg->log_lv && !lv_remove(seg->log_lv))
				return_0;
			dm_list_del(&seg->list);
			reduction = seg->len;
		} else
			reduction = count;

		if (!_lv_segment_reduce(seg, reduction))
			return_0;
		count -= reduction;
	}

	lv->le_count -= extents;
	lv->size = (uint64_t) lv->le_count * lv->vg->extent_size;

	if (!delete)
		return 1;

	/* Remove the LV if it is now empty */
	if (!lv->le_count && !unlink_lv_from_vg(lv))
		return_0;
	else if (lv->vg->fid->fmt->ops->lv_setup &&
		 !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
		return_0;

	return 1;
}

/*
 * Empty an LV.
 */
int lv_empty(struct logical_volume *lv)
{
	return _lv_reduce(lv, lv->le_count, 0);
}

/*
 * Empty an LV and add error segment.
 */
int replace_lv_with_error_segment(struct logical_volume *lv)
{
	uint32_t len = lv->le_count;

	if (!lv_empty(lv))
		return_0;

	if (!lv_add_virtual_segment(lv, 0, len,
				    get_segtype_from_string(lv->vg->cmd,
							    "error")))
		return_0;

	return 1;
}

/*
 * Remove given number of extents from LV.
 */
int lv_reduce(struct logical_volume *lv, uint32_t extents)
{
	return _lv_reduce(lv, extents, 1);
}

/*
 * Completely remove an LV.
 */
int lv_remove(struct logical_volume *lv)
{
	if (!lv_reduce(lv, lv->le_count))
		return_0;

	return 1;
}

/*
 * A set of contiguous physical extents allocated
 */
struct alloced_area {
	struct dm_list list;

	struct physical_volume *pv;
	uint32_t pe;
	uint32_t len;
};

/*
 * Details of an allocation attempt
 */
struct alloc_handle {
	struct cmd_context *cmd;
	struct dm_pool *mem;

	alloc_policy_t alloc;		/* Overall policy */
	uint32_t area_count;		/* Number of parallel areas */
	uint32_t area_multiple;		/* seg->len = area_len * area_multiple */
	uint32_t log_count;		/* Number of parallel 1-extent logs */
	uint32_t log_region_size;	/* region size for log device */
	uint32_t total_area_len;	/* Total number of parallel extents */

	struct dm_list *parallel_areas;	/* PVs to avoid */

	struct alloced_area log_area;	/* Extent used for log */
	struct dm_list alloced_areas[0];	/* Lists of areas in each stripe */
};
static uint32_t calc_area_multiple(const struct segment_type *segtype,
				   const uint32_t area_count)
{
	if (!segtype_is_striped(segtype) || !area_count)
		return 1;

	return area_count;
}
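/*
 * Rationale: for striped segments every logical extent is spread across
 * area_count stripes, so seg->len = area_len * area_count; for mirrored
 * (and other non-striped) segments each area holds a complete copy, so
 * the multiple is 1.  E.g. a 100-extent segment striped over 4 PVs uses
 * 25 extents from each of its 4 areas.
 */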
/*
 * Preparation for a specific allocation attempt
 */
static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
					struct dm_pool *mem,
					const struct segment_type *segtype,
					alloc_policy_t alloc,
					uint32_t mirrors,
					uint32_t stripes,
					uint32_t log_count,
					uint32_t log_region_size,
					struct dm_list *parallel_areas)
{
	struct alloc_handle *ah;
	uint32_t s, area_count;

	if (stripes > 1 && mirrors > 1) {
		log_error("Striped mirrors are not supported yet");
		return NULL;
	}

	if (log_count && stripes > 1) {
		log_error("Can't mix striping with a mirror log yet.");
		return NULL;
	}

	if (segtype_is_virtual(segtype))
		area_count = 0;
	else if (mirrors > 1)
		area_count = mirrors;
	else
		area_count = stripes;

	if (!(ah = dm_pool_zalloc(mem, sizeof(*ah) + sizeof(ah->alloced_areas[0]) * area_count))) {
		log_error("allocation handle allocation failed");
		return NULL;
	}

	if (segtype_is_virtual(segtype))
		return ah;

	ah->cmd = cmd;

	if (!(ah->mem = dm_pool_create("allocation", 1024))) {
		log_error("allocation pool creation failed");
		return NULL;
	}

	ah->area_count = area_count;
	ah->log_count = log_count;
	ah->log_region_size = log_region_size;
	ah->alloc = alloc;
	ah->area_multiple = calc_area_multiple(segtype, area_count);

	for (s = 0; s < ah->area_count; s++)
		dm_list_init(&ah->alloced_areas[s]);

	ah->parallel_areas = parallel_areas;

	return ah;
}

void alloc_destroy(struct alloc_handle *ah)
{
	if (ah->mem)
		dm_pool_destroy(ah->mem);
}

static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_areas)
{
	struct seg_pvs *spvs;
	struct pv_list *pvl;
	char *pvnames;

	if (!parallel_areas)
		return 1;

	dm_list_iterate_items(spvs, parallel_areas) {
		if (!dm_pool_begin_object(mem, 256)) {
			log_error("dm_pool_begin_object failed");
			return 0;
		}

		dm_list_iterate_items(pvl, &spvs->pvs) {
			if (!dm_pool_grow_object(mem, pv_dev_name(pvl->pv), strlen(pv_dev_name(pvl->pv)))) {
				log_error("dm_pool_grow_object failed");
				dm_pool_abandon_object(mem);
				return 0;
			}
			if (!dm_pool_grow_object(mem, " ", 1)) {
				log_error("dm_pool_grow_object failed");
				dm_pool_abandon_object(mem);
				return 0;
			}
		}

		if (!dm_pool_grow_object(mem, "\0", 1)) {
			log_error("dm_pool_grow_object failed");
			dm_pool_abandon_object(mem);
			return 0;
		}

		pvnames = dm_pool_end_object(mem);
		log_debug("Parallel PVs at LE %" PRIu32 " length %" PRIu32 ": %s",
			  spvs->le, spvs->len, pvnames);
		dm_pool_free(mem, pvnames);
	}

	return 1;
}
static int _setup_alloced_segment(struct logical_volume *lv, uint32_t status,
				  uint32_t area_count,
				  uint32_t stripe_size,
				  const struct segment_type *segtype,
				  struct alloced_area *aa,
				  uint32_t region_size,
				  struct logical_volume *log_lv __attribute((unused)))
{
	uint32_t s, extents, area_multiple;
	struct lv_segment *seg;

	area_multiple = calc_area_multiple(segtype, area_count);

	/* log_lv gets set up elsewhere */
	if (!(seg = alloc_lv_segment(lv->vg->cmd->mem, segtype, lv,
				     lv->le_count,
				     aa[0].len * area_multiple,
				     status, stripe_size, NULL,
				     area_count,
				     aa[0].len, 0u, region_size, 0u))) {
		log_error("Couldn't allocate new LV segment.");
		return 0;
	}

	for (s = 0; s < area_count; s++)
		if (!set_lv_segment_area_pv(seg, s, aa[s].pv, aa[s].pe))
			return_0;

	dm_list_add(&lv->segments, &seg->list);

	extents = aa[0].len * area_multiple;
	lv->le_count += extents;
	lv->size += (uint64_t) extents * lv->vg->extent_size;

	if (segtype_is_mirrored(segtype))
		lv->status |= MIRRORED;

	return 1;
}
static int _setup_alloced_segments(struct logical_volume *lv,
				   struct dm_list *alloced_areas,
				   uint32_t area_count,
				   uint32_t status,
				   uint32_t stripe_size,
				   const struct segment_type *segtype,
				   uint32_t region_size,
				   struct logical_volume *log_lv)
{
	struct alloced_area *aa;

	dm_list_iterate_items(aa, &alloced_areas[0]) {
		if (!_setup_alloced_segment(lv, status, area_count,
					    stripe_size, segtype, aa,
					    region_size, log_lv))
			return_0;
	}

	return 1;
}
/*
 * Returns log device size in extents, algorithm from kernel code
 */
#define BYTE_SHIFT 3
static uint32_t mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint32_t area_len)
{
	size_t area_size, bitset_size, log_size, region_count;

	area_size = area_len * pe_size;
	region_count = dm_div_up(area_size, region_size);

	/* Work out how many "unsigned long"s we need to hold the bitset. */
	bitset_size = dm_round_up(region_count, sizeof(uint32_t) << BYTE_SHIFT);
	bitset_size >>= BYTE_SHIFT;

	/* Log device holds both header and bitset. */
	log_size = dm_round_up((MIRROR_LOG_OFFSET << SECTOR_SHIFT) + bitset_size, 1 << SECTOR_SHIFT);
	log_size >>= SECTOR_SHIFT;

	return dm_div_up(log_size, pe_size);
}
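/*
 * Worked example (hypothetical values): region_size = 1024 sectors,
 * pe_size = 8192 sectors (4MiB extents), area_len = 256 extents.
 * area_size = 256 * 8192 = 2097152 sectors, so region_count = 2048.
 * Rounding 2048 bits up to 32-bit words leaves 2048 bits = 256 bytes
 * of bitset.  Assuming MIRROR_LOG_OFFSET is 2 sectors, the header plus
 * bitset is (2 << 9) + 256 = 1280 bytes, rounded up to 1536 bytes =
 * 3 sectors, and dm_div_up(3, 8192) = 1 extent for the log.
 */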
/*
 * This function takes a list of pv_areas and adds them to allocated_areas.
 * If the complete area is not needed then it gets split.
 * The part used is removed from the pv_map so it can't be allocated twice.
 */
static int _alloc_parallel_area(struct alloc_handle *ah, uint32_t needed,
				struct pv_area **areas,
				uint32_t *ix, struct pv_area *log_area,
				uint32_t log_len)
{
	uint32_t area_len, remaining;
	uint32_t s;
	struct alloced_area *aa;

	remaining = needed - *ix;
	area_len = remaining / ah->area_multiple;

	/* Reduce area_len to the smallest of the areas */
	for (s = 0; s < ah->area_count; s++)
		if (area_len > areas[s]->count)
			area_len = areas[s]->count;

	if (!(aa = dm_pool_alloc(ah->mem, sizeof(*aa) *
				 (ah->area_count + (log_area ? 1 : 0))))) {
		log_error("alloced_area allocation failed");
		return 0;
	}

	for (s = 0; s < ah->area_count; s++) {
		aa[s].pv = areas[s]->map->pv;
		aa[s].pe = areas[s]->start;
		aa[s].len = area_len;
		dm_list_add(&ah->alloced_areas[s], &aa[s].list);
	}

	ah->total_area_len += area_len;

	for (s = 0; s < ah->area_count; s++)
		consume_pv_area(areas[s], area_len);

	if (log_area) {
		ah->log_area.pv = log_area->map->pv;
		ah->log_area.pe = log_area->start;
		ah->log_area.len = log_len;
		consume_pv_area(log_area, ah->log_area.len);
	}

	*ix += area_len * ah->area_multiple;

	return 1;
}

/*
 * Call fn for each AREA_PV used by the LV segment at lv:le of length *max_seg_len.
 * If any constituent area contains more than one segment, max_seg_len is
 * reduced to cover only the first.
 * fn should return 0 on error, 1 to continue scanning or >1 to terminate without error.
 * In the last case, this function passes on the return code.
 */
static int _for_each_pv(struct cmd_context *cmd, struct logical_volume *lv,
			uint32_t le, uint32_t len, uint32_t *max_seg_len,
			uint32_t first_area, uint32_t max_areas,
			int top_level_area_index,
			int only_single_area_segments,
			int (*fn)(struct cmd_context *cmd,
				  struct pv_segment *peg, uint32_t s,
				  void *data),
			void *data)
{
	struct lv_segment *seg;
	uint32_t s;
	uint32_t remaining_seg_len, area_len, area_multiple;
	int r = 1;

	if (!(seg = find_seg_by_le(lv, le))) {
		log_error("Failed to find segment for %s extent %" PRIu32,
			  lv->name, le);
		return 0;
	}

	/* Remaining logical length of segment */
	remaining_seg_len = seg->len - (le - seg->le);

	if (remaining_seg_len > len)
		remaining_seg_len = len;

	if (max_seg_len && *max_seg_len > remaining_seg_len)
		*max_seg_len = remaining_seg_len;

	area_multiple = calc_area_multiple(seg->segtype, seg->area_count);
	area_len = remaining_seg_len / area_multiple ? : 1;

	for (s = first_area;
	     s < seg->area_count && (!max_areas || s <= max_areas);
	     s++) {
		if (seg_type(seg, s) == AREA_LV) {
			if (!(r = _for_each_pv(cmd, seg_lv(seg, s),
					       seg_le(seg, s) +
					       (le - seg->le) / area_multiple,
					       area_len, max_seg_len,
					       only_single_area_segments ? 0 : 0,
					       only_single_area_segments ? 1U : 0U,
					       top_level_area_index != -1 ? top_level_area_index : (int) s,
					       only_single_area_segments, fn,
					       data)))
				stack;
		} else if (seg_type(seg, s) == AREA_PV)
			if (!(r = fn(cmd, seg_pvseg(seg, s), top_level_area_index != -1 ? (uint32_t) top_level_area_index : s, data)))
				stack;
		if (r != 1)
			return r;
	}

	/* FIXME only_single_area_segments used as workaround to skip log LV - needs new param? */
	if (!only_single_area_segments && seg_is_mirrored(seg) && seg->log_lv) {
		if (!(r = _for_each_pv(cmd, seg->log_lv, 0, seg->log_lv->le_count,
				       NULL, 0, 0, 0, only_single_area_segments,
				       fn, data)))
			stack;
		if (r != 1)
			return r;
	}

	/* FIXME Add snapshot cow LVs etc. */

	return 1;
}
static int _comp_area(const void *l, const void *r)
{
	const struct pv_area *lhs = *((const struct pv_area **) l);
	const struct pv_area *rhs = *((const struct pv_area **) r);

	if (lhs->count < rhs->count)
		return 1;

	else if (lhs->count > rhs->count)
		return -1;

	return 0;
}
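/*
 * The comparison is deliberately inverted (larger count sorts first) so
 * that the qsort() call in _find_parallel_space() leaves the candidate
 * areas ordered from biggest to smallest free space.
 */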
/*
 * Search for pvseg that matches condition
 */
struct pv_match {
	int (*condition)(struct pv_segment *pvseg, struct pv_area *pva);

	struct pv_area **areas;
	struct pv_area *pva;
	uint32_t areas_size;
	int s;	/* Area index of match */
};

/*
 * Is PV area on the same PV?
 */
static int _is_same_pv(struct pv_segment *pvseg, struct pv_area *pva)
{
	if (pvseg->pv != pva->map->pv)
		return 0;

	return 1;
}

/*
 * Is PV area contiguous to PV segment?
 */
static int _is_contiguous(struct pv_segment *pvseg, struct pv_area *pva)
{
	if (pvseg->pv != pva->map->pv)
		return 0;

	if (pvseg->pe + pvseg->len != pva->start)
		return 0;

	return 1;
}

static int _is_condition(struct cmd_context *cmd __attribute((unused)),
			 struct pv_segment *pvseg, uint32_t s,
			 void *data)
{
	struct pv_match *pvmatch = data;

	if (!pvmatch->condition(pvseg, pvmatch->pva))
		return 1;	/* Continue */

	if (s >= pvmatch->areas_size)
		return 1;

	pvmatch->areas[s] = pvmatch->pva;

	return 2;	/* Finished */
}
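/*
 * The return codes follow the _for_each_pv() callback contract documented
 * above: 1 keeps the scan going, 2 terminates it early once a matching
 * area has been stored, which _check_cling() and _check_contiguous()
 * below detect as r == 2.
 */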
/*
 * Is pva on same PV as any existing areas?
 */
static int _check_cling(struct cmd_context *cmd,
			struct lv_segment *prev_lvseg, struct pv_area *pva,
			struct pv_area **areas, uint32_t areas_size)
{
	struct pv_match pvmatch;
	int r;

	pvmatch.condition = _is_same_pv;
	pvmatch.areas = areas;
	pvmatch.areas_size = areas_size;
	pvmatch.pva = pva;

	/* FIXME Cope with stacks by flattening */
	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
			       0, 0, -1, 1,
			       _is_condition, &pvmatch)))
		stack;

	if (r != 2)
		return 0;

	return 1;
}

/*
 * Is pva contiguous to any existing areas or on the same PV?
 */
static int _check_contiguous(struct cmd_context *cmd,
			     struct lv_segment *prev_lvseg, struct pv_area *pva,
			     struct pv_area **areas, uint32_t areas_size)
{
	struct pv_match pvmatch;
	int r;

	pvmatch.condition = _is_contiguous;
	pvmatch.areas = areas;
	pvmatch.areas_size = areas_size;
	pvmatch.pva = pva;

	/* FIXME Cope with stacks by flattening */
	if (!(r = _for_each_pv(cmd, prev_lvseg->lv,
			       prev_lvseg->le + prev_lvseg->len - 1, 1, NULL,
			       0, 0, -1, 1,
			       _is_condition, &pvmatch)))
		stack;

	if (r != 2)
		return 0;

	return 1;
}

/*
 * Choose sets of parallel areas to use, respecting any constraints.
 */
static int _find_parallel_space(struct alloc_handle *ah, alloc_policy_t alloc,
				struct dm_list *pvms, struct pv_area **areas,
				uint32_t areas_size, unsigned can_split,
				struct lv_segment *prev_lvseg,
				uint32_t *allocated, uint32_t needed)
{
	struct pv_map *pvm;
	struct pv_area *pva;
	struct pv_list *pvl;
	unsigned already_found_one = 0;
	unsigned contiguous = 0, cling = 0, preferred_count = 0;
	unsigned ix;
	unsigned ix_offset = 0;	/* Offset for non-preferred allocations */
	unsigned too_small_for_log_count;	/* How many too small for log? */
	uint32_t max_parallel;	/* Maximum extents to allocate */
	uint32_t next_le;
	struct seg_pvs *spvs;
	struct dm_list *parallel_pvs;
	uint32_t free_pes;
	uint32_t log_len;
	struct pv_area *log_area;
	unsigned log_needs_allocating;

	/* Is there enough total space? */
	free_pes = pv_maps_size(pvms);
	if (needed - *allocated > free_pes) {
		log_error("Insufficient free space: %" PRIu32 " extents needed,"
			  " but only %" PRIu32 " available",
			  needed - *allocated, free_pes);
		return 0;
	}

	/* FIXME Select log PV appropriately if there isn't one yet */

	/* Are there any preceding segments we must follow on from? */
	if (prev_lvseg) {
		ix_offset = prev_lvseg->area_count;
		if ((alloc == ALLOC_CONTIGUOUS))
			contiguous = 1;
		else if ((alloc == ALLOC_CLING))
			cling = 1;
		else
			ix_offset = 0;
	}

	/* FIXME This algorithm needs a lot of cleaning up! */
	/* FIXME anywhere doesn't find all space yet */
	/* ix_offset holds the number of allocations that must be contiguous */
	/* ix holds the number of areas found on other PVs */
	do {
		ix = 0;
		preferred_count = 0;

		parallel_pvs = NULL;
		max_parallel = needed;

		/*
		 * If there are existing parallel PVs, avoid them and reduce
		 * the maximum we can allocate in one go accordingly.
		 */
		if (ah->parallel_areas) {
			next_le = (prev_lvseg ? prev_lvseg->le + prev_lvseg->len : 0) + *allocated / ah->area_multiple;
			dm_list_iterate_items(spvs, ah->parallel_areas) {
				if (next_le >= spvs->le + spvs->len)
					continue;

				if (max_parallel > (spvs->le + spvs->len) * ah->area_multiple)
					max_parallel = (spvs->le + spvs->len) * ah->area_multiple;
				parallel_pvs = &spvs->pvs;
				break;
			}
		}

		/*
		 * Put the smallest area of each PV that is at least the
		 * size we need into areas array.  If there isn't one
		 * that fits completely and we're allowed more than one
		 * LV segment, then take the largest remaining instead.
		 */
		dm_list_iterate_items(pvm, pvms) {
			if (dm_list_empty(&pvm->areas))
				continue;	/* Next PV */

			if (alloc != ALLOC_ANYWHERE) {
				/* Don't allocate onto the log pv */
				if (ah->log_count &&
				    pvm->pv == ah->log_area.pv)
					continue;	/* Next PV */

				/* Avoid PVs used by existing parallel areas */
				if (parallel_pvs)
					dm_list_iterate_items(pvl, parallel_pvs)
						if (pvm->pv == pvl->pv)
							goto next_pv;
			}

			already_found_one = 0;
			/* First area in each list is the largest */
			dm_list_iterate_items(pva, &pvm->areas) {
				if (contiguous) {
					if (prev_lvseg &&
					    _check_contiguous(ah->cmd,
							      prev_lvseg,
							      pva, areas,
							      areas_size)) {
						preferred_count++;
						goto next_pv;
					}
					continue;
				}

				if (cling) {
					if (prev_lvseg &&
					    _check_cling(ah->cmd,
							 prev_lvseg,
							 pva, areas,
							 areas_size)) {
						preferred_count++;
					}
					goto next_pv;
				}

				/* Is it big enough on its own? */
				if (pva->count * ah->area_multiple <
				    max_parallel - *allocated &&
				    ((!can_split && !ah->log_count) ||
				     (already_found_one &&
				      !(alloc == ALLOC_ANYWHERE))))
					goto next_pv;

				if (!already_found_one ||
				    alloc == ALLOC_ANYWHERE) {
					ix++;
					already_found_one = 1;
				}

				areas[ix + ix_offset - 1] = pva;

				goto next_pv;
			}
		next_pv:
			if (ix >= areas_size)
				break;
		}

		if ((contiguous || cling) && (preferred_count < ix_offset))
			break;

		log_needs_allocating = (ah->log_count && !ah->log_area.len) ?
				       1 : 0;

		if (ix + ix_offset < ah->area_count +
		    (log_needs_allocating ? ah->log_count : 0))
			break;

		/* sort the areas so we allocate from the biggest */
		if (ix > 1)
			qsort(areas + ix_offset, ix, sizeof(*areas),
			      _comp_area);

		/*
		 * First time around, if there's a log, allocate it on the
		 * smallest device that has space for it.
		 *
		 * FIXME decide which PV to use at top of function instead
		 */

		too_small_for_log_count = 0;

		if (!log_needs_allocating) {
			log_len = 0;
			log_area = NULL;
		} else {
			log_len = mirror_log_extents(ah->log_region_size,
						     pv_pe_size((*areas)->map->pv),
						     (max_parallel - *allocated) / ah->area_multiple);

			/* How many areas are too small for the log? */
			while (too_small_for_log_count < ix_offset + ix &&
			       (*(areas + ix_offset + ix - 1 -
				  too_small_for_log_count))->count < log_len)
				too_small_for_log_count++;

			log_area = *(areas + ix_offset + ix - 1 -
				     too_small_for_log_count);
		}

		if (ix + ix_offset < ah->area_count +
		    (log_needs_allocating ? ah->log_count +
					    too_small_for_log_count : 0))
			/* FIXME With ALLOC_ANYWHERE, need to split areas */
			break;

		if (!_alloc_parallel_area(ah, max_parallel, areas, allocated,
					  log_area, log_len))
			return_0;

	} while (!contiguous && *allocated != needed && can_split);

	return 1;
}
/*
 * Allocate several segments, each the same size, in parallel.
 * If mirrored_pv and mirrored_pe are supplied, it is used as
 * the first area, and additional areas are allocated parallel to it.
 */
static int _allocate(struct alloc_handle *ah,
		     struct volume_group *vg,
		     struct logical_volume *lv,
		     uint32_t new_extents,
		     unsigned can_split,
		     struct dm_list *allocatable_pvs)
{
	struct pv_area **areas;
	uint32_t allocated = lv ? lv->le_count : 0;
	uint32_t old_allocated;
	struct lv_segment *prev_lvseg = NULL;
	int r = 0;
	struct dm_list *pvms;
	uint32_t areas_size;
	alloc_policy_t alloc;

	if (allocated >= new_extents && !ah->log_count) {
		log_error("_allocate called with no work to do!");
		return 1;
	}

	if (ah->alloc == ALLOC_CONTIGUOUS)
		can_split = 0;

	if (lv && !dm_list_empty(&lv->segments))
		prev_lvseg = dm_list_item(dm_list_last(&lv->segments),
					  struct lv_segment);
	/*
	 * Build the sets of available areas on the pv's.
	 */
	if (!(pvms = create_pv_maps(ah->mem, vg, allocatable_pvs)))
		return_0;

	if (!_log_parallel_areas(ah->mem, ah->parallel_areas))
		stack;

	areas_size = dm_list_size(pvms);
	if (areas_size && areas_size < (ah->area_count + ah->log_count)) {
		if (ah->alloc != ALLOC_ANYWHERE) {
			log_error("Not enough PVs with free space available "
				  "for parallel allocation.");
			log_error("Consider --alloc anywhere if desperate.");
			return 0;
		}
		areas_size = ah->area_count + ah->log_count;
	}

	/* Upper bound if none of the PVs in prev_lvseg is in pvms */
	/* FIXME Work size out properly */
	if (prev_lvseg)
		areas_size += prev_lvseg->area_count;

	/* Allocate an array of pv_areas to hold the largest space on each PV */
	if (!(areas = dm_malloc(sizeof(*areas) * areas_size))) {
		log_error("Couldn't allocate areas array.");
		return 0;
	}

	/* Attempt each defined allocation policy in turn */
	for (alloc = ALLOC_CONTIGUOUS; alloc < ALLOC_INHERIT; alloc++) {
		old_allocated = allocated;
		if (!_find_parallel_space(ah, alloc, pvms, areas,
					  areas_size, can_split,
					  prev_lvseg, &allocated, new_extents))
			goto_out;
		if ((allocated == new_extents) || (ah->alloc == alloc) ||
		    (!can_split && (allocated != old_allocated)))
			break;
	}

	if (allocated != new_extents) {
		log_error("Insufficient suitable %sallocatable extents "
			  "for logical volume %s: %u more required",
			  can_split ? "" : "contiguous ",
			  lv ? lv->name : "",
			  (new_extents - allocated) * ah->area_count
			  / ah->area_multiple);
		goto out;
	}

	if (ah->log_count && !ah->log_area.len) {
		log_error("Insufficient extents for log allocation "
			  "for logical volume %s.",
			  lv ? lv->name : "");
		goto out;
	}

	r = 1;

      out:
	dm_free(areas);
	return r;
}
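/*
 * The policy loop above falls through progressively less strict policies
 * (contiguous, cling, normal, anywhere - assuming the usual alloc_policy_t
 * ordering) and stops as soon as enough extents are found, the handle's
 * own policy has been attempted, or an unsplittable request makes partial
 * progress.
 */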
int lv_add_virtual_segment(struct logical_volume *lv, uint32_t status,
			   uint32_t extents, const struct segment_type *segtype)
{
	struct lv_segment *seg;

	if (!(seg = alloc_lv_segment(lv->vg->cmd->mem, segtype, lv,
				     lv->le_count, extents, status, 0,
				     NULL, 0, extents, 0, 0, 0))) {
		log_error("Couldn't allocate new zero segment.");
		return 0;
	}

	dm_list_add(&lv->segments, &seg->list);

	lv->le_count += extents;
	lv->size += (uint64_t) extents * lv->vg->extent_size;

	lv->status |= VIRTUAL;

	return 1;
}

/*
 * Entry point for all extent allocations.
 */
struct alloc_handle *allocate_extents(struct volume_group *vg,
				      struct logical_volume *lv,
				      const struct segment_type *segtype,
				      uint32_t stripes,
				      uint32_t mirrors, uint32_t log_count,
				      uint32_t log_region_size, uint32_t extents,
				      struct dm_list *allocatable_pvs,
				      alloc_policy_t alloc,
				      struct dm_list *parallel_areas)
{
	struct alloc_handle *ah;

	if (segtype_is_virtual(segtype)) {
		log_error("allocate_extents does not handle virtual segments");
		return NULL;
	}

	if (vg->fid->fmt->ops->segtype_supported &&
	    !vg->fid->fmt->ops->segtype_supported(vg->fid, segtype)) {
		log_error("Metadata format (%s) does not support required "
			  "LV segment type (%s).", vg->fid->fmt->name,
			  segtype->name);
		log_error("Consider changing the metadata format by running "
			  "vgconvert.");
		return NULL;
	}

	if (alloc == ALLOC_INHERIT)
		alloc = vg->alloc;

	if (!(ah = _alloc_init(vg->cmd, vg->cmd->mem, segtype, alloc, mirrors,
			       stripes, log_count, log_region_size, parallel_areas)))
		return_NULL;

	if (!segtype_is_virtual(segtype) &&
	    !_allocate(ah, vg, lv, (lv ? lv->le_count : 0) + extents,
		       1, allocatable_pvs)) {
		alloc_destroy(ah);
		return_NULL;
	}

	return ah;
}
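/*
 * Callers own the returned handle and must release it with alloc_destroy()
 * once the allocated areas have been consumed.  Typical sequence (sketch;
 * error handling elided, names as in lv_extend() below):
 *
 *	if (!(ah = allocate_extents(vg, lv, segtype, stripes, mirrors, 0, 0,
 *				    extents, allocatable_pvs, alloc, NULL)))
 *		return_0;
 *	r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
 *			   stripe_size, status, 0, NULL);
 *	alloc_destroy(ah);
 */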
/*
 * Add new segments to an LV from supplied list of areas.
 */
int lv_add_segment(struct alloc_handle *ah,
		   uint32_t first_area, uint32_t num_areas,
		   struct logical_volume *lv,
		   const struct segment_type *segtype,
		   uint32_t stripe_size,
		   uint32_t status,
		   uint32_t region_size,
		   struct logical_volume *log_lv)
{
	if (!segtype) {
		log_error("Missing segtype in lv_add_segment().");
		return 0;
	}

	if (segtype_is_virtual(segtype)) {
		log_error("lv_add_segment cannot handle virtual segments");
		return 0;
	}

	if (!_setup_alloced_segments(lv, &ah->alloced_areas[first_area],
				     num_areas, status,
				     stripe_size, segtype,
				     region_size, log_lv))
		return_0;

	if ((segtype->flags & SEG_CAN_SPLIT) && !lv_merge_segments(lv)) {
		log_error("Couldn't merge segments after extending "
			  "logical volume.");
		return 0;
	}

	if (lv->vg->fid->fmt->ops->lv_setup &&
	    !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
		return_0;

	return 1;
}

/*
 * "mirror" segment type doesn't support split.
 * So, when adding mirrors to linear LV segment, first split it,
 * then convert it to "mirror" and add areas.
 */
static struct lv_segment *_convert_seg_to_mirror(struct lv_segment *seg,
						 uint32_t region_size,
						 struct logical_volume *log_lv)
{
	struct lv_segment *newseg;
	uint32_t s;

	if (!seg_is_striped(seg)) {
		log_error("Can't convert non-striped segment to mirrored.");
		return NULL;
	}

	if (seg->area_count > 1) {
		log_error("Can't convert striped segment with multiple areas "
			  "to mirrored.");
		return NULL;
	}

	if (!(newseg = alloc_lv_segment(seg->lv->vg->cmd->mem,
					get_segtype_from_string(seg->lv->vg->cmd, "mirror"),
					seg->lv, seg->le, seg->len,
					seg->status, seg->stripe_size,
					log_lv,
					seg->area_count, seg->area_len,
					seg->chunk_size, region_size,
					seg->extents_copied))) {
		log_error("Couldn't allocate converted LV segment");
		return NULL;
	}

	for (s = 0; s < seg->area_count; s++)
		if (!move_lv_segment_area(newseg, s, seg, s))
			return_NULL;

	dm_list_add(&seg->list, &newseg->list);
	dm_list_del(&seg->list);

	return newseg;
}

/*
 * Add new areas to mirrored segments
 */
int lv_add_mirror_areas(struct alloc_handle *ah,
			struct logical_volume *lv, uint32_t le,
			uint32_t region_size)
{
	struct alloced_area *aa;
	struct lv_segment *seg;
	uint32_t current_le = le;
	uint32_t s, old_area_count, new_area_count;

	dm_list_iterate_items(aa, &ah->alloced_areas[0]) {
		if (!(seg = find_seg_by_le(lv, current_le))) {
			log_error("Failed to find segment for %s extent %"
				  PRIu32, lv->name, current_le);
			return 0;
		}

		/* Allocator assures aa[0].len <= seg->area_len */
		if (aa[0].len < seg->area_len) {
			if (!lv_split_segment(lv, seg->le + aa[0].len)) {
				log_error("Failed to split segment at %s "
					  "extent %" PRIu32, lv->name, le);
				return 0;
			}
		}

		if (!seg_is_mirrored(seg) &&
		    (!(seg = _convert_seg_to_mirror(seg, region_size, NULL))))
			return_0;

		old_area_count = seg->area_count;
		new_area_count = old_area_count + ah->area_count;

		if (!_lv_segment_add_areas(lv, seg, new_area_count))
			return_0;

		for (s = 0; s < ah->area_count; s++) {
			if (!set_lv_segment_area_pv(seg, s + old_area_count,
						    aa[s].pv, aa[s].pe))
				return_0;
		}

		current_le += seg->area_len;
	}

	lv->status |= MIRRORED;

	if (lv->vg->fid->fmt->ops->lv_setup &&
	    !lv->vg->fid->fmt->ops->lv_setup(lv->vg->fid, lv))
		return_0;

	return 1;
}
/*
 * Add mirror image LVs to mirrored segments
 */
int lv_add_mirror_lvs(struct logical_volume *lv,
		      struct logical_volume **sub_lvs,
		      uint32_t num_extra_areas,
		      uint32_t status, uint32_t region_size)
{
	struct lv_segment *seg;
	uint32_t old_area_count, new_area_count;
	uint32_t m;
	struct segment_type *mirror_segtype;

	seg = first_seg(lv);

	if (dm_list_size(&lv->segments) != 1 || seg_type(seg, 0) != AREA_LV) {
		log_error("Mirror layer must be inserted before adding mirrors");
		return_0;
	}

	mirror_segtype = get_segtype_from_string(lv->vg->cmd, "mirror");
	if (seg->segtype != mirror_segtype)
		if (!(seg = _convert_seg_to_mirror(seg, region_size, NULL)))
			return_0;

	if (region_size && region_size != seg->region_size) {
		log_error("Conflicting region_size");
		return 0;
	}

	old_area_count = seg->area_count;
	new_area_count = old_area_count + num_extra_areas;

	if (!_lv_segment_add_areas(lv, seg, new_area_count)) {
		log_error("Failed to allocate widened LV segment for %s.",
			  lv->name);
		return 0;
	}

	for (m = 0; m < old_area_count; m++)
		seg_lv(seg, m)->status |= status;

	for (m = old_area_count; m < new_area_count; m++) {
		if (!set_lv_segment_area_lv(seg, m, sub_lvs[m - old_area_count],
					    0, status))
			return_0;
		lv_set_hidden(sub_lvs[m - old_area_count]);
	}

	lv->status |= MIRRORED;

	return 1;
}

/*
 * Turn an empty LV into a mirror log.
 */
int lv_add_log_segment(struct alloc_handle *ah, struct logical_volume *log_lv)
{
	struct lv_segment *seg;

	if (dm_list_size(&log_lv->segments)) {
		log_error("Log segments can only be added to an empty LV");
		return 0;
	}

	if (!(seg = alloc_lv_segment(log_lv->vg->cmd->mem,
				     get_segtype_from_string(log_lv->vg->cmd,
							     "striped"),
				     log_lv, 0, ah->log_area.len, MIRROR_LOG,
				     0, NULL, 1, ah->log_area.len, 0, 0, 0))) {
		log_error("Couldn't allocate new mirror log segment.");
		return 0;
	}

	if (!set_lv_segment_area_pv(seg, 0, ah->log_area.pv, ah->log_area.pe))
		return_0;

	dm_list_add(&log_lv->segments, &seg->list);
	log_lv->le_count += ah->log_area.len;
	log_lv->size += (uint64_t) log_lv->le_count * log_lv->vg->extent_size;

	if (log_lv->vg->fid->fmt->ops->lv_setup &&
	    !log_lv->vg->fid->fmt->ops->lv_setup(log_lv->vg->fid, log_lv))
		return_0;

	return 1;
}
static int _lv_extend_mirror(struct alloc_handle *ah,
			     struct logical_volume *lv,
			     uint32_t extents, uint32_t first_area)
{
	struct lv_segment *seg;
	uint32_t m, s;

	seg = first_seg(lv);
	for (m = first_area, s = 0; s < seg->area_count; s++) {
		if (is_temporary_mirror_layer(seg_lv(seg, s))) {
			if (!_lv_extend_mirror(ah, seg_lv(seg, s), extents, m))
				return_0;
			m += lv_mirror_count(seg_lv(seg, s));
			continue;
		}

		if (!lv_add_segment(ah, m++, 1, seg_lv(seg, s),
				    get_segtype_from_string(lv->vg->cmd,
							    "striped"),
				    0, 0, 0, NULL)) {
			log_error("Aborting. Failed to extend %s.",
				  seg_lv(seg, s)->name);
			return 0;
		}
	}

	seg->area_len += extents;
	seg->len += extents;
	lv->le_count += extents;
	lv->size += (uint64_t) extents * lv->vg->extent_size;

	return 1;
}
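/*
 * _lv_extend_mirror() recurses through any temporary mirror layers so that
 * each mirror image is extended with its own striped segment; 'm' tracks
 * the next unused parallel area in the allocation handle as the images are
 * visited.
 */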
/*
 * Entry point for single-step LV allocation + extension.
 */
int lv_extend(struct logical_volume *lv,
	      const struct segment_type *segtype,
	      uint32_t stripes, uint32_t stripe_size,
	      uint32_t mirrors, uint32_t extents,
	      struct physical_volume *mirrored_pv __attribute((unused)),
	      uint32_t mirrored_pe __attribute((unused)),
	      uint32_t status, struct dm_list *allocatable_pvs,
	      alloc_policy_t alloc)
{
	int r = 1;
	struct alloc_handle *ah;

	if (segtype_is_virtual(segtype))
		return lv_add_virtual_segment(lv, status, extents, segtype);

	if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors, 0, 0,
				    extents, allocatable_pvs, alloc, NULL)))
		return_0;

	if (mirrors < 2)
		r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
				   stripe_size, status, 0, NULL);
	else
		r = _lv_extend_mirror(ah, lv, extents, 0);

	alloc_destroy(ah);
	return r;
}

/*
 * Minimal LV renaming function.
 * Metadata transaction should be made by caller.
 * Assumes new_name is allocated from cmd->mem pool.
 */
static int _rename_single_lv(struct logical_volume *lv, char *new_name)
{
	struct volume_group *vg = lv->vg;

	if (find_lv_in_vg(vg, new_name)) {
		log_error("Logical volume \"%s\" already exists in "
			  "volume group \"%s\"", new_name, vg->name);
		return 0;
	}

	if (lv->status & LOCKED) {
		log_error("Cannot rename locked LV %s", lv->name);
		return 0;
	}

	lv->name = new_name;

	return 1;
}
/*
 * Rename sub LV.
 * 'lv_name_old' and 'lv_name_new' are old and new names of the main LV.
 */
static int _rename_sub_lv(struct cmd_context *cmd,
			  struct logical_volume *lv,
			  const char *lv_name_old, const char *lv_name_new)
{
	char *suffix, *new_name;
	size_t len;

	/*
	 * A sub LV name starts with lv_name_old + '_'.
	 * The suffix follows lv_name_old and includes '_'.
	 */
	len = strlen(lv_name_old);
	if (strncmp(lv->name, lv_name_old, len) || lv->name[len] != '_') {
		log_error("Cannot rename \"%s\": name format not recognized "
			  "for internal LV \"%s\"",
			  lv_name_old, lv->name);
		return 0;
	}
	suffix = lv->name + len;

	/*
	 * Compose a new name for sub lv:
	 *   e.g. new name is "lvol1_mlog"
	 *        if the sub LV is "lvol0_mlog" and
	 *        a new name for main LV is "lvol1"
	 */
	len = strlen(lv_name_new) + strlen(suffix) + 1;
	new_name = dm_pool_alloc(cmd->mem, len);
	if (!new_name) {
		log_error("Failed to allocate space for new name");
		return 0;
	}
	if (dm_snprintf(new_name, len, "%s%s", lv_name_new, suffix) < 0) {
		log_error("Failed to create new name");
		return 0;
	}

	/* Rename it */
	return _rename_single_lv(lv, new_name);
}
/* Callback for _for_each_sub_lv */
static int _rename_cb(struct cmd_context *cmd, struct logical_volume *lv,
		      void *data)
{
	struct lv_names *lv_names = (struct lv_names *) data;

	return _rename_sub_lv(cmd, lv, lv_names->old, lv_names->new);
}

/*
 * Loop down sub LVs and call "func" for each.
 * "func" is responsible for logging the necessary information on failure.
 */
static int _for_each_sub_lv(struct cmd_context *cmd, struct logical_volume *lv,
			    int (*func)(struct cmd_context *cmd,
					struct logical_volume *lv,
					void *data),
			    void *data)
{
	struct logical_volume *org;
	struct lv_segment *seg;
	uint32_t s;

	if (lv_is_cow(lv) && lv_is_virtual_origin(org = origin_from_cow(lv)))
		if (!func(cmd, org, data))
			return_0;

	dm_list_iterate_items(seg, &lv->segments) {
		if (seg->log_lv && !func(cmd, seg->log_lv, data))
			return_0;
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!func(cmd, seg_lv(seg, s), data))
				return_0;
			if (!_for_each_sub_lv(cmd, seg_lv(seg, s), func, data))
				return_0;
		}
	}

	return 1;
}
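/*
 * The traversal is depth-first: the virtual origin of a snapshot, each
 * segment's mirror log, and every AREA_LV area get "func" applied before
 * the walk descends into that sub LV's own segments.
 */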
/*
 * Core of LV renaming routine.
 * VG must be locked by caller.
 */
int lv_rename(struct cmd_context *cmd, struct logical_volume *lv,
	      const char *new_name)
{
	struct volume_group *vg = lv->vg;
	struct lv_names lv_names;
	DM_LIST_INIT(lvs_changed);
	struct lv_list lvl, lvl2;
	int r = 0;

	/* rename is not allowed on sub LVs */
	if (!lv_is_visible(lv)) {
		log_error("Cannot rename internal LV \"%s\".", lv->name);
		return 0;
	}

	if (find_lv_in_vg(vg, new_name)) {
		log_error("Logical volume \"%s\" already exists in "
			  "volume group \"%s\"", new_name, vg->name);
		return 0;
	}

	if (lv->status & LOCKED) {
		log_error("Cannot rename locked LV %s", lv->name);
		return 0;
	}

	if (!archive(vg))
		return 0;

	/* rename sub LVs */
	lv_names.old = lv->name;
	lv_names.new = new_name;
	if (!_for_each_sub_lv(cmd, lv, _rename_cb, (void *) &lv_names))
		return 0;

	/* rename main LV */
	if (!(lv->name = dm_pool_strdup(cmd->mem, new_name))) {
		log_error("Failed to allocate space for new name");
		return 0;
	}

	lvl.lv = lv;
	dm_list_add(&lvs_changed, &lvl.list);

	/* rename active virtual origin too */
	if (lv_is_cow(lv) && lv_is_virtual_origin(lvl2.lv = origin_from_cow(lv)))
		dm_list_add_h(&lvs_changed, &lvl2.list);

	log_verbose("Writing out updated volume group");
	if (!vg_write(vg))
		return 0;

	if (!suspend_lvs(cmd, &lvs_changed)) {
		vg_revert(vg);
		goto_out;
	}

	if (!(r = vg_commit(vg)))
		stack;

	resume_lvs(cmd, &lvs_changed);
out:
	backup(vg);
	return r;
}

char *generate_lv_name(struct volume_group *vg, const char *format,
		       char *buffer, size_t len)
{
	struct lv_list *lvl;
	int high = -1, i;

	dm_list_iterate_items(lvl, &vg->lvs) {
		if (sscanf(lvl->lv->name, format, &i) != 1)
			continue;

		if (i > high)
			high = i;
	}

	if (dm_snprintf(buffer, len, format, high + 1) < 0)
		return NULL;

	return buffer;
}
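/*
 * Example (hypothetical names): with existing LVs lvol0 and lvol3, a call
 * such as generate_lv_name(vg, "lvol%d", buffer, len) scans the VG with
 * sscanf(), finds the highest suffix 3 and writes "lvol4" into buffer.
 */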
int vg_max_lv_reached(struct volume_group *vg)
{
	if (!vg->max_lv)
		return 0;

	if (vg->max_lv > vg_visible_lvs(vg))
		return 0;

	log_verbose("Maximum number of logical volumes (%u) reached "
		    "in volume group %s", vg->max_lv, vg->name);

	return 1;
}

struct logical_volume *alloc_lv(struct dm_pool *mem)
{
	struct logical_volume *lv;

	if (!(lv = dm_pool_zalloc(mem, sizeof(*lv)))) {
		log_error("Unable to allocate logical volume structure");
		return NULL;
	}

	lv->snapshot = NULL;
	dm_list_init(&lv->snapshot_segs);
	dm_list_init(&lv->segments);
	dm_list_init(&lv->tags);
	dm_list_init(&lv->segs_using_this_lv);

	return lv;
}

/*
 * Create a new empty LV.
 */
struct logical_volume *lv_create_empty(const char *name,
				       union lvid *lvid,
				       uint32_t status,
				       alloc_policy_t alloc,
				       struct volume_group *vg)
{
	struct format_instance *fi = vg->fid;
	struct logical_volume *lv;
	char dname[NAME_LEN];

	if (vg_max_lv_reached(vg))
		stack;

	if (strstr(name, "%d") &&
	    !(name = generate_lv_name(vg, name, dname, sizeof(dname)))) {
		log_error("Failed to generate unique name for the new "
			  "logical volume");
		return NULL;
	} else if (find_lv_in_vg(vg, name)) {
		log_error("Unable to create LV %s in Volume Group %s: "
			  "name already in use.", name, vg->name);
		return NULL;
	}

	log_verbose("Creating logical volume %s", name);

	if (!(lv = alloc_lv(vg->vgmem)))
		return_NULL;

	if (!(lv->name = dm_pool_strdup(vg->vgmem, name)))
		goto_bad;

	lv->status = status;
	lv->alloc = alloc;
	lv->read_ahead = vg->cmd->default_settings.read_ahead;
	lv->major = -1;
	lv->minor = -1;
	lv->size = UINT64_C(0);
	lv->le_count = 0;

	if (lvid)
		lv->lvid = *lvid;

	if (!link_lv_to_vg(vg, lv))
		goto_bad;

	if (fi->fmt->ops->lv_setup && !fi->fmt->ops->lv_setup(fi, lv))
		goto_bad;

	return lv;
bad:
	dm_pool_free(vg->vgmem, lv);
	return NULL;
}

static int _add_pvs(struct cmd_context *cmd, struct pv_segment *peg,
		    uint32_t s __attribute((unused)), void *data)
{
	struct seg_pvs *spvs = (struct seg_pvs *) data;
	struct pv_list *pvl;

	/* Don't add again if it's already on list. */
	if (find_pv_in_pv_list(&spvs->pvs, peg->pv))
		return 1;

	if (!(pvl = dm_pool_alloc(cmd->mem, sizeof(*pvl)))) {
		log_error("pv_list allocation failed");
		return 0;
	}

	pvl->pv = peg->pv;

	dm_list_add(&spvs->pvs, &pvl->list);

	return 1;
}
/*
 * Construct dm_list of segments of LVs showing which PVs they use.
 */
struct dm_list *build_parallel_areas_from_lv(struct cmd_context *cmd,
					     struct logical_volume *lv)
{
	struct dm_list *parallel_areas;
	struct seg_pvs *spvs;
	uint32_t current_le = 0;

	if (!(parallel_areas = dm_pool_alloc(cmd->mem, sizeof(*parallel_areas)))) {
		log_error("parallel_areas allocation failed");
		return NULL;
	}

	dm_list_init(parallel_areas);

	do {
		if (!(spvs = dm_pool_zalloc(cmd->mem, sizeof(*spvs)))) {
			log_error("allocation failed");
			return NULL;
		}

		dm_list_init(&spvs->pvs);

		spvs->le = current_le;
		spvs->len = lv->le_count - current_le;

		dm_list_add(parallel_areas, &spvs->list);

		/* Find next segment end */
		/* FIXME Unnecessary nesting! */
		if (!_for_each_pv(cmd, lv, current_le, spvs->len, &spvs->len,
				  0, 0, -1, 0, _add_pvs, (void *) spvs))
			return_NULL;

		current_le = spvs->le + spvs->len;
	} while (current_le < lv->le_count);

	/* FIXME Merge adjacent segments with identical PV lists (avoids need for contiguous allocation attempts between successful allocations) */

	return parallel_areas;
}
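/*
 * The result is an ordered list of seg_pvs entries covering all the LV's
 * extents: each entry spans a run of logical extents (le, len) over which
 * the set of underlying PVs does not change, with that set in ->pvs.
 */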
int link_lv_to_vg(struct volume_group *vg, struct logical_volume *lv)
{
	struct lv_list *lvl;

	if (vg_max_lv_reached(vg))
		stack;

	if (!(lvl = dm_pool_zalloc(vg->vgmem, sizeof(*lvl))))
		return_0;

	lvl->lv = lv;
	lv->vg = vg;
	dm_list_add(&vg->lvs, &lvl->list);

	return 1;
}

int unlink_lv_from_vg(struct logical_volume *lv)
{
	struct lv_list *lvl;

	if (!(lvl = find_lv_in_vg(lv->vg, lv->name)))
		return_0;

	dm_list_del(&lvl->list);

	return 1;
}

void lv_set_visible(struct logical_volume *lv)
{
	if (lv_is_visible(lv))
		return;

	lv->status |= VISIBLE_LV;

	log_debug("LV %s in VG %s is now visible.", lv->name, lv->vg->name);
}

void lv_set_hidden(struct logical_volume *lv)
{
	if (!lv_is_visible(lv))
		return;

	lv->status &= ~VISIBLE_LV;

	log_debug("LV %s in VG %s is now hidden.", lv->name, lv->vg->name);
}

int lv_remove_single(struct cmd_context *cmd, struct logical_volume *lv,
		     const force_t force)
{
	struct volume_group *vg;
	struct lvinfo info;
	struct logical_volume *origin = NULL;

	vg = lv->vg;

	if (!vg_check_status(vg, LVM_WRITE))
		return 0;

	if (lv_is_origin(lv)) {
		log_error("Can't remove logical volume \"%s\" under snapshot",
			  lv->name);
		return 0;
	}

	if (lv->status & MIRROR_IMAGE) {
		log_error("Can't remove logical volume %s used by a mirror",
			  lv->name);
		return 0;
	}

	if (lv->status & MIRROR_LOG) {
		log_error("Can't remove logical volume %s used as mirror log",
			  lv->name);
		return 0;
	}

	if (lv->status & LOCKED) {
		log_error("Can't remove locked LV %s", lv->name);
		return 0;
	}

	/* FIXME Ensure not referred to by another existing LV */

	if (lv_info(cmd, lv, &info, 1, 0)) {
		if (info.open_count) {
			log_error("Can't remove open logical volume \"%s\"",
				  lv->name);
			return 0;
		}

		if (lv_is_active(lv) && (force == PROMPT) &&
		    lv_is_visible(lv) &&
		    yes_no_prompt("Do you really want to remove active "
				  "%slogical volume %s? [y/n]: ",
				  vg_is_clustered(vg) ? "clustered " : "",
				  lv->name) == 'n') {
			log_print("Logical volume %s not removed", lv->name);
			return 0;
		}
	}

	if (!archive(vg))
		return 0;

	/* FIXME Snapshot commit out of sequence if it fails after here? */
	if (!deactivate_lv(cmd, lv)) {
		log_error("Unable to deactivate logical volume \"%s\"",
			  lv->name);
		return 0;
	}

	if (lv_is_cow(lv)) {
		origin = origin_from_cow(lv);
		log_verbose("Removing snapshot %s", lv->name);
		if (!vg_remove_snapshot(lv))
			return_0;
	}

	log_verbose("Releasing logical volume \"%s\"", lv->name);
	if (!lv_remove(lv)) {
		log_error("Error releasing logical volume \"%s\"", lv->name);
		return 0;
	}

	/* store it on disks */
	if (!vg_write(vg) || !vg_commit(vg))
		return_0;

	backup(vg);

	/* If no snapshots left, reload without -real. */
	if (origin && !lv_is_origin(origin)) {
		if (!suspend_lv(cmd, origin))
			log_error("Failed to refresh %s without snapshot.", origin->name);
		else if (!resume_lv(cmd, origin))
			log_error("Failed to resume %s.", origin->name);
	}

	if (lv_is_visible(lv))
		log_print("Logical volume \"%s\" successfully removed", lv->name);

	return 1;
}
/*
 * Remove an LV and its dependencies - LV leaf nodes should be removed first.
 */
int lv_remove_with_dependencies(struct cmd_context *cmd, struct logical_volume *lv,
				const force_t force)
{
	struct dm_list *snh, *snht;

	if (lv_is_origin(lv)) {
		/* remove snapshot LVs first */
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs) {
			if (!lv_remove_with_dependencies(cmd, dm_list_struct_base(snh, struct lv_segment,
										  origin_list)->cow,
							 force))
				return 0;
		}
	}

	return lv_remove_single(cmd, lv, force);
}
/*
 * insert_layer_for_segments_on_pv() inserts a layer segment for a segment area.
 * However, layer modification could split the underlying layer segment.
 * This function splits the parent area so as to keep the 1:1 relationship
 * between the parent area and the underlying layer segment.
 * Since the layer LV might have other layers below, build_parallel_areas_from_lv()
 * is used to find the lowest-level segment boundaries.
 */
static int _split_parent_area(struct lv_segment *seg, uint32_t s,
			      struct dm_list *layer_seg_pvs)
{
	uint32_t parent_area_len, parent_le, layer_le;
	uint32_t area_multiple;
	struct seg_pvs *spvs;

	if (seg_is_striped(seg))
		area_multiple = seg->area_count;
	else
		area_multiple = 1;

	parent_area_len = seg->area_len;
	parent_le = seg->le;
	layer_le = seg_le(seg, s);

	while (parent_area_len > 0) {
		/* Find the layer segment pointed at */
		if (!(spvs = _find_seg_pvs_by_le(layer_seg_pvs, layer_le))) {
			log_error("layer segment for %s:%" PRIu32 " not found",
				  seg->lv->name, parent_le);
			return 0;
		}

		if (spvs->le != layer_le) {
			log_error("Incompatible layer boundary: "
				  "%s:%" PRIu32 "[%" PRIu32 "] on %s:%" PRIu32,
				  seg->lv->name, parent_le, s,
				  seg_lv(seg, s)->name, layer_le);
			return 0;
		}

		if (spvs->len < parent_area_len) {
			parent_le += spvs->len * area_multiple;
			if (!lv_split_segment(seg->lv, parent_le))
				return_0;
		}

		parent_area_len -= spvs->len;
		layer_le += spvs->len;
	}

	return 1;
}
/*
 * Split the parent LV segments if the layer LV below it is split.
 */
int split_parent_segments_for_layer(struct cmd_context *cmd,
				    struct logical_volume *layer_lv)
{
	struct lv_list *lvl;
	struct logical_volume *parent_lv;
	struct lv_segment *seg;
	uint32_t s;
	struct dm_list *parallel_areas;

	if (!(parallel_areas = build_parallel_areas_from_lv(cmd, layer_lv)))
		return_0;

	/* Loop through all LVs except itself */
	dm_list_iterate_items(lvl, &layer_lv->vg->lvs) {
		parent_lv = lvl->lv;
		if (parent_lv == layer_lv)
			continue;

		/* Find all segments that point at the layer LV */
		dm_list_iterate_items(seg, &parent_lv->segments) {
			for (s = 0; s < seg->area_count; s++) {
				if (seg_type(seg, s) != AREA_LV ||
				    seg_lv(seg, s) != layer_lv)
					continue;

				if (!_split_parent_area(seg, s, parallel_areas))
					return_0;
			}
		}
	}

	return 1;
}
2267 /* Remove a layer from the LV */
2268 int remove_layers_for_segments(struct cmd_context *cmd,
2269 struct logical_volume *lv,
2270 struct logical_volume *layer_lv,
2271 uint32_t status_mask, struct dm_list *lvs_changed)
2273 struct lv_segment *seg, *lseg;
2274 uint32_t s;
2275 int lv_changed = 0;
2276 struct lv_list *lvl;
2278 log_very_verbose("Removing layer %s for segments of %s",
2279 layer_lv->name, lv->name);
2281 /* Find all segments that point at the temporary mirror */
2282 dm_list_iterate_items(seg, &lv->segments) {
2283 for (s = 0; s < seg->area_count; s++) {
2284 if (seg_type(seg, s) != AREA_LV ||
2285 seg_lv(seg, s) != layer_lv)
2286 continue;
2288 /* Find the layer segment pointed at */
2289 if (!(lseg = find_seg_by_le(layer_lv, seg_le(seg, s)))) {
2290 log_error("Layer segment found: %s:%" PRIu32,
2291 layer_lv->name, seg_le(seg, s));
2292 return 0;
2295 /* Check the segment params are compatible */
2296 if (!seg_is_striped(lseg) || lseg->area_count != 1) {
2297 log_error("Layer is not linear: %s:%" PRIu32,
2298 layer_lv->name, lseg->le);
2299 return 0;
2301 if ((lseg->status & status_mask) != status_mask) {
2302 log_error("Layer status does not match: "
2303 "%s:%" PRIu32 " status: 0x%x/0x%x",
2304 layer_lv->name, lseg->le,
2305 lseg->status, status_mask);
2306 return 0;
2308 if (lseg->le != seg_le(seg, s) ||
2309 lseg->area_len != seg->area_len) {
2310 log_error("Layer boundary mismatch: "
2311 "%s:%" PRIu32 "-%" PRIu32 " on "
2312 "%s:%" PRIu32 " / "
2313 "%" PRIu32 "-%" PRIu32 " / ",
2314 lv->name, seg->le, seg->area_len,
2315 layer_lv->name, seg_le(seg, s),
2316 lseg->le, lseg->area_len);
2317 return 0;
2320 if (!move_lv_segment_area(seg, s, lseg, 0))
2321 return_0;
2323 /* Replace mirror with error segment */
2324 if (!(lseg->segtype =
2325 get_segtype_from_string(lv->vg->cmd, "error"))) {
2326 log_error("Missing error segtype");
2327 return 0;
2329 lseg->area_count = 0;
2331 /* First time, add LV to list of LVs affected */
2332 if (!lv_changed && lvs_changed) {
2333 if (!(lvl = dm_pool_alloc(cmd->mem, sizeof(*lvl)))) {
2334 log_error("lv_list alloc failed");
2335 return 0;
2337 lvl->lv = lv;
2338 dm_list_add(lvs_changed, &lvl->list);
2339 lv_changed = 1;
2343 if (lv_changed && !lv_merge_segments(lv))
2344 stack;
2346 return 1;
2349 /* Remove a layer */
2350 int remove_layers_for_segments_all(struct cmd_context *cmd,
2351 struct logical_volume *layer_lv,
2352 uint32_t status_mask,
2353 struct dm_list *lvs_changed)
2355 struct lv_list *lvl;
2356 struct logical_volume *lv1;
2358 /* Loop through all LVs except the temporary mirror */
2359 dm_list_iterate_items(lvl, &layer_lv->vg->lvs) {
2360 lv1 = lvl->lv;
2361 if (lv1 == layer_lv)
2362 continue;
2364 if (!remove_layers_for_segments(cmd, lv1, layer_lv,
2365 status_mask, lvs_changed))
2366 return_0;
2369 if (!lv_empty(layer_lv))
2370 return_0;
2372 return 1;
2375 static int _move_lv_segments(struct logical_volume *lv_to,
2376 struct logical_volume *lv_from,
2377 uint32_t set_status, uint32_t reset_status)
2379 struct lv_segment *seg;
2381 dm_list_iterate_items(seg, &lv_to->segments) {
2382 if (seg->origin) {
2383 log_error("Can't move snapshot segment");
2384 return 0;
2388 lv_to->segments = lv_from->segments;
2389 lv_to->segments.n->p = &lv_to->segments;
2390 lv_to->segments.p->n = &lv_to->segments;
2392 dm_list_iterate_items(seg, &lv_to->segments) {
2393 seg->lv = lv_to;
2394 seg->status &= ~reset_status;
2395 seg->status |= set_status;
2398 dm_list_init(&lv_from->segments);
2400 lv_to->le_count = lv_from->le_count;
2401 lv_to->size = lv_from->size;
2403 lv_from->le_count = 0;
2404 lv_from->size = 0;
2406 return 1;
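/*
 * The list-head transplant above relies on re-pointing the old head's
 * neighbours at the new head after the head is copied.  A minimal
 * standalone sketch (not the dm_list implementation; assumes the
 * source list is non-empty, as lv_from always has segments here):
 */
#if 0
struct node { struct node *n, *p; };

static void transplant_head(struct node *to, struct node *from)
{
	*to = *from;			/* copy first/last pointers */
	to->n->p = to;			/* first element points back at new head */
	to->p->n = to;			/* last element points forward at new head */
	from->n = from->p = from;	/* old head becomes an empty list */
}
#endif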
2409 /* Remove a layer from the LV */
2410 int remove_layer_from_lv(struct logical_volume *lv,
2411 struct logical_volume *layer_lv)
2413 struct logical_volume *parent;
2414 struct lv_segment *parent_seg;
2415 struct segment_type *segtype;
2417 log_very_verbose("Removing layer %s for %s", layer_lv->name, lv->name);
2419 if (!(parent_seg = get_only_segment_using_this_lv(layer_lv))) {
2420 log_error("Failed to find layer %s in %s",
2421 layer_lv->name, lv->name);
2422 return 0;
2424 parent = parent_seg->lv;
2427 * Before removal, the layer should be cleaned up,
2428 * i.e. additional segments and areas should have been removed.
2430 if (dm_list_size(&parent->segments) != 1 ||
2431 parent_seg->area_count != 1 ||
2432 seg_type(parent_seg, 0) != AREA_LV ||
2433 layer_lv != seg_lv(parent_seg, 0) ||
2434 parent->le_count != layer_lv->le_count)
2435 return_0;
2437 if (!lv_empty(parent))
2438 return_0;
2440 if (!_move_lv_segments(parent, layer_lv, 0, 0))
2441 return_0;
2443 /* Replace the empty layer with error segment */
2444 segtype = get_segtype_from_string(lv->vg->cmd, "error");
2445 if (!lv_add_virtual_segment(layer_lv, 0, parent->le_count, segtype))
2446 return_0;
2448 return 1;
2452 * Create and insert a linear LV "above" lv_where.
2453 * After the insertion, a new LV named lv_where->name + suffix is created
2454 * and all segments of lv_where are moved to the new LV.
2455 * lv_where will have a single segment which maps linearly to the new LV.
2457 struct logical_volume *insert_layer_for_lv(struct cmd_context *cmd,
2458 struct logical_volume *lv_where,
2459 uint32_t status,
2460 const char *layer_suffix)
2462 struct logical_volume *layer_lv;
2463 char *name;
2464 size_t len;
2465 struct segment_type *segtype;
2466 struct lv_segment *mapseg;
2468 /* create an empty layer LV */
2469 len = strlen(lv_where->name) + 32;
2470 if (!(name = alloca(len))) {
2471 log_error("layer name allocation failed. "
2472 "Remove new LV and retry.");
2473 return NULL;
2476 if (dm_snprintf(name, len, "%s%s", lv_where->name, layer_suffix) < 0) {
2477 log_error("layer name allocation failed. "
2478 "Remove new LV and retry.");
2479 return NULL;
2482 if (!(layer_lv = lv_create_empty(name, NULL, LVM_READ | LVM_WRITE,
2483 ALLOC_INHERIT, lv_where->vg))) {
2484 log_error("Creation of layer LV failed");
2485 return NULL;
2488 if (lv_is_active(lv_where) && strstr(name, "_mimagetmp")) {
2489 log_very_verbose("Creating transient LV %s for mirror conversion in VG %s.", name, lv_where->vg->name);
2491 segtype = get_segtype_from_string(cmd, "error");
2493 if (!lv_add_virtual_segment(layer_lv, 0, lv_where->le_count, segtype)) {
2494 log_error("Creation of transient LV %s for mirror conversion in VG %s failed.", name, lv_where->vg->name);
2495 return NULL;
2498 if (!vg_write(lv_where->vg)) {
2499 log_error("Failed to write intermediate VG %s metadata for mirror conversion.", lv_where->vg->name);
2500 return NULL;
2503 if (!vg_commit(lv_where->vg)) {
2504 log_error("Failed to commit intermediate VG %s metadata for mirror conversion.", lv_where->vg->name);
2505 vg_revert(lv_where->vg);
2506 return NULL;
2509 if (!activate_lv(cmd, layer_lv)) {
2510 log_error("Failed to resume transient error LV %s for mirror conversion in VG %s.", name, lv_where->vg->name);
2511 return NULL;
2515 log_very_verbose("Inserting layer %s for %s",
2516 layer_lv->name, lv_where->name);
2518 if (!_move_lv_segments(layer_lv, lv_where, 0, 0))
2519 return_NULL;
2521 if (!(segtype = get_segtype_from_string(cmd, "striped")))
2522 return_NULL;
2524 /* allocate a new linear segment */
2525 if (!(mapseg = alloc_lv_segment(cmd->mem, segtype,
2526 lv_where, 0, layer_lv->le_count,
2527 status, 0, NULL, 1, layer_lv->le_count,
2528 0, 0, 0)))
2529 return_NULL;
2531 /* map the new segment to the original underlying area */
2532 if (!set_lv_segment_area_lv(mapseg, 0, layer_lv, 0, 0))
2533 return_NULL;
2535 /* add the new segment to lv_where */
2536 dm_list_add(&lv_where->segments, &mapseg->list);
2537 lv_where->le_count = layer_lv->le_count;
2538 lv_where->size = lv_where->le_count * lv_where->vg->extent_size;
2540 return layer_lv;
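/*
 * Effect of the insertion, illustrated with a hypothetical LV "lv0" and
 * suffix "_mimagetmp": before the call "lv0" maps its extents directly;
 * afterwards the new LV "lv0_mimagetmp" owns those segments and "lv0"
 * is a single one-area striped (i.e. linear) segment of le_count
 * extents mapping 1:1 onto "lv0_mimagetmp":0.  A hedged usage sketch:
 */
#if 0
struct logical_volume *layer;

if (!(layer = insert_layer_for_lv(cmd, lv0, 0, "_mimagetmp")))
	return_0;
#endif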
2544 * Extend and insert a linear layer LV beneath the source segment area.
2546 static int _extend_layer_lv_for_segment(struct logical_volume *layer_lv,
2547 struct lv_segment *seg, uint32_t s,
2548 uint32_t status)
2550 struct lv_segment *mapseg;
2551 struct segment_type *segtype;
2552 struct physical_volume *src_pv = seg_pv(seg, s);
2553 uint32_t src_pe = seg_pe(seg, s);
2555 if (seg_type(seg, s) != AREA_PV && seg_type(seg, s) != AREA_LV)
2556 return_0;
2558 if (!(segtype = get_segtype_from_string(layer_lv->vg->cmd, "striped")))
2559 return_0;
2561 /* FIXME Incomplete message? Needs more context */
2562 log_very_verbose("Inserting %s:%" PRIu32 "-%" PRIu32 " of %s/%s",
2563 pv_dev_name(src_pv),
2564 src_pe, src_pe + seg->area_len - 1,
2565 seg->lv->vg->name, seg->lv->name);
2567 /* allocate a new segment */
2568 if (!(mapseg = alloc_lv_segment(layer_lv->vg->cmd->mem, segtype,
2569 layer_lv, layer_lv->le_count,
2570 seg->area_len, status, 0,
2571 NULL, 1, seg->area_len, 0, 0, 0)))
2572 return_0;
2574 /* map the new segment to the original underlying area */
2575 if (!move_lv_segment_area(mapseg, 0, seg, s))
2576 return_0;
2578 /* add the new segment to the layer LV */
2579 dm_list_add(&layer_lv->segments, &mapseg->list);
2580 layer_lv->le_count += seg->area_len;
2581 layer_lv->size += seg->area_len * layer_lv->vg->extent_size;
2583 /* map the original area to the new segment */
2584 if (!set_lv_segment_area_lv(seg, s, layer_lv, mapseg->le, 0))
2585 return_0;
2587 return 1;
2591 * Match the segment area to PEs in the pvl
2592 * (the segment area boundary should be aligned to PE ranges by
2593 * _align_segment_boundary_to_pe_range() so that there is no partial overlap.)
2595 static int _match_seg_area_to_pe_range(struct lv_segment *seg, uint32_t s,
2596 struct pv_list *pvl)
2598 struct pe_range *per;
2599 uint32_t pe_start, per_end;
2601 if (!pvl)
2602 return 1;
2604 if (seg_type(seg, s) != AREA_PV || seg_dev(seg, s) != pvl->pv->dev)
2605 return 0;
2607 pe_start = seg_pe(seg, s);
2609 /* Do these PEs match to any of the PEs in pvl? */
2610 dm_list_iterate_items(per, pvl->pe_ranges) {
2611 per_end = per->start + per->count - 1;
2613 if ((pe_start < per->start) || (pe_start > per_end))
2614 continue;
2616 /* FIXME Missing context in this message - add LV/seg details */
2617 log_debug("Matched PE range %s:%" PRIu32 "-%" PRIu32 " against "
2618 "%s %" PRIu32 " len %" PRIu32, dev_name(pvl->pv->dev),
2619 per->start, per_end, dev_name(seg_dev(seg, s)),
2620 seg_pe(seg, s), seg->area_len);
2622 return 1;
2625 return 0;
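/*
 * The containment test in isolation (standalone sketch; the helper is
 * hypothetical and assumes count >= 1): an area whose first PE is
 * pe_start matches the range [start, start + count - 1] iff
 * start <= pe_start <= start + count - 1.  Checking only the first PE
 * suffices because _align_segment_boundary_to_pe_range() has already
 * split any segment that straddled a range boundary.
 */
#if 0
#include <stdint.h>

static int pe_in_range(uint32_t pe_start, uint32_t start, uint32_t count)
{
	return pe_start >= start && pe_start <= start + count - 1;
}
#endif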
2629 * For each segment in lv_where that uses a PV in pvl directly,
2630 * split the segment if it spans more than one underlying PV.
2632 static int _align_segment_boundary_to_pe_range(struct logical_volume *lv_where,
2633 struct pv_list *pvl)
2635 struct lv_segment *seg;
2636 struct pe_range *per;
2637 uint32_t pe_start, pe_end, per_end, stripe_multiplier, s;
2639 if (!pvl)
2640 return 1;
2642 /* Split LV segments to match PE ranges */
2643 dm_list_iterate_items(seg, &lv_where->segments) {
2644 for (s = 0; s < seg->area_count; s++) {
2645 if (seg_type(seg, s) != AREA_PV ||
2646 seg_dev(seg, s) != pvl->pv->dev)
2647 continue;
2649 /* Do these PEs match with the condition? */
2650 dm_list_iterate_items(per, pvl->pe_ranges) {
2651 pe_start = seg_pe(seg, s);
2652 pe_end = pe_start + seg->area_len - 1;
2653 per_end = per->start + per->count - 1;
2655 /* No overlap? */
2656 if ((pe_end < per->start) ||
2657 (pe_start > per_end))
2658 continue;
2660 if (seg_is_striped(seg))
2661 stripe_multiplier = seg->area_count;
2662 else
2663 stripe_multiplier = 1;
2665 if (per->start > pe_start &&
2667 !lv_split_segment(lv_where, seg->le +
2668 (per->start - pe_start) *
2669 stripe_multiplier))
2670 return_0;
2672 if (per_end < pe_end &&
2674 !lv_split_segment(lv_where, seg->le +
2675 (per_end - pe_start + 1) *
2676 stripe_multiplier))
2677 return_0;
2682 return 1;
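/*
 * Worked example with hypothetical numbers: a 3-way striped segment
 * (stripe_multiplier = 3) with pe_start = 10 overlaps a PE range
 * beginning at per->start = 14, so the segment is split at
 *   seg->le + (14 - 10) * 3 = seg->le + 12
 * leaving each resulting segment entirely inside or outside the range.
 */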
2686 * Scan lv_where for segments on a PV in pvl, and for each one found
2687 * append a linear segment to lv_layer and insert it between the two.
2689 * If pvl is empty, a layer is placed under the whole of lv_where.
2690 * If the layer is inserted, lv_where is added to lvs_changed.
2692 int insert_layer_for_segments_on_pv(struct cmd_context *cmd,
2693 struct logical_volume *lv_where,
2694 struct logical_volume *layer_lv,
2695 uint32_t status,
2696 struct pv_list *pvl,
2697 struct dm_list *lvs_changed)
2699 struct lv_segment *seg;
2700 struct lv_list *lvl;
2701 int lv_used = 0;
2702 uint32_t s;
2704 log_very_verbose("Inserting layer %s for segments of %s on %s",
2705 layer_lv->name, lv_where->name,
2706 pvl ? pv_dev_name(pvl->pv) : "any");
2708 if (!_align_segment_boundary_to_pe_range(lv_where, pvl))
2709 return_0;
2711 /* Work through all segments on the supplied PV */
2712 dm_list_iterate_items(seg, &lv_where->segments) {
2713 for (s = 0; s < seg->area_count; s++) {
2714 if (!_match_seg_area_to_pe_range(seg, s, pvl))
2715 continue;
2717 /* First time, add LV to list of LVs affected */
2718 if (!lv_used && lvs_changed) {
2719 if (!(lvl = dm_pool_alloc(cmd->mem, sizeof(*lvl)))) {
2720 log_error("lv_list alloc failed");
2721 return 0;
2723 lvl->lv = lv_where;
2724 dm_list_add(lvs_changed, &lvl->list);
2725 lv_used = 1;
2728 if (!_extend_layer_lv_for_segment(layer_lv, seg, s,
2729 status)) {
2730 log_error("Failed to insert segment in layer "
2731 "LV %s under %s:%" PRIu32 "-%" PRIu32,
2732 layer_lv->name, lv_where->name,
2733 seg->le, seg->le + seg->len);
2734 return 0;
2739 return 1;
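/*
 * Hedged usage sketch (variable names hypothetical): make layer_lv a
 * layer under every segment of lv that lives on the PV in pvl, the
 * pattern pvmove-style callers are expected to follow.
 */
#if 0
struct dm_list lvs_changed;

dm_list_init(&lvs_changed);
if (!insert_layer_for_segments_on_pv(cmd, lv, layer_lv, 0,
				     pvl, &lvs_changed))
	return_0;
#endif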
2743 * Initialize the LV with 'value'.
2745 int set_lv(struct cmd_context *cmd, struct logical_volume *lv,
2746 uint64_t sectors, int value)
2748 struct device *dev;
2749 char *name;
2752 * FIXME:
2753 * <clausen> also, more than 4k
2754 * <clausen> say, reiserfs puts its superblock 32k in, IIRC
2755 * <ejt_> k, I'll drop a fixme to that effect
2756 * (I know the device is at least 4k, but not 32k)
2758 if (!(name = dm_pool_alloc(cmd->mem, PATH_MAX))) {
2759 log_error("Name allocation failed - device not cleared");
2760 return 0;
2762 #ifdef __NetBSD__
2763 if (dm_snprintf(name, PATH_MAX, "%s%s/r%s", cmd->dev_dir,
2764 lv->vg->name, lv->name) < 0) {
2765 log_error("Name too long - device not cleared (%s)", lv->name);
2766 return 0;
2768 #else
2769 if (dm_snprintf(name, PATH_MAX, "%s%s/%s", cmd->dev_dir,
2770 lv->vg->name, lv->name) < 0) {
2771 log_error("Name too long - device not cleared (%s)", lv->name);
2772 return 0;
2774 #endif
2775 log_verbose("Clearing start of logical volume \"%s\"", lv->name);
2777 if (!(dev = dev_cache_get(name, NULL))) {
2778 log_error("%s: not found: device not cleared", name);
2779 return 0;
2782 if (!dev_open_quiet(dev))
2783 return_0;
2785 if (!sectors)
2786 sectors = UINT64_C(4096) >> SECTOR_SHIFT;
2788 if (sectors > lv->size)
2789 sectors = lv->size;
2791 dev_set(dev, UINT64_C(0), (size_t) sectors << SECTOR_SHIFT, value);
2792 dev_flush(dev);
2793 dev_close_immediate(dev);
2795 return 1;
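/*
 * The size arithmetic above, spelled out: with sectors == 0 the default
 * is UINT64_C(4096) >> SECTOR_SHIFT sectors (8 sectors with 512-byte
 * sectors), so dev_set() wipes 8 << SECTOR_SHIFT == 4096 bytes from
 * offset 0, clamped to lv->size for very small LVs.
 */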
2799 static struct logical_volume *_create_virtual_origin(struct cmd_context *cmd,
2800 struct volume_group *vg,
2801 const char *lv_name,
2802 uint32_t permission,
2803 uint64_t voriginextents)
2805 const struct segment_type *segtype;
2806 size_t len;
2807 char *vorigin_name;
2808 struct logical_volume *lv;
2810 if (!(segtype = get_segtype_from_string(cmd, "zero"))) {
2811 log_error("Zero segment type for virtual origin not found");
2812 return NULL;
2815 len = strlen(lv_name) + 32;
2816 if (!(vorigin_name = alloca(len)) ||
2817 dm_snprintf(vorigin_name, len, "%s_vorigin", lv_name) < 0) {
2818 log_error("Virtual origin name allocation failed.");
2819 return NULL;
2822 if (!(lv = lv_create_empty(vorigin_name, NULL, permission,
2823 ALLOC_INHERIT, vg)))
2824 return_NULL;
2826 if (!lv_extend(lv, segtype, 1, 0, 1, voriginextents, NULL, 0u, 0u,
2827 NULL, ALLOC_INHERIT))
2828 return_NULL;
2830 /* store vg on disk(s) */
2831 if (!vg_write(vg) || !vg_commit(vg))
2832 return_NULL;
2834 backup(vg);
2836 return lv;
2839 int lv_create_single(struct volume_group *vg,
2840 struct lvcreate_params *lp)
2842 struct cmd_context *cmd = vg->cmd;
2843 uint32_t size_rest;
2844 uint32_t status = 0;
2845 struct logical_volume *lv, *org = NULL;
2846 int origin_active = 0;
2847 char lv_name_buf[128];
2848 const char *lv_name;
2849 struct lvinfo info;
2851 if (lp->lv_name && find_lv_in_vg(vg, lp->lv_name)) {
2852 log_error("Logical volume \"%s\" already exists in "
2853 "volume group \"%s\"", lp->lv_name, lp->vg_name);
2854 return 0;
2857 if (vg_max_lv_reached(vg)) {
2858 log_error("Maximum number of logical volumes (%u) reached "
2859 "in volume group %s", vg->max_lv, vg->name);
2860 return 0;
2863 if (lp->mirrors > 1 && !(vg->fid->fmt->features & FMT_SEGMENTS)) {
2864 log_error("Metadata does not support mirroring.");
2865 return 0;
2868 if (lp->read_ahead != DM_READ_AHEAD_AUTO &&
2869 lp->read_ahead != DM_READ_AHEAD_NONE &&
2870 (vg->fid->fmt->features & FMT_RESTRICTED_READAHEAD) &&
2871 (lp->read_ahead < 2 || lp->read_ahead > 120)) {
2872 log_error("Metadata only supports readahead values between 2 and 120.");
2873 return 0;
2876 if (lp->stripe_size > vg->extent_size) {
2877 log_error("Reducing requested stripe size %s to maximum, "
2878 "physical extent size %s",
2879 display_size(cmd, (uint64_t) lp->stripe_size),
2880 display_size(cmd, (uint64_t) vg->extent_size));
2881 lp->stripe_size = vg->extent_size;
2884 /* Need to check the vg's format to verify this - the cmd format isn't setup properly yet */
2885 if (lp->stripes > 1 &&
2886 !(vg->fid->fmt->features & FMT_UNLIMITED_STRIPESIZE) &&
2887 (lp->stripe_size > STRIPE_SIZE_MAX)) {
2888 log_error("Stripe size may not exceed %s",
2889 display_size(cmd, (uint64_t) STRIPE_SIZE_MAX));
2890 return 0;
2893 if ((size_rest = lp->extents % lp->stripes)) {
2894 log_print("Rounding size (%d extents) up to stripe boundary "
2895 "size (%d extents)", lp->extents,
2896 lp->extents - size_rest + lp->stripes);
2897 lp->extents = lp->extents - size_rest + lp->stripes;
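/* Example: lp->extents = 10 with lp->stripes = 3 gives size_rest = 1,
 * so the size is rounded up to 10 - 1 + 3 = 12 extents, 4 per stripe. */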
2900 if (lp->zero && !activation()) {
2901 log_error("Can't wipe start of new LV without using "
2902 "device-mapper kernel driver");
2903 return 0;
2906 status |= lp->permission | VISIBLE_LV;
2908 if (lp->snapshot) {
2909 if (!activation()) {
2910 log_error("Can't create snapshot without using "
2911 "device-mapper kernel driver");
2912 return 0;
2914 /* FIXME Allow exclusive activation. */
2915 if (vg_is_clustered(vg)) {
2916 log_error("Clustered snapshots are not yet supported.");
2917 return 0;
2920 /* Must zero cow */
2921 status |= LVM_WRITE;
2923 if (lp->voriginsize)
2924 origin_active = 1;
2925 else {
2927 if (!(org = find_lv(vg, lp->origin))) {
2928 log_error("Couldn't find origin volume '%s'.",
2929 lp->origin);
2930 return 0;
2932 if (lv_is_virtual_origin(org)) {
2933 log_error("Can't share virtual origins. "
2934 "Use --virtualsize.");
2935 return 0;
2937 if (lv_is_cow(org)) {
2938 log_error("Snapshots of snapshots are not "
2939 "supported yet.");
2940 return 0;
2942 if (org->status & LOCKED) {
2943 log_error("Snapshots of locked devices are not "
2944 "supported yet");
2945 return 0;
2947 if ((org->status & MIRROR_IMAGE) ||
2948 (org->status & MIRROR_LOG)) {
2949 log_error("Snapshots of mirror %ss "
2950 "are not supported",
2951 (org->status & MIRROR_LOG) ?
2952 "log" : "image");
2953 return 0;
2956 if (!lv_info(cmd, org, &info, 0, 0)) {
2957 log_error("Check for existence of snapshot "
2958 "origin '%s' failed.", org->name);
2959 return 0;
2961 origin_active = info.exists;
2965 if (!lp->extents) {
2966 log_error("Unable to create new logical volume with no extents");
2967 return 0;
2970 if (!seg_is_virtual(lp) &&
2971 vg->free_count < lp->extents) {
2972 log_error("Insufficient free extents (%u) in volume group %s: "
2973 "%u required", vg->free_count, vg->name, lp->extents);
2974 return 0;
2977 if (lp->stripes > dm_list_size(lp->pvh) && lp->alloc != ALLOC_ANYWHERE) {
2978 log_error("Number of stripes (%u) must not exceed "
2979 "number of physical volumes (%d)", lp->stripes,
2980 dm_list_size(lp->pvh));
2981 return 0;
2984 if (lp->mirrors > 1 && !activation()) {
2985 log_error("Can't create mirror without using "
2986 "device-mapper kernel driver.");
2987 return 0;
2990 /* The snapshot segment gets created later */
2991 if (lp->snapshot &&
2992 !(lp->segtype = get_segtype_from_string(cmd, "striped")))
2993 return_0;
2995 if (!archive(vg))
2996 return 0;
2998 if (lp->lv_name)
2999 lv_name = lp->lv_name;
3000 else {
3001 if (!generate_lv_name(vg, "lvol%d", lv_name_buf, sizeof(lv_name_buf))) {
3002 log_error("Failed to generate LV name.");
3003 return 0;
3005 lv_name = &lv_name_buf[0];
3008 if (lp->tag) {
3009 if (!(vg->fid->fmt->features & FMT_TAGS)) {
3010 log_error("Volume group %s does not support tags",
3011 vg->name);
3012 return 0;
3016 if (lp->mirrors > 1) {
3017 init_mirror_in_sync(lp->nosync);
3019 if (lp->nosync) {
3020 log_warn("WARNING: New mirror won't be synchronised. "
3021 "Don't read what you didn't write!");
3022 status |= MIRROR_NOTSYNCED;
3026 if (!(lv = lv_create_empty(lv_name ? lv_name : "lvol%d", NULL,
3027 status, lp->alloc, vg)))
3028 return_0;
3030 if (lp->read_ahead) {
3031 log_verbose("Setting read ahead sectors");
3032 lv->read_ahead = lp->read_ahead;
3035 if (lp->minor >= 0) {
3036 lv->major = lp->major;
3037 lv->minor = lp->minor;
3038 lv->status |= FIXED_MINOR;
3039 log_verbose("Setting device number to (%d, %d)", lv->major,
3040 lv->minor);
3043 if (lp->tag && !str_list_add(cmd->mem, &lv->tags, lp->tag)) {
3044 log_error("Failed to add tag %s to %s/%s",
3045 lp->tag, lv->vg->name, lv->name);
3046 return 0;
3049 if (!lv_extend(lv, lp->segtype, lp->stripes, lp->stripe_size,
3050 1, lp->extents, NULL, 0u, 0u, lp->pvh, lp->alloc))
3051 return_0;
3053 if (lp->mirrors > 1) {
3054 if (!lv_add_mirrors(cmd, lv, lp->mirrors - 1, lp->stripes,
3055 adjusted_mirror_region_size(
3056 vg->extent_size,
3057 lv->le_count,
3058 lp->region_size),
3059 lp->corelog ? 0U : 1U, lp->pvh, lp->alloc,
3060 MIRROR_BY_LV |
3061 (lp->nosync ? MIRROR_SKIP_INIT_SYNC : 0))) {
3062 stack;
3063 goto revert_new_lv;
3067 /* store vg on disk(s) */
3068 if (!vg_write(vg) || !vg_commit(vg))
3069 return_0;
3071 backup(vg);
3073 if (lp->snapshot) {
3074 if (!activate_lv_excl(cmd, lv)) {
3075 log_error("Aborting. Failed to activate snapshot "
3076 "exception store.");
3077 goto revert_new_lv;
3079 } else if (!activate_lv(cmd, lv)) {
3080 if (lp->zero) {
3081 log_error("Aborting. Failed to activate new LV to wipe "
3082 "the start of it.");
3083 goto deactivate_and_revert_new_lv;
3085 log_error("Failed to activate new LV.");
3086 return 0;
3089 if (!lp->zero && !lp->snapshot)
3090 log_error("WARNING: \"%s\" not zeroed", lv->name);
3091 else if (!set_lv(cmd, lv, UINT64_C(0), 0)) {
3092 log_error("Aborting. Failed to wipe %s.",
3093 lp->snapshot ? "snapshot exception store" :
3094 "start of new LV");
3095 goto deactivate_and_revert_new_lv;
3098 if (lp->snapshot) {
3099 /* Reset permission after zeroing */
3100 if (!(lp->permission & LVM_WRITE))
3101 lv->status &= ~LVM_WRITE;
3103 /* COW area must be deactivated if origin is not active */
3104 if (!origin_active && !deactivate_lv(cmd, lv)) {
3105 log_error("Aborting. Couldn't deactivate snapshot "
3106 "COW area. Manual intervention required.");
3107 return 0;
3110 /* A virtual origin must be activated explicitly. */
3111 if (lp->voriginsize &&
3112 (!(org = _create_virtual_origin(cmd, vg, lv->name,
3113 lp->permission,
3114 lp->voriginextents)) ||
3115 !activate_lv(cmd, org))) {
3116 log_error("Couldn't create virtual origin for LV %s",
3117 lv->name);
3118 if (org && !lv_remove(org))
3119 stack;
3120 goto deactivate_and_revert_new_lv;
3123 /* cow LV remains active and becomes snapshot LV */
3125 if (!vg_add_snapshot(org, lv, NULL,
3126 org->le_count, lp->chunk_size)) {
3127 log_error("Couldn't create snapshot.");
3128 goto deactivate_and_revert_new_lv;
3131 /* store vg on disk(s) */
3132 if (!vg_write(vg))
3133 return_0;
3135 if (!suspend_lv(cmd, org)) {
3136 log_error("Failed to suspend origin %s", org->name);
3137 vg_revert(vg);
3138 return 0;
3141 if (!vg_commit(vg))
3142 return_0;
3144 if (!resume_lv(cmd, org)) {
3145 log_error("Problem reactivating origin %s", org->name);
3146 return 0;
3149 /* FIXME out of sequence */
3150 backup(vg);
3152 log_print("Logical volume \"%s\" created", lv->name);
3155 * FIXME: as a sanity check we could try reading the
3156 * last block of the device?
3159 return 1;
3161 deactivate_and_revert_new_lv:
3162 if (!deactivate_lv(cmd, lv)) {
3163 log_error("Unable to deactivate failed new LV. "
3164 "Manual intervention required.");
3165 return 0;
3168 revert_new_lv:
3169 /* FIXME Better to revert to backup of metadata? */
3170 if (!lv_remove(lv) || !vg_write(vg) || !vg_commit(vg))
3171 log_error("Manual intervention may be required to remove "
3172 "abandoned LV(s) before retrying.");
3173 else
3174 backup(vg);
3176 return 0;
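/*
 * Hedged usage sketch for lv_create_single() (only fields read above
 * are set; the values are hypothetical and error handling is elided):
 */
#if 0
struct lvcreate_params lp = { 0 };

lp.lv_name = "lvol_example";
lp.vg_name = vg->name;
lp.extents = 100;
lp.stripes = 1;
lp.mirrors = 1;
lp.minor = -1;
lp.permission = LVM_READ | LVM_WRITE;
lp.alloc = ALLOC_INHERIT;
lp.pvh = &vg->pvs;
if (!(lp.segtype = get_segtype_from_string(cmd, "striped")))
	return_0;
if (!lv_create_single(vg, &lp))
	return_0;
#endif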