1 /* $NetBSD: lv_manip.c,v 1.3 2009/02/18 12:16:13 haad Exp $ */
4 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
7 * This file is part of LVM2.
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include "lvm-string.h"
23 #include "toolcontext.h"
37 int add_seg_to_segs_using_this_lv(struct logical_volume
*lv
,
38 struct lv_segment
*seg
)
42 dm_list_iterate_items(sl
, &lv
->segs_using_this_lv
) {
49 log_very_verbose("Adding %s:%" PRIu32
" as an user of %s",
50 seg
->lv
->name
, seg
->le
, lv
->name
);
52 if (!(sl
= dm_pool_zalloc(lv
->vg
->cmd
->mem
, sizeof(*sl
)))) {
53 log_error("Failed to allocate segment list");
59 dm_list_add(&lv
->segs_using_this_lv
, &sl
->list
);
64 int remove_seg_from_segs_using_this_lv(struct logical_volume
*lv
,
65 struct lv_segment
*seg
)
69 dm_list_iterate_items(sl
, &lv
->segs_using_this_lv
) {
75 log_very_verbose("%s:%" PRIu32
" is no longer a user "
76 "of %s", seg
->lv
->name
, seg
->le
,
78 dm_list_del(&sl
->list
);
87 * This is a function specialized for the common case where there is
88 * only one segment which uses the LV.
89 * e.g. the LV is a layer inserted by insert_layer_for_lv().
91 * In general, walk through lv->segs_using_this_lv.
93 struct lv_segment
*get_only_segment_using_this_lv(struct logical_volume
*lv
)
97 if (dm_list_size(&lv
->segs_using_this_lv
) != 1) {
98 log_error("%s is expected to have only one segment using it, "
99 "while it has %d", lv
->name
,
100 dm_list_size(&lv
->segs_using_this_lv
));
104 sl
= dm_list_item(dm_list_first(&lv
->segs_using_this_lv
), struct seg_list
);
106 if (sl
->count
!= 1) {
107 log_error("%s is expected to have only one segment using it, "
108 "while %s:%" PRIu32
" uses it %d times",
109 lv
->name
, sl
->seg
->lv
->name
, sl
->seg
->le
, sl
->count
);
117 * PVs used by a segment of an LV
122 struct dm_list pvs
; /* struct pv_list */
128 static struct seg_pvs
*_find_seg_pvs_by_le(struct dm_list
*list
, uint32_t le
)
130 struct seg_pvs
*spvs
;
132 dm_list_iterate_items(spvs
, list
)
133 if (le
>= spvs
->le
&& le
< spvs
->le
+ spvs
->len
)
140 * Find first unused LV number.
142 uint32_t find_free_lvnum(struct logical_volume
*lv
)
144 int lvnum_used
[MAX_RESTRICTED_LVS
+ 1];
149 memset(&lvnum_used
, 0, sizeof(lvnum_used
));
151 dm_list_iterate_items(lvl
, &lv
->vg
->lvs
) {
152 lvnum
= lvnum_from_lvid(&lvl
->lv
->lvid
);
153 if (lvnum
<= MAX_RESTRICTED_LVS
)
154 lvnum_used
[lvnum
] = 1;
157 while (lvnum_used
[i
])
160 /* FIXME What if none are free? */
166 * All lv_segments get created here.
168 struct lv_segment
*alloc_lv_segment(struct dm_pool
*mem
,
169 const struct segment_type
*segtype
,
170 struct logical_volume
*lv
,
171 uint32_t le
, uint32_t len
,
173 uint32_t stripe_size
,
174 struct logical_volume
*log_lv
,
178 uint32_t region_size
,
179 uint32_t extents_copied
)
181 struct lv_segment
*seg
;
182 uint32_t areas_sz
= area_count
* sizeof(*seg
->areas
);
184 if (!(seg
= dm_pool_zalloc(mem
, sizeof(*seg
))))
187 if (!(seg
->areas
= dm_pool_zalloc(mem
, areas_sz
))) {
188 dm_pool_free(mem
, seg
);
193 log_error("alloc_lv_segment: Missing segtype.");
197 seg
->segtype
= segtype
;
201 seg
->status
= status
;
202 seg
->stripe_size
= stripe_size
;
203 seg
->area_count
= area_count
;
204 seg
->area_len
= area_len
;
205 seg
->chunk_size
= chunk_size
;
206 seg
->region_size
= region_size
;
207 seg
->extents_copied
= extents_copied
;
208 seg
->log_lv
= log_lv
;
209 dm_list_init(&seg
->tags
);
211 if (log_lv
&& !attach_mirror_log(seg
, log_lv
))
217 struct lv_segment
*alloc_snapshot_seg(struct logical_volume
*lv
,
218 uint32_t status
, uint32_t old_le_count
)
220 struct lv_segment
*seg
;
221 const struct segment_type
*segtype
;
223 segtype
= get_segtype_from_string(lv
->vg
->cmd
, "snapshot");
225 log_error("Failed to find snapshot segtype");
229 if (!(seg
= alloc_lv_segment(lv
->vg
->cmd
->mem
, segtype
, lv
, old_le_count
,
230 lv
->le_count
- old_le_count
, status
, 0,
231 NULL
, 0, lv
->le_count
- old_le_count
,
233 log_error("Couldn't allocate new snapshot segment.");
237 dm_list_add(&lv
->segments
, &seg
->list
);
238 lv
->status
|= VIRTUAL
;
243 void release_lv_segment_area(struct lv_segment
*seg
, uint32_t s
,
244 uint32_t area_reduction
)
246 if (seg_type(seg
, s
) == AREA_UNASSIGNED
)
249 if (seg_type(seg
, s
) == AREA_PV
) {
250 if (release_pv_segment(seg_pvseg(seg
, s
), area_reduction
) &&
251 seg
->area_len
== area_reduction
)
252 seg_type(seg
, s
) = AREA_UNASSIGNED
;
256 if (seg_lv(seg
, s
)->status
& MIRROR_IMAGE
) {
257 lv_reduce(seg_lv(seg
, s
), area_reduction
);
261 if (area_reduction
== seg
->area_len
) {
262 log_very_verbose("Remove %s:%" PRIu32
"[%" PRIu32
"] from "
263 "the top of LV %s:%" PRIu32
,
264 seg
->lv
->name
, seg
->le
, s
,
265 seg_lv(seg
, s
)->name
, seg_le(seg
, s
));
267 remove_seg_from_segs_using_this_lv(seg_lv(seg
, s
), seg
);
268 seg_lv(seg
, s
) = NULL
;
270 seg_type(seg
, s
) = AREA_UNASSIGNED
;
275 * Move a segment area from one segment to another
277 int move_lv_segment_area(struct lv_segment
*seg_to
, uint32_t area_to
,
278 struct lv_segment
*seg_from
, uint32_t area_from
)
280 struct physical_volume
*pv
;
281 struct logical_volume
*lv
;
284 switch (seg_type(seg_from
, area_from
)) {
286 pv
= seg_pv(seg_from
, area_from
);
287 pe
= seg_pe(seg_from
, area_from
);
289 release_lv_segment_area(seg_from
, area_from
,
291 release_lv_segment_area(seg_to
, area_to
, seg_to
->area_len
);
293 if (!set_lv_segment_area_pv(seg_to
, area_to
, pv
, pe
))
299 lv
= seg_lv(seg_from
, area_from
);
300 le
= seg_le(seg_from
, area_from
);
302 release_lv_segment_area(seg_from
, area_from
,
304 release_lv_segment_area(seg_to
, area_to
, seg_to
->area_len
);
306 if (!set_lv_segment_area_lv(seg_to
, area_to
, lv
, le
, 0))
311 case AREA_UNASSIGNED
:
312 release_lv_segment_area(seg_to
, area_to
, seg_to
->area_len
);
319 * Link part of a PV to an LV segment.
321 int set_lv_segment_area_pv(struct lv_segment
*seg
, uint32_t area_num
,
322 struct physical_volume
*pv
, uint32_t pe
)
324 seg
->areas
[area_num
].type
= AREA_PV
;
326 if (!(seg_pvseg(seg
, area_num
) =
327 assign_peg_to_lvseg(pv
, pe
, seg
->area_len
, seg
, area_num
)))
334 * Link one LV segment to another. Assumes sizes already match.
336 int set_lv_segment_area_lv(struct lv_segment
*seg
, uint32_t area_num
,
337 struct logical_volume
*lv
, uint32_t le
,
340 log_very_verbose("Stack %s:%" PRIu32
"[%" PRIu32
"] on LV %s:%" PRIu32
,
341 seg
->lv
->name
, seg
->le
, area_num
, lv
->name
, le
);
343 seg
->areas
[area_num
].type
= AREA_LV
;
344 seg_lv(seg
, area_num
) = lv
;
345 seg_le(seg
, area_num
) = le
;
348 if (!add_seg_to_segs_using_this_lv(lv
, seg
))
355 * Prepare for adding parallel areas to an existing segment.
357 static int _lv_segment_add_areas(struct logical_volume
*lv
,
358 struct lv_segment
*seg
,
359 uint32_t new_area_count
)
361 struct lv_segment_area
*newareas
;
362 uint32_t areas_sz
= new_area_count
* sizeof(*newareas
);
364 if (!(newareas
= dm_pool_zalloc(lv
->vg
->cmd
->mem
, areas_sz
)))
367 memcpy(newareas
, seg
->areas
, seg
->area_count
* sizeof(*seg
->areas
));
369 seg
->areas
= newareas
;
370 seg
->area_count
= new_area_count
;
376 * Reduce the size of an lv_segment. New size can be zero.
378 static int _lv_segment_reduce(struct lv_segment
*seg
, uint32_t reduction
)
380 uint32_t area_reduction
, s
;
382 /* Caller must ensure exact divisibility */
383 if (seg_is_striped(seg
)) {
384 if (reduction
% seg
->area_count
) {
385 log_error("Segment extent reduction %" PRIu32
386 "not divisible by #stripes %" PRIu32
,
387 reduction
, seg
->area_count
);
390 area_reduction
= (reduction
/ seg
->area_count
);
392 area_reduction
= reduction
;
394 for (s
= 0; s
< seg
->area_count
; s
++)
395 release_lv_segment_area(seg
, s
, area_reduction
);
397 seg
->len
-= reduction
;
398 seg
->area_len
-= area_reduction
;
404 * Entry point for all LV reductions in size.
406 static int _lv_reduce(struct logical_volume
*lv
, uint32_t extents
, int delete)
408 struct lv_segment
*seg
;
409 uint32_t count
= extents
;
412 dm_list_iterate_back_items(seg
, &lv
->segments
) {
416 if (seg
->len
<= count
) {
417 /* remove this segment completely */
418 /* FIXME Check this is safe */
419 if (seg
->log_lv
&& !lv_remove(seg
->log_lv
))
421 dm_list_del(&seg
->list
);
422 reduction
= seg
->len
;
426 if (!_lv_segment_reduce(seg
, reduction
))
431 lv
->le_count
-= extents
;
432 lv
->size
= (uint64_t) lv
->le_count
* lv
->vg
->extent_size
;
437 /* Remove the LV if it is now empty */
438 if (!lv
->le_count
&& !unlink_lv_from_vg(lv
))
440 else if (lv
->vg
->fid
->fmt
->ops
->lv_setup
&&
441 !lv
->vg
->fid
->fmt
->ops
->lv_setup(lv
->vg
->fid
, lv
))
450 int lv_empty(struct logical_volume
*lv
)
452 return _lv_reduce(lv
, lv
->le_count
, 0);
456 * Empty an LV and add error segment.
458 int replace_lv_with_error_segment(struct logical_volume
*lv
)
460 uint32_t len
= lv
->le_count
;
465 if (!lv_add_virtual_segment(lv
, 0, len
,
466 get_segtype_from_string(lv
->vg
->cmd
,
474 * Remove given number of extents from LV.
476 int lv_reduce(struct logical_volume
*lv
, uint32_t extents
)
478 return _lv_reduce(lv
, extents
, 1);
482 * Completely remove an LV.
484 int lv_remove(struct logical_volume
*lv
)
487 if (!lv_reduce(lv
, lv
->le_count
))
494 * A set of contiguous physical extents allocated
496 struct alloced_area
{
499 struct physical_volume
*pv
;
505 * Details of an allocation attempt
507 struct alloc_handle
{
508 struct cmd_context
*cmd
;
511 alloc_policy_t alloc
; /* Overall policy */
512 uint32_t area_count
; /* Number of parallel areas */
513 uint32_t area_multiple
; /* seg->len = area_len * area_multiple */
514 uint32_t log_count
; /* Number of parallel 1-extent logs */
515 uint32_t log_region_size
; /* region size for log device */
516 uint32_t total_area_len
; /* Total number of parallel extents */
518 struct dm_list
*parallel_areas
; /* PVs to avoid */
520 struct alloced_area log_area
; /* Extent used for log */
521 struct dm_list alloced_areas
[0]; /* Lists of areas in each stripe */
524 static uint32_t calc_area_multiple(const struct segment_type
*segtype
,
525 const uint32_t area_count
)
527 if (!segtype_is_striped(segtype
) || !area_count
)
534 * Preparation for a specific allocation attempt
536 static struct alloc_handle
*_alloc_init(struct cmd_context
*cmd
,
538 const struct segment_type
*segtype
,
539 alloc_policy_t alloc
,
543 uint32_t log_region_size
,
544 struct dm_list
*parallel_areas
)
546 struct alloc_handle
*ah
;
547 uint32_t s
, area_count
;
549 if (stripes
> 1 && mirrors
> 1) {
550 log_error("Striped mirrors are not supported yet");
554 if (log_count
&& stripes
> 1) {
555 log_error("Can't mix striping with a mirror log yet.");
559 if (segtype_is_virtual(segtype
))
561 else if (mirrors
> 1)
562 area_count
= mirrors
;
564 area_count
= stripes
;
566 if (!(ah
= dm_pool_zalloc(mem
, sizeof(*ah
) + sizeof(ah
->alloced_areas
[0]) * area_count
))) {
567 log_error("allocation handle allocation failed");
571 if (segtype_is_virtual(segtype
))
576 if (!(ah
->mem
= dm_pool_create("allocation", 1024))) {
577 log_error("allocation pool creation failed");
581 ah
->area_count
= area_count
;
582 ah
->log_count
= log_count
;
583 ah
->log_region_size
= log_region_size
;
585 ah
->area_multiple
= calc_area_multiple(segtype
, area_count
);
587 for (s
= 0; s
< ah
->area_count
; s
++)
588 dm_list_init(&ah
->alloced_areas
[s
]);
590 ah
->parallel_areas
= parallel_areas
;
595 void alloc_destroy(struct alloc_handle
*ah
)
598 dm_pool_destroy(ah
->mem
);
601 static int _log_parallel_areas(struct dm_pool
*mem
, struct dm_list
*parallel_areas
)
603 struct seg_pvs
*spvs
;
610 dm_list_iterate_items(spvs
, parallel_areas
) {
611 if (!dm_pool_begin_object(mem
, 256)) {
612 log_error("dm_pool_begin_object failed");
616 dm_list_iterate_items(pvl
, &spvs
->pvs
) {
617 if (!dm_pool_grow_object(mem
, pv_dev_name(pvl
->pv
), strlen(pv_dev_name(pvl
->pv
)))) {
618 log_error("dm_pool_grow_object failed");
619 dm_pool_abandon_object(mem
);
622 if (!dm_pool_grow_object(mem
, " ", 1)) {
623 log_error("dm_pool_grow_object failed");
624 dm_pool_abandon_object(mem
);
629 if (!dm_pool_grow_object(mem
, "\0", 1)) {
630 log_error("dm_pool_grow_object failed");
631 dm_pool_abandon_object(mem
);
635 pvnames
= dm_pool_end_object(mem
);
636 log_debug("Parallel PVs at LE %" PRIu32
" length %" PRIu32
": %s",
637 spvs
->le
, spvs
->len
, pvnames
);
638 dm_pool_free(mem
, pvnames
);
644 static int _setup_alloced_segment(struct logical_volume
*lv
, uint32_t status
,
646 uint32_t stripe_size
,
647 const struct segment_type
*segtype
,
648 struct alloced_area
*aa
,
649 uint32_t region_size
,
650 struct logical_volume
*log_lv
__attribute((unused
)))
652 uint32_t s
, extents
, area_multiple
;
653 struct lv_segment
*seg
;
655 area_multiple
= calc_area_multiple(segtype
, area_count
);
657 /* log_lv gets set up elsehere */
658 if (!(seg
= alloc_lv_segment(lv
->vg
->cmd
->mem
, segtype
, lv
,
660 aa
[0].len
* area_multiple
,
661 status
, stripe_size
, NULL
,
663 aa
[0].len
, 0u, region_size
, 0u))) {
664 log_error("Couldn't allocate new LV segment.");
668 for (s
= 0; s
< area_count
; s
++)
669 if (!set_lv_segment_area_pv(seg
, s
, aa
[s
].pv
, aa
[s
].pe
))
672 dm_list_add(&lv
->segments
, &seg
->list
);
674 extents
= aa
[0].len
* area_multiple
;
675 lv
->le_count
+= extents
;
676 lv
->size
+= (uint64_t) extents
*lv
->vg
->extent_size
;
678 if (segtype_is_mirrored(segtype
))
679 lv
->status
|= MIRRORED
;
684 static int _setup_alloced_segments(struct logical_volume
*lv
,
685 struct dm_list
*alloced_areas
,
688 uint32_t stripe_size
,
689 const struct segment_type
*segtype
,
690 uint32_t region_size
,
691 struct logical_volume
*log_lv
)
693 struct alloced_area
*aa
;
695 dm_list_iterate_items(aa
, &alloced_areas
[0]) {
696 if (!_setup_alloced_segment(lv
, status
, area_count
,
697 stripe_size
, segtype
, aa
,
698 region_size
, log_lv
))
706 * Returns log device size in extents, algorithm from kernel code
709 static uint32_t mirror_log_extents(uint32_t region_size
, uint32_t pe_size
, uint32_t area_len
)
711 size_t area_size
, bitset_size
, log_size
, region_count
;
713 area_size
= area_len
* pe_size
;
714 region_count
= dm_div_up(area_size
, region_size
);
716 /* Work out how many "unsigned long"s we need to hold the bitset. */
717 bitset_size
= dm_round_up(region_count
, sizeof(uint32_t) << BYTE_SHIFT
);
718 bitset_size
>>= BYTE_SHIFT
;
720 /* Log device holds both header and bitset. */
721 log_size
= dm_round_up((MIRROR_LOG_OFFSET
<< SECTOR_SHIFT
) + bitset_size
, 1 << SECTOR_SHIFT
);
722 log_size
>>= SECTOR_SHIFT
;
724 return dm_div_up(log_size
, pe_size
);
728 * This function takes a list of pv_areas and adds them to allocated_areas.
729 * If the complete area is not needed then it gets split.
730 * The part used is removed from the pv_map so it can't be allocated twice.
732 static int _alloc_parallel_area(struct alloc_handle
*ah
, uint32_t needed
,
733 struct pv_area
**areas
,
734 uint32_t *ix
, struct pv_area
*log_area
,
737 uint32_t area_len
, remaining
;
739 struct alloced_area
*aa
;
741 remaining
= needed
- *ix
;
742 area_len
= remaining
/ ah
->area_multiple
;
744 /* Reduce area_len to the smallest of the areas */
745 for (s
= 0; s
< ah
->area_count
; s
++)
746 if (area_len
> areas
[s
]->count
)
747 area_len
= areas
[s
]->count
;
749 if (!(aa
= dm_pool_alloc(ah
->mem
, sizeof(*aa
) *
750 (ah
->area_count
+ (log_area
? 1 : 0))))) {
751 log_error("alloced_area allocation failed");
755 for (s
= 0; s
< ah
->area_count
; s
++) {
756 aa
[s
].pv
= areas
[s
]->map
->pv
;
757 aa
[s
].pe
= areas
[s
]->start
;
758 aa
[s
].len
= area_len
;
759 dm_list_add(&ah
->alloced_areas
[s
], &aa
[s
].list
);
762 ah
->total_area_len
+= area_len
;
764 for (s
= 0; s
< ah
->area_count
; s
++)
765 consume_pv_area(areas
[s
], area_len
);
768 ah
->log_area
.pv
= log_area
->map
->pv
;
769 ah
->log_area
.pe
= log_area
->start
;
770 ah
->log_area
.len
= log_len
;
771 consume_pv_area(log_area
, ah
->log_area
.len
);
774 *ix
+= area_len
* ah
->area_multiple
;
780 * Call fn for each AREA_PV used by the LV segment at lv:le of length *max_seg_len.
781 * If any constituent area contains more than one segment, max_seg_len is
782 * reduced to cover only the first.
783 * fn should return 0 on error, 1 to continue scanning or >1 to terminate without error.
784 * In the last case, this function passes on the return code.
786 static int _for_each_pv(struct cmd_context
*cmd
, struct logical_volume
*lv
,
787 uint32_t le
, uint32_t len
, uint32_t *max_seg_len
,
788 uint32_t first_area
, uint32_t max_areas
,
789 int top_level_area_index
,
790 int only_single_area_segments
,
791 int (*fn
)(struct cmd_context
*cmd
,
792 struct pv_segment
*peg
, uint32_t s
,
796 struct lv_segment
*seg
;
798 uint32_t remaining_seg_len
, area_len
, area_multiple
;
801 if (!(seg
= find_seg_by_le(lv
, le
))) {
802 log_error("Failed to find segment for %s extent %" PRIu32
,
807 /* Remaining logical length of segment */
808 remaining_seg_len
= seg
->len
- (le
- seg
->le
);
810 if (remaining_seg_len
> len
)
811 remaining_seg_len
= len
;
813 if (max_seg_len
&& *max_seg_len
> remaining_seg_len
)
814 *max_seg_len
= remaining_seg_len
;
816 area_multiple
= calc_area_multiple(seg
->segtype
, seg
->area_count
);
817 area_len
= remaining_seg_len
/ area_multiple
? : 1;
820 s
< seg
->area_count
&& (!max_areas
|| s
<= max_areas
);
822 if (seg_type(seg
, s
) == AREA_LV
) {
823 if (!(r
= _for_each_pv(cmd
, seg_lv(seg
, s
),
825 (le
- seg
->le
) / area_multiple
,
826 area_len
, max_seg_len
,
827 only_single_area_segments
? 0 : 0,
828 only_single_area_segments
? 1U : 0U,
829 top_level_area_index
!= -1 ? top_level_area_index
: (int) s
,
830 only_single_area_segments
, fn
,
833 } else if (seg_type(seg
, s
) == AREA_PV
)
834 if (!(r
= fn(cmd
, seg_pvseg(seg
, s
), top_level_area_index
!= -1 ? (uint32_t) top_level_area_index
: s
, data
)))
840 /* FIXME only_single_area_segments used as workaround to skip log LV - needs new param? */
841 if (!only_single_area_segments
&& seg_is_mirrored(seg
) && seg
->log_lv
) {
842 if (!(r
= _for_each_pv(cmd
, seg
->log_lv
, 0, seg
->log_lv
->le_count
,
843 NULL
, 0, 0, 0, only_single_area_segments
,
850 /* FIXME Add snapshot cow LVs etc. */
855 static int _comp_area(const void *l
, const void *r
)
857 const struct pv_area
*lhs
= *((const struct pv_area
**) l
);
858 const struct pv_area
*rhs
= *((const struct pv_area
**) r
);
860 if (lhs
->count
< rhs
->count
)
863 else if (lhs
->count
> rhs
->count
)
870 * Search for pvseg that matches condition
873 int (*condition
)(struct pv_segment
*pvseg
, struct pv_area
*pva
);
875 struct pv_area
**areas
;
878 int s
; /* Area index of match */
882 * Is PV area on the same PV?
884 static int _is_same_pv(struct pv_segment
*pvseg
, struct pv_area
*pva
)
886 if (pvseg
->pv
!= pva
->map
->pv
)
893 * Is PV area contiguous to PV segment?
895 static int _is_contiguous(struct pv_segment
*pvseg
, struct pv_area
*pva
)
897 if (pvseg
->pv
!= pva
->map
->pv
)
900 if (pvseg
->pe
+ pvseg
->len
!= pva
->start
)
906 static int _is_condition(struct cmd_context
*cmd
__attribute((unused
)),
907 struct pv_segment
*pvseg
, uint32_t s
,
910 struct pv_match
*pvmatch
= data
;
912 if (!pvmatch
->condition(pvseg
, pvmatch
->pva
))
913 return 1; /* Continue */
915 if (s
>= pvmatch
->areas_size
)
918 pvmatch
->areas
[s
] = pvmatch
->pva
;
920 return 2; /* Finished */
924 * Is pva on same PV as any existing areas?
926 static int _check_cling(struct cmd_context
*cmd
,
927 struct lv_segment
*prev_lvseg
, struct pv_area
*pva
,
928 struct pv_area
**areas
, uint32_t areas_size
)
930 struct pv_match pvmatch
;
933 pvmatch
.condition
= _is_same_pv
;
934 pvmatch
.areas
= areas
;
935 pvmatch
.areas_size
= areas_size
;
938 /* FIXME Cope with stacks by flattening */
939 if (!(r
= _for_each_pv(cmd
, prev_lvseg
->lv
,
940 prev_lvseg
->le
+ prev_lvseg
->len
- 1, 1, NULL
,
942 _is_condition
, &pvmatch
)))
952 * Is pva contiguous to any existing areas or on the same PV?
954 static int _check_contiguous(struct cmd_context
*cmd
,
955 struct lv_segment
*prev_lvseg
, struct pv_area
*pva
,
956 struct pv_area
**areas
, uint32_t areas_size
)
958 struct pv_match pvmatch
;
961 pvmatch
.condition
= _is_contiguous
;
962 pvmatch
.areas
= areas
;
963 pvmatch
.areas_size
= areas_size
;
966 /* FIXME Cope with stacks by flattening */
967 if (!(r
= _for_each_pv(cmd
, prev_lvseg
->lv
,
968 prev_lvseg
->le
+ prev_lvseg
->len
- 1, 1, NULL
,
970 _is_condition
, &pvmatch
)))
980 * Choose sets of parallel areas to use, respecting any constraints.
982 static int _find_parallel_space(struct alloc_handle
*ah
, alloc_policy_t alloc
,
983 struct dm_list
*pvms
, struct pv_area
**areas
,
984 uint32_t areas_size
, unsigned can_split
,
985 struct lv_segment
*prev_lvseg
,
986 uint32_t *allocated
, uint32_t needed
)
991 unsigned already_found_one
= 0;
992 unsigned contiguous
= 0, cling
= 0, preferred_count
= 0;
994 unsigned ix_offset
= 0; /* Offset for non-preferred allocations */
995 unsigned too_small_for_log_count
; /* How many too small for log? */
996 uint32_t max_parallel
; /* Maximum extents to allocate */
998 struct seg_pvs
*spvs
;
999 struct dm_list
*parallel_pvs
;
1002 struct pv_area
*log_area
;
1003 unsigned log_needs_allocating
;
1005 /* Is there enough total space? */
1006 free_pes
= pv_maps_size(pvms
);
1007 if (needed
- *allocated
> free_pes
) {
1008 log_error("Insufficient free space: %" PRIu32
" extents needed,"
1009 " but only %" PRIu32
" available",
1010 needed
- *allocated
, free_pes
);
1014 /* FIXME Select log PV appropriately if there isn't one yet */
1016 /* Are there any preceding segments we must follow on from? */
1018 ix_offset
= prev_lvseg
->area_count
;
1019 if ((alloc
== ALLOC_CONTIGUOUS
))
1021 else if ((alloc
== ALLOC_CLING
))
1027 /* FIXME This algorithm needs a lot of cleaning up! */
1028 /* FIXME anywhere doesn't find all space yet */
1029 /* ix_offset holds the number of allocations that must be contiguous */
1030 /* ix holds the number of areas found on other PVs */
1033 preferred_count
= 0;
1035 parallel_pvs
= NULL
;
1036 max_parallel
= needed
;
1039 * If there are existing parallel PVs, avoid them and reduce
1040 * the maximum we can allocate in one go accordingly.
1042 if (ah
->parallel_areas
) {
1043 next_le
= (prev_lvseg
? prev_lvseg
->le
+ prev_lvseg
->len
: 0) + *allocated
/ ah
->area_multiple
;
1044 dm_list_iterate_items(spvs
, ah
->parallel_areas
) {
1045 if (next_le
>= spvs
->le
+ spvs
->len
)
1048 if (max_parallel
> (spvs
->le
+ spvs
->len
) * ah
->area_multiple
)
1049 max_parallel
= (spvs
->le
+ spvs
->len
) * ah
->area_multiple
;
1050 parallel_pvs
= &spvs
->pvs
;
1056 * Put the smallest area of each PV that is at least the
1057 * size we need into areas array. If there isn't one
1058 * that fits completely and we're allowed more than one
1059 * LV segment, then take the largest remaining instead.
1061 dm_list_iterate_items(pvm
, pvms
) {
1062 if (dm_list_empty(&pvm
->areas
))
1063 continue; /* Next PV */
1065 if (alloc
!= ALLOC_ANYWHERE
) {
1066 /* Don't allocate onto the log pv */
1067 if (ah
->log_count
&&
1068 pvm
->pv
== ah
->log_area
.pv
)
1069 continue; /* Next PV */
1071 /* Avoid PVs used by existing parallel areas */
1073 dm_list_iterate_items(pvl
, parallel_pvs
)
1074 if (pvm
->pv
== pvl
->pv
)
1078 already_found_one
= 0;
1079 /* First area in each list is the largest */
1080 dm_list_iterate_items(pva
, &pvm
->areas
) {
1083 _check_contiguous(ah
->cmd
,
1095 _check_cling(ah
->cmd
,
1104 /* Is it big enough on its own? */
1105 if (pva
->count
* ah
->area_multiple
<
1106 max_parallel
- *allocated
&&
1107 ((!can_split
&& !ah
->log_count
) ||
1108 (already_found_one
&&
1109 !(alloc
== ALLOC_ANYWHERE
))))
1112 if (!already_found_one
||
1113 alloc
== ALLOC_ANYWHERE
) {
1115 already_found_one
= 1;
1118 areas
[ix
+ ix_offset
- 1] = pva
;
1123 if (ix
>= areas_size
)
1127 if ((contiguous
|| cling
) && (preferred_count
< ix_offset
))
1130 log_needs_allocating
= (ah
->log_count
&& !ah
->log_area
.len
) ?
1133 if (ix
+ ix_offset
< ah
->area_count
+
1134 (log_needs_allocating
? ah
->log_count
: 0))
1137 /* sort the areas so we allocate from the biggest */
1139 qsort(areas
+ ix_offset
, ix
, sizeof(*areas
),
1143 * First time around, if there's a log, allocate it on the
1144 * smallest device that has space for it.
1146 * FIXME decide which PV to use at top of function instead
1149 too_small_for_log_count
= 0;
1151 if (!log_needs_allocating
) {
1155 log_len
= mirror_log_extents(ah
->log_region_size
,
1156 pv_pe_size((*areas
)->map
->pv
),
1157 (max_parallel
- *allocated
) / ah
->area_multiple
);
1159 /* How many areas are too small for the log? */
1160 while (too_small_for_log_count
< ix_offset
+ ix
&&
1161 (*(areas
+ ix_offset
+ ix
- 1 -
1162 too_small_for_log_count
))->count
< log_len
)
1163 too_small_for_log_count
++;
1165 log_area
= *(areas
+ ix_offset
+ ix
- 1 -
1166 too_small_for_log_count
);
1169 if (ix
+ ix_offset
< ah
->area_count
+
1170 (log_needs_allocating
? ah
->log_count
+
1171 too_small_for_log_count
: 0))
1172 /* FIXME With ALLOC_ANYWHERE, need to split areas */
1175 if (!_alloc_parallel_area(ah
, max_parallel
, areas
, allocated
,
1179 } while (!contiguous
&& *allocated
!= needed
&& can_split
);
1185 * Allocate several segments, each the same size, in parallel.
1186 * If mirrored_pv and mirrored_pe are supplied, it is used as
1187 * the first area, and additional areas are allocated parallel to it.
1189 static int _allocate(struct alloc_handle
*ah
,
1190 struct volume_group
*vg
,
1191 struct logical_volume
*lv
,
1192 uint32_t new_extents
,
1194 struct dm_list
*allocatable_pvs
)
1196 struct pv_area
**areas
;
1197 uint32_t allocated
= lv
? lv
->le_count
: 0;
1198 uint32_t old_allocated
;
1199 struct lv_segment
*prev_lvseg
= NULL
;
1201 struct dm_list
*pvms
;
1202 uint32_t areas_size
;
1203 alloc_policy_t alloc
;
1205 if (allocated
>= new_extents
&& !ah
->log_count
) {
1206 log_error("_allocate called with no work to do!");
1210 if (ah
->alloc
== ALLOC_CONTIGUOUS
)
1213 if (lv
&& !dm_list_empty(&lv
->segments
))
1214 prev_lvseg
= dm_list_item(dm_list_last(&lv
->segments
),
1217 * Build the sets of available areas on the pv's.
1219 if (!(pvms
= create_pv_maps(ah
->mem
, vg
, allocatable_pvs
)))
1222 if (!_log_parallel_areas(ah
->mem
, ah
->parallel_areas
))
1225 areas_size
= dm_list_size(pvms
);
1226 if (areas_size
&& areas_size
< (ah
->area_count
+ ah
->log_count
)) {
1227 if (ah
->alloc
!= ALLOC_ANYWHERE
) {
1228 log_error("Not enough PVs with free space available "
1229 "for parallel allocation.");
1230 log_error("Consider --alloc anywhere if desperate.");
1233 areas_size
= ah
->area_count
+ ah
->log_count
;
1236 /* Upper bound if none of the PVs in prev_lvseg is in pvms */
1237 /* FIXME Work size out properly */
1239 areas_size
+= prev_lvseg
->area_count
;
1241 /* Allocate an array of pv_areas to hold the largest space on each PV */
1242 if (!(areas
= dm_malloc(sizeof(*areas
) * areas_size
))) {
1243 log_error("Couldn't allocate areas array.");
1247 /* Attempt each defined allocation policy in turn */
1248 for (alloc
= ALLOC_CONTIGUOUS
; alloc
< ALLOC_INHERIT
; alloc
++) {
1249 old_allocated
= allocated
;
1250 if (!_find_parallel_space(ah
, alloc
, pvms
, areas
,
1251 areas_size
, can_split
,
1252 prev_lvseg
, &allocated
, new_extents
))
1254 if ((allocated
== new_extents
) || (ah
->alloc
== alloc
) ||
1255 (!can_split
&& (allocated
!= old_allocated
)))
1259 if (allocated
!= new_extents
) {
1260 log_error("Insufficient suitable %sallocatable extents "
1261 "for logical volume %s: %u more required",
1262 can_split
? "" : "contiguous ",
1264 (new_extents
- allocated
) * ah
->area_count
1265 / ah
->area_multiple
);
1269 if (ah
->log_count
&& !ah
->log_area
.len
) {
1270 log_error("Insufficient extents for log allocation "
1271 "for logical volume %s.",
1272 lv
? lv
->name
: "");
1283 int lv_add_virtual_segment(struct logical_volume
*lv
, uint32_t status
,
1284 uint32_t extents
, const struct segment_type
*segtype
)
1286 struct lv_segment
*seg
;
1288 if (!(seg
= alloc_lv_segment(lv
->vg
->cmd
->mem
, segtype
, lv
,
1289 lv
->le_count
, extents
, status
, 0,
1290 NULL
, 0, extents
, 0, 0, 0))) {
1291 log_error("Couldn't allocate new zero segment.");
1295 dm_list_add(&lv
->segments
, &seg
->list
);
1297 lv
->le_count
+= extents
;
1298 lv
->size
+= (uint64_t) extents
*lv
->vg
->extent_size
;
1300 lv
->status
|= VIRTUAL
;
1306 * Entry point for all extent allocations.
1308 struct alloc_handle
*allocate_extents(struct volume_group
*vg
,
1309 struct logical_volume
*lv
,
1310 const struct segment_type
*segtype
,
1312 uint32_t mirrors
, uint32_t log_count
,
1313 uint32_t log_region_size
, uint32_t extents
,
1314 struct dm_list
*allocatable_pvs
,
1315 alloc_policy_t alloc
,
1316 struct dm_list
*parallel_areas
)
1318 struct alloc_handle
*ah
;
1320 if (segtype_is_virtual(segtype
)) {
1321 log_error("allocate_extents does not handle virtual segments");
1325 if (vg
->fid
->fmt
->ops
->segtype_supported
&&
1326 !vg
->fid
->fmt
->ops
->segtype_supported(vg
->fid
, segtype
)) {
1327 log_error("Metadata format (%s) does not support required "
1328 "LV segment type (%s).", vg
->fid
->fmt
->name
,
1330 log_error("Consider changing the metadata format by running "
1335 if (alloc
== ALLOC_INHERIT
)
1338 if (!(ah
= _alloc_init(vg
->cmd
, vg
->cmd
->mem
, segtype
, alloc
, mirrors
,
1339 stripes
, log_count
, log_region_size
, parallel_areas
)))
1342 if (!segtype_is_virtual(segtype
) &&
1343 !_allocate(ah
, vg
, lv
, (lv
? lv
->le_count
: 0) + extents
,
1344 1, allocatable_pvs
)) {
1353 * Add new segments to an LV from supplied list of areas.
1355 int lv_add_segment(struct alloc_handle
*ah
,
1356 uint32_t first_area
, uint32_t num_areas
,
1357 struct logical_volume
*lv
,
1358 const struct segment_type
*segtype
,
1359 uint32_t stripe_size
,
1361 uint32_t region_size
,
1362 struct logical_volume
*log_lv
)
1365 log_error("Missing segtype in lv_add_segment().");
1369 if (segtype_is_virtual(segtype
)) {
1370 log_error("lv_add_segment cannot handle virtual segments");
1374 if (!_setup_alloced_segments(lv
, &ah
->alloced_areas
[first_area
],
1376 stripe_size
, segtype
,
1377 region_size
, log_lv
))
1380 if ((segtype
->flags
& SEG_CAN_SPLIT
) && !lv_merge_segments(lv
)) {
1381 log_error("Couldn't merge segments after extending "
1386 if (lv
->vg
->fid
->fmt
->ops
->lv_setup
&&
1387 !lv
->vg
->fid
->fmt
->ops
->lv_setup(lv
->vg
->fid
, lv
))
1394 * "mirror" segment type doesn't support split.
1395 * So, when adding mirrors to linear LV segment, first split it,
1396 * then convert it to "mirror" and add areas.
1398 static struct lv_segment
*_convert_seg_to_mirror(struct lv_segment
*seg
,
1399 uint32_t region_size
,
1400 struct logical_volume
*log_lv
)
1402 struct lv_segment
*newseg
;
1405 if (!seg_is_striped(seg
)) {
1406 log_error("Can't convert non-striped segment to mirrored.");
1410 if (seg
->area_count
> 1) {
1411 log_error("Can't convert striped segment with multiple areas "
1416 if (!(newseg
= alloc_lv_segment(seg
->lv
->vg
->cmd
->mem
,
1417 get_segtype_from_string(seg
->lv
->vg
->cmd
, "mirror"),
1418 seg
->lv
, seg
->le
, seg
->len
,
1419 seg
->status
, seg
->stripe_size
,
1421 seg
->area_count
, seg
->area_len
,
1422 seg
->chunk_size
, region_size
,
1423 seg
->extents_copied
))) {
1424 log_error("Couldn't allocate converted LV segment");
1428 for (s
= 0; s
< seg
->area_count
; s
++)
1429 if (!move_lv_segment_area(newseg
, s
, seg
, s
))
1432 dm_list_add(&seg
->list
, &newseg
->list
);
1433 dm_list_del(&seg
->list
);
1439 * Add new areas to mirrored segments
1441 int lv_add_mirror_areas(struct alloc_handle
*ah
,
1442 struct logical_volume
*lv
, uint32_t le
,
1443 uint32_t region_size
)
1445 struct alloced_area
*aa
;
1446 struct lv_segment
*seg
;
1447 uint32_t current_le
= le
;
1448 uint32_t s
, old_area_count
, new_area_count
;
1450 dm_list_iterate_items(aa
, &ah
->alloced_areas
[0]) {
1451 if (!(seg
= find_seg_by_le(lv
, current_le
))) {
1452 log_error("Failed to find segment for %s extent %"
1453 PRIu32
, lv
->name
, current_le
);
1457 /* Allocator assures aa[0].len <= seg->area_len */
1458 if (aa
[0].len
< seg
->area_len
) {
1459 if (!lv_split_segment(lv
, seg
->le
+ aa
[0].len
)) {
1460 log_error("Failed to split segment at %s "
1461 "extent %" PRIu32
, lv
->name
, le
);
1466 if (!seg_is_mirrored(seg
) &&
1467 (!(seg
= _convert_seg_to_mirror(seg
, region_size
, NULL
))))
1470 old_area_count
= seg
->area_count
;
1471 new_area_count
= old_area_count
+ ah
->area_count
;
1473 if (!_lv_segment_add_areas(lv
, seg
, new_area_count
))
1476 for (s
= 0; s
< ah
->area_count
; s
++) {
1477 if (!set_lv_segment_area_pv(seg
, s
+ old_area_count
,
1478 aa
[s
].pv
, aa
[s
].pe
))
1482 current_le
+= seg
->area_len
;
1485 lv
->status
|= MIRRORED
;
1487 if (lv
->vg
->fid
->fmt
->ops
->lv_setup
&&
1488 !lv
->vg
->fid
->fmt
->ops
->lv_setup(lv
->vg
->fid
, lv
))
1495 * Add mirror image LVs to mirrored segments
1497 int lv_add_mirror_lvs(struct logical_volume
*lv
,
1498 struct logical_volume
**sub_lvs
,
1499 uint32_t num_extra_areas
,
1500 uint32_t status
, uint32_t region_size
)
1502 struct lv_segment
*seg
;
1503 uint32_t old_area_count
, new_area_count
;
1505 struct segment_type
*mirror_segtype
;
1507 seg
= first_seg(lv
);
1509 if (dm_list_size(&lv
->segments
) != 1 || seg_type(seg
, 0) != AREA_LV
) {
1510 log_error("Mirror layer must be inserted before adding mirrors");
1514 mirror_segtype
= get_segtype_from_string(lv
->vg
->cmd
, "mirror");
1515 if (seg
->segtype
!= mirror_segtype
)
1516 if (!(seg
= _convert_seg_to_mirror(seg
, region_size
, NULL
)))
1519 if (region_size
&& region_size
!= seg
->region_size
) {
1520 log_error("Conflicting region_size");
1524 old_area_count
= seg
->area_count
;
1525 new_area_count
= old_area_count
+ num_extra_areas
;
1527 if (!_lv_segment_add_areas(lv
, seg
, new_area_count
)) {
1528 log_error("Failed to allocate widened LV segment for %s.",
1533 for (m
= 0; m
< old_area_count
; m
++)
1534 seg_lv(seg
, m
)->status
|= status
;
1536 for (m
= old_area_count
; m
< new_area_count
; m
++) {
1537 if (!set_lv_segment_area_lv(seg
, m
, sub_lvs
[m
- old_area_count
],
1540 lv_set_hidden(sub_lvs
[m
- old_area_count
]);
1543 lv
->status
|= MIRRORED
;
1549 * Turn an empty LV into a mirror log.
1551 int lv_add_log_segment(struct alloc_handle
*ah
, struct logical_volume
*log_lv
)
1553 struct lv_segment
*seg
;
1555 if (dm_list_size(&log_lv
->segments
)) {
1556 log_error("Log segments can only be added to an empty LV");
1560 if (!(seg
= alloc_lv_segment(log_lv
->vg
->cmd
->mem
,
1561 get_segtype_from_string(log_lv
->vg
->cmd
,
1563 log_lv
, 0, ah
->log_area
.len
, MIRROR_LOG
,
1564 0, NULL
, 1, ah
->log_area
.len
, 0, 0, 0))) {
1565 log_error("Couldn't allocate new mirror log segment.");
1569 if (!set_lv_segment_area_pv(seg
, 0, ah
->log_area
.pv
, ah
->log_area
.pe
))
1572 dm_list_add(&log_lv
->segments
, &seg
->list
);
1573 log_lv
->le_count
+= ah
->log_area
.len
;
1574 log_lv
->size
+= (uint64_t) log_lv
->le_count
* log_lv
->vg
->extent_size
;
1576 if (log_lv
->vg
->fid
->fmt
->ops
->lv_setup
&&
1577 !log_lv
->vg
->fid
->fmt
->ops
->lv_setup(log_lv
->vg
->fid
, log_lv
))
1583 static int _lv_extend_mirror(struct alloc_handle
*ah
,
1584 struct logical_volume
*lv
,
1585 uint32_t extents
, uint32_t first_area
)
1587 struct lv_segment
*seg
;
1590 seg
= first_seg(lv
);
1591 for (m
= first_area
, s
= 0; s
< seg
->area_count
; s
++) {
1592 if (is_temporary_mirror_layer(seg_lv(seg
, s
))) {
1593 if (!_lv_extend_mirror(ah
, seg_lv(seg
, s
), extents
, m
))
1595 m
+= lv_mirror_count(seg_lv(seg
, s
));
1599 if (!lv_add_segment(ah
, m
++, 1, seg_lv(seg
, s
),
1600 get_segtype_from_string(lv
->vg
->cmd
,
1603 log_error("Aborting. Failed to extend %s.",
1604 seg_lv(seg
, s
)->name
);
1608 seg
->area_len
+= extents
;
1609 seg
->len
+= extents
;
1610 lv
->le_count
+= extents
;
1611 lv
->size
+= (uint64_t) extents
*lv
->vg
->extent_size
;
1617 * Entry point for single-step LV allocation + extension.
1619 int lv_extend(struct logical_volume
*lv
,
1620 const struct segment_type
*segtype
,
1621 uint32_t stripes
, uint32_t stripe_size
,
1622 uint32_t mirrors
, uint32_t extents
,
1623 struct physical_volume
*mirrored_pv
__attribute((unused
)),
1624 uint32_t mirrored_pe
__attribute((unused
)),
1625 uint32_t status
, struct dm_list
*allocatable_pvs
,
1626 alloc_policy_t alloc
)
1629 struct alloc_handle
*ah
;
1631 if (segtype_is_virtual(segtype
))
1632 return lv_add_virtual_segment(lv
, status
, extents
, segtype
);
1634 if (!(ah
= allocate_extents(lv
->vg
, lv
, segtype
, stripes
, mirrors
, 0, 0,
1635 extents
, allocatable_pvs
, alloc
, NULL
)))
1639 r
= lv_add_segment(ah
, 0, ah
->area_count
, lv
, segtype
,
1640 stripe_size
, status
, 0, NULL
);
1642 r
= _lv_extend_mirror(ah
, lv
, extents
, 0);
1649 * Minimal LV renaming function.
1650 * Metadata transaction should be made by caller.
1651 * Assumes new_name is allocated from cmd->mem pool.
1653 static int _rename_single_lv(struct logical_volume
*lv
, char *new_name
)
1655 struct volume_group
*vg
= lv
->vg
;
1657 if (find_lv_in_vg(vg
, new_name
)) {
1658 log_error("Logical volume \"%s\" already exists in "
1659 "volume group \"%s\"", new_name
, vg
->name
);
1663 if (lv
->status
& LOCKED
) {
1664 log_error("Cannot rename locked LV %s", lv
->name
);
1668 lv
->name
= new_name
;
1675 * 'lv_name_old' and 'lv_name_new' are old and new names of the main LV.
1677 static int _rename_sub_lv(struct cmd_context
*cmd
,
1678 struct logical_volume
*lv
,
1679 const char *lv_name_old
, const char *lv_name_new
)
1681 char *suffix
, *new_name
;
1685 * A sub LV name starts with lv_name_old + '_'.
1686 * The suffix follows lv_name_old and includes '_'.
1688 len
= strlen(lv_name_old
);
1689 if (strncmp(lv
->name
, lv_name_old
, len
) || lv
->name
[len
] != '_') {
1690 log_error("Cannot rename \"%s\": name format not recognized "
1691 "for internal LV \"%s\"",
1692 lv_name_old
, lv
->name
);
1695 suffix
= lv
->name
+ len
;
1698 * Compose a new name for sub lv:
1699 * e.g. new name is "lvol1_mlog"
1700 * if the sub LV is "lvol0_mlog" and
1701 * a new name for main LV is "lvol1"
1703 len
= strlen(lv_name_new
) + strlen(suffix
) + 1;
1704 new_name
= dm_pool_alloc(cmd
->mem
, len
);
1706 log_error("Failed to allocate space for new name");
1709 if (!dm_snprintf(new_name
, len
, "%s%s", lv_name_new
, suffix
)) {
1710 log_error("Failed to create new name");
1715 return _rename_single_lv(lv
, new_name
);
1718 /* Callback for _for_each_sub_lv */
1719 static int _rename_cb(struct cmd_context
*cmd
, struct logical_volume
*lv
,
1722 struct lv_names
*lv_names
= (struct lv_names
*) data
;
1724 return _rename_sub_lv(cmd
, lv
, lv_names
->old
, lv_names
->new);
1728 * Loop down sub LVs and call "func" for each.
1729 * "func" is responsible to log necessary information on failure.
1731 static int _for_each_sub_lv(struct cmd_context
*cmd
, struct logical_volume
*lv
,
1732 int (*func
)(struct cmd_context
*cmd
,
1733 struct logical_volume
*lv
,
1737 struct logical_volume
*org
;
1738 struct lv_segment
*seg
;
1741 if (lv_is_cow(lv
) && lv_is_virtual_origin(org
= origin_from_cow(lv
)))
1742 if (!func(cmd
, org
, data
))
1745 dm_list_iterate_items(seg
, &lv
->segments
) {
1746 if (seg
->log_lv
&& !func(cmd
, seg
->log_lv
, data
))
1748 for (s
= 0; s
< seg
->area_count
; s
++) {
1749 if (seg_type(seg
, s
) != AREA_LV
)
1751 if (!func(cmd
, seg_lv(seg
, s
), data
))
1753 if (!_for_each_sub_lv(cmd
, seg_lv(seg
, s
), func
, data
))
1763 * Core of LV renaming routine.
1764 * VG must be locked by caller.
1766 int lv_rename(struct cmd_context
*cmd
, struct logical_volume
*lv
,
1767 const char *new_name
)
1769 struct volume_group
*vg
= lv
->vg
;
1770 struct lv_names lv_names
;
1771 DM_LIST_INIT(lvs_changed
);
1772 struct lv_list lvl
, lvl2
;
1775 /* rename is not allowed on sub LVs */
1776 if (!lv_is_visible(lv
)) {
1777 log_error("Cannot rename internal LV \"%s\".", lv
->name
);
1781 if (find_lv_in_vg(vg
, new_name
)) {
1782 log_error("Logical volume \"%s\" already exists in "
1783 "volume group \"%s\"", new_name
, vg
->name
);
1787 if (lv
->status
& LOCKED
) {
1788 log_error("Cannot rename locked LV %s", lv
->name
);
1795 /* rename sub LVs */
1796 lv_names
.old
= lv
->name
;
1797 lv_names
.new = new_name
;
1798 if (!_for_each_sub_lv(cmd
, lv
, _rename_cb
, (void *) &lv_names
))
1801 /* rename main LV */
1802 if (!(lv
->name
= dm_pool_strdup(cmd
->mem
, new_name
))) {
1803 log_error("Failed to allocate space for new name");
1808 dm_list_add(&lvs_changed
, &lvl
.list
);
1810 /* rename active virtual origin too */
1811 if (lv_is_cow(lv
) && lv_is_virtual_origin(lvl2
.lv
= origin_from_cow(lv
)))
1812 dm_list_add_h(&lvs_changed
, &lvl2
.list
);
1814 log_verbose("Writing out updated volume group");
1819 if (!suspend_lvs(cmd
, &lvs_changed
)) {
1824 if (!(r
= vg_commit(vg
)))
1827 resume_lvs(cmd
, &lvs_changed
);
1833 char *generate_lv_name(struct volume_group
*vg
, const char *format
,
1834 char *buffer
, size_t len
)
1836 struct lv_list
*lvl
;
1839 dm_list_iterate_items(lvl
, &vg
->lvs
) {
1840 if (sscanf(lvl
->lv
->name
, format
, &i
) != 1)
1847 if (dm_snprintf(buffer
, len
, format
, high
+ 1) < 0)
1853 int vg_max_lv_reached(struct volume_group
*vg
)
1858 if (vg
->max_lv
> vg_visible_lvs(vg
))
1861 log_verbose("Maximum number of logical volumes (%u) reached "
1862 "in volume group %s", vg
->max_lv
, vg
->name
);
1867 struct logical_volume
*alloc_lv(struct dm_pool
*mem
)
1869 struct logical_volume
*lv
;
1871 if (!(lv
= dm_pool_zalloc(mem
, sizeof(*lv
)))) {
1872 log_error("Unable to allocate logical volume structure");
1876 lv
->snapshot
= NULL
;
1877 dm_list_init(&lv
->snapshot_segs
);
1878 dm_list_init(&lv
->segments
);
1879 dm_list_init(&lv
->tags
);
1880 dm_list_init(&lv
->segs_using_this_lv
);
1886 * Create a new empty LV.
1888 struct logical_volume
*lv_create_empty(const char *name
,
1891 alloc_policy_t alloc
,
1892 struct volume_group
*vg
)
1894 struct format_instance
*fi
= vg
->fid
;
1895 struct logical_volume
*lv
;
1896 char dname
[NAME_LEN
];
1898 if (vg_max_lv_reached(vg
))
1901 if (strstr(name
, "%d") &&
1902 !(name
= generate_lv_name(vg
, name
, dname
, sizeof(dname
)))) {
1903 log_error("Failed to generate unique name for the new "
1906 } else if (find_lv_in_vg(vg
, name
)) {
1907 log_error("Unable to create LV %s in Volume Group %s: "
1908 "name already in use.", name
, vg
->name
);
1912 log_verbose("Creating logical volume %s", name
);
1914 if (!(lv
= alloc_lv(vg
->vgmem
)))
1917 if (!(lv
->name
= dm_pool_strdup(vg
->vgmem
, name
)))
1920 lv
->status
= status
;
1922 lv
->read_ahead
= vg
->cmd
->default_settings
.read_ahead
;
1925 lv
->size
= UINT64_C(0);
1931 if (!link_lv_to_vg(vg
, lv
))
1934 if (fi
->fmt
->ops
->lv_setup
&& !fi
->fmt
->ops
->lv_setup(fi
, lv
))
1939 dm_pool_free(vg
->vgmem
, lv
);
1943 static int _add_pvs(struct cmd_context
*cmd
, struct pv_segment
*peg
,
1944 uint32_t s
__attribute((unused
)), void *data
)
1946 struct seg_pvs
*spvs
= (struct seg_pvs
*) data
;
1947 struct pv_list
*pvl
;
1949 /* Don't add again if it's already on list. */
1950 if (find_pv_in_pv_list(&spvs
->pvs
, peg
->pv
))
1953 if (!(pvl
= dm_pool_alloc(cmd
->mem
, sizeof(*pvl
)))) {
1954 log_error("pv_list allocation failed");
1960 dm_list_add(&spvs
->pvs
, &pvl
->list
);
1966 * Construct dm_list of segments of LVs showing which PVs they use.
1968 struct dm_list
*build_parallel_areas_from_lv(struct cmd_context
*cmd
,
1969 struct logical_volume
*lv
)
1971 struct dm_list
*parallel_areas
;
1972 struct seg_pvs
*spvs
;
1973 uint32_t current_le
= 0;
1975 if (!(parallel_areas
= dm_pool_alloc(cmd
->mem
, sizeof(*parallel_areas
)))) {
1976 log_error("parallel_areas allocation failed");
1980 dm_list_init(parallel_areas
);
1983 if (!(spvs
= dm_pool_zalloc(cmd
->mem
, sizeof(*spvs
)))) {
1984 log_error("allocation failed");
1988 dm_list_init(&spvs
->pvs
);
1990 spvs
->le
= current_le
;
1991 spvs
->len
= lv
->le_count
- current_le
;
1993 dm_list_add(parallel_areas
, &spvs
->list
);
1995 /* Find next segment end */
1996 /* FIXME Unnecessary nesting! */
1997 if (!_for_each_pv(cmd
, lv
, current_le
, spvs
->len
, &spvs
->len
,
1998 0, 0, -1, 0, _add_pvs
, (void *) spvs
))
2001 current_le
= spvs
->le
+ spvs
->len
;
2002 } while (current_le
< lv
->le_count
);
2004 /* FIXME Merge adjacent segments with identical PV lists (avoids need for contiguous allocation attempts between successful allocations) */
2006 return parallel_areas
;
2009 int link_lv_to_vg(struct volume_group
*vg
, struct logical_volume
*lv
)
2011 struct lv_list
*lvl
;
2013 if (vg_max_lv_reached(vg
))
2016 if (!(lvl
= dm_pool_zalloc(vg
->vgmem
, sizeof(*lvl
))))
2021 dm_list_add(&vg
->lvs
, &lvl
->list
);
2026 int unlink_lv_from_vg(struct logical_volume
*lv
)
2028 struct lv_list
*lvl
;
2030 if (!(lvl
= find_lv_in_vg(lv
->vg
, lv
->name
)))
2033 dm_list_del(&lvl
->list
);
2038 void lv_set_visible(struct logical_volume
*lv
)
2040 if (lv_is_visible(lv
))
2043 lv
->status
|= VISIBLE_LV
;
2045 log_debug("LV %s in VG %s is now visible.", lv
->name
, lv
->vg
->name
);
2048 void lv_set_hidden(struct logical_volume
*lv
)
2050 if (!lv_is_visible(lv
))
2053 lv
->status
&= ~VISIBLE_LV
;
2055 log_debug("LV %s in VG %s is now hidden.", lv
->name
, lv
->vg
->name
);
2058 int lv_remove_single(struct cmd_context
*cmd
, struct logical_volume
*lv
,
2059 const force_t force
)
2061 struct volume_group
*vg
;
2063 struct logical_volume
*origin
= NULL
;
2067 if (!vg_check_status(vg
, LVM_WRITE
))
2070 if (lv_is_origin(lv
)) {
2071 log_error("Can't remove logical volume \"%s\" under snapshot",
2076 if (lv
->status
& MIRROR_IMAGE
) {
2077 log_error("Can't remove logical volume %s used by a mirror",
2082 if (lv
->status
& MIRROR_LOG
) {
2083 log_error("Can't remove logical volume %s used as mirror log",
2088 if (lv
->status
& LOCKED
) {
2089 log_error("Can't remove locked LV %s", lv
->name
);
2093 /* FIXME Ensure not referred to by another existing LVs */
2095 if (lv_info(cmd
, lv
, &info
, 1, 0)) {
2096 if (info
.open_count
) {
2097 log_error("Can't remove open logical volume \"%s\"",
2102 if (lv_is_active(lv
) && (force
== PROMPT
) &&
2103 lv_is_visible(lv
) &&
2104 yes_no_prompt("Do you really want to remove active "
2105 "%slogical volume %s? [y/n]: ",
2106 vg_is_clustered(vg
) ? "clustered " : "",
2108 log_print("Logical volume %s not removed", lv
->name
);
2116 /* FIXME Snapshot commit out of sequence if it fails after here? */
2117 if (!deactivate_lv(cmd
, lv
)) {
2118 log_error("Unable to deactivate logical volume \"%s\"",
2123 if (lv_is_cow(lv
)) {
2124 origin
= origin_from_cow(lv
);
2125 log_verbose("Removing snapshot %s", lv
->name
);
2126 if (!vg_remove_snapshot(lv
))
2130 log_verbose("Releasing logical volume \"%s\"", lv
->name
);
2131 if (!lv_remove(lv
)) {
2132 log_error("Error releasing logical volume \"%s\"", lv
->name
);
2136 /* store it on disks */
2137 if (!vg_write(vg
) || !vg_commit(vg
))
2142 /* If no snapshots left, reload without -real. */
2143 if (origin
&& !lv_is_origin(origin
)) {
2144 if (!suspend_lv(cmd
, origin
))
2145 log_error("Failed to refresh %s without snapshot.", origin
->name
);
2146 else if (!resume_lv(cmd
, origin
))
2147 log_error("Failed to resume %s.", origin
->name
);
2150 if (lv_is_visible(lv
))
2151 log_print("Logical volume \"%s\" successfully removed", lv
->name
);
2157 * remove LVs with its dependencies - LV leaf nodes should be removed first
2159 int lv_remove_with_dependencies(struct cmd_context
*cmd
, struct logical_volume
*lv
,
2160 const force_t force
)
2162 struct dm_list
*snh
, *snht
;
2164 if (lv_is_origin(lv
)) {
2165 /* remove snapshot LVs first */
2166 dm_list_iterate_safe(snh
, snht
, &lv
->snapshot_segs
) {
2167 if (!lv_remove_with_dependencies(cmd
, dm_list_struct_base(snh
, struct lv_segment
,
2174 return lv_remove_single(cmd
, lv
, force
);
2178 * insert_layer_for_segments_on_pv() inserts a layer segment for a segment area.
2179 * However, layer modification could split the underlying layer segment.
2180 * This function splits the parent area according to keep the 1:1 relationship
2181 * between the parent area and the underlying layer segment.
2182 * Since the layer LV might have other layers below, build_parallel_areas()
2183 * is used to find the lowest-level segment boundaries.
2185 static int _split_parent_area(struct lv_segment
*seg
, uint32_t s
,
2186 struct dm_list
*layer_seg_pvs
)
2188 uint32_t parent_area_len
, parent_le
, layer_le
;
2189 uint32_t area_multiple
;
2190 struct seg_pvs
*spvs
;
2192 if (seg_is_striped(seg
))
2193 area_multiple
= seg
->area_count
;
2197 parent_area_len
= seg
->area_len
;
2198 parent_le
= seg
->le
;
2199 layer_le
= seg_le(seg
, s
);
2201 while (parent_area_len
> 0) {
2202 /* Find the layer segment pointed at */
2203 if (!(spvs
= _find_seg_pvs_by_le(layer_seg_pvs
, layer_le
))) {
2204 log_error("layer segment for %s:%" PRIu32
" not found",
2205 seg
->lv
->name
, parent_le
);
2209 if (spvs
->le
!= layer_le
) {
2210 log_error("Incompatible layer boundary: "
2211 "%s:%" PRIu32
"[%" PRIu32
"] on %s:%" PRIu32
,
2212 seg
->lv
->name
, parent_le
, s
,
2213 seg_lv(seg
, s
)->name
, layer_le
);
2217 if (spvs
->len
< parent_area_len
) {
2218 parent_le
+= spvs
->len
* area_multiple
;
2219 if (!lv_split_segment(seg
->lv
, parent_le
))
2223 parent_area_len
-= spvs
->len
;
2224 layer_le
+= spvs
->len
;
2231 * Split the parent LV segments if the layer LV below it is splitted.
2233 int split_parent_segments_for_layer(struct cmd_context
*cmd
,
2234 struct logical_volume
*layer_lv
)
2236 struct lv_list
*lvl
;
2237 struct logical_volume
*parent_lv
;
2238 struct lv_segment
*seg
;
2240 struct dm_list
*parallel_areas
;
2242 if (!(parallel_areas
= build_parallel_areas_from_lv(cmd
, layer_lv
)))
2245 /* Loop through all LVs except itself */
2246 dm_list_iterate_items(lvl
, &layer_lv
->vg
->lvs
) {
2247 parent_lv
= lvl
->lv
;
2248 if (parent_lv
== layer_lv
)
2251 /* Find all segments that point at the layer LV */
2252 dm_list_iterate_items(seg
, &parent_lv
->segments
) {
2253 for (s
= 0; s
< seg
->area_count
; s
++) {
2254 if (seg_type(seg
, s
) != AREA_LV
||
2255 seg_lv(seg
, s
) != layer_lv
)
2258 if (!_split_parent_area(seg
, s
, parallel_areas
))
2267 /* Remove a layer from the LV */
2268 int remove_layers_for_segments(struct cmd_context
*cmd
,
2269 struct logical_volume
*lv
,
2270 struct logical_volume
*layer_lv
,
2271 uint32_t status_mask
, struct dm_list
*lvs_changed
)
2273 struct lv_segment
*seg
, *lseg
;
2276 struct lv_list
*lvl
;
2278 log_very_verbose("Removing layer %s for segments of %s",
2279 layer_lv
->name
, lv
->name
);
2281 /* Find all segments that point at the temporary mirror */
2282 dm_list_iterate_items(seg
, &lv
->segments
) {
2283 for (s
= 0; s
< seg
->area_count
; s
++) {
2284 if (seg_type(seg
, s
) != AREA_LV
||
2285 seg_lv(seg
, s
) != layer_lv
)
2288 /* Find the layer segment pointed at */
2289 if (!(lseg
= find_seg_by_le(layer_lv
, seg_le(seg
, s
)))) {
2290 log_error("Layer segment found: %s:%" PRIu32
,
2291 layer_lv
->name
, seg_le(seg
, s
));
2295 /* Check the segment params are compatible */
2296 if (!seg_is_striped(lseg
) || lseg
->area_count
!= 1) {
2297 log_error("Layer is not linear: %s:%" PRIu32
,
2298 layer_lv
->name
, lseg
->le
);
2301 if ((lseg
->status
& status_mask
) != status_mask
) {
2302 log_error("Layer status does not match: "
2303 "%s:%" PRIu32
" status: 0x%x/0x%x",
2304 layer_lv
->name
, lseg
->le
,
2305 lseg
->status
, status_mask
);
2308 if (lseg
->le
!= seg_le(seg
, s
) ||
2309 lseg
->area_len
!= seg
->area_len
) {
2310 log_error("Layer boundary mismatch: "
2311 "%s:%" PRIu32
"-%" PRIu32
" on "
2313 "%" PRIu32
"-%" PRIu32
" / ",
2314 lv
->name
, seg
->le
, seg
->area_len
,
2315 layer_lv
->name
, seg_le(seg
, s
),
2316 lseg
->le
, lseg
->area_len
);
2320 if (!move_lv_segment_area(seg
, s
, lseg
, 0))
2323 /* Replace mirror with error segment */
2324 if (!(lseg
->segtype
=
2325 get_segtype_from_string(lv
->vg
->cmd
, "error"))) {
2326 log_error("Missing error segtype");
2329 lseg
->area_count
= 0;
2331 /* First time, add LV to list of LVs affected */
2332 if (!lv_changed
&& lvs_changed
) {
2333 if (!(lvl
= dm_pool_alloc(cmd
->mem
, sizeof(*lvl
)))) {
2334 log_error("lv_list alloc failed");
2338 dm_list_add(lvs_changed
, &lvl
->list
);
2343 if (lv_changed
&& !lv_merge_segments(lv
))
2349 /* Remove a layer */
2350 int remove_layers_for_segments_all(struct cmd_context
*cmd
,
2351 struct logical_volume
*layer_lv
,
2352 uint32_t status_mask
,
2353 struct dm_list
*lvs_changed
)
2355 struct lv_list
*lvl
;
2356 struct logical_volume
*lv1
;
2358 /* Loop through all LVs except the temporary mirror */
2359 dm_list_iterate_items(lvl
, &layer_lv
->vg
->lvs
) {
2361 if (lv1
== layer_lv
)
2364 if (!remove_layers_for_segments(cmd
, lv1
, layer_lv
,
2365 status_mask
, lvs_changed
))
2369 if (!lv_empty(layer_lv
))
2375 static int _move_lv_segments(struct logical_volume
*lv_to
,
2376 struct logical_volume
*lv_from
,
2377 uint32_t set_status
, uint32_t reset_status
)
2379 struct lv_segment
*seg
;
2381 dm_list_iterate_items(seg
, &lv_to
->segments
) {
2383 log_error("Can't move snapshot segment");
2388 lv_to
->segments
= lv_from
->segments
;
2389 lv_to
->segments
.n
->p
= &lv_to
->segments
;
2390 lv_to
->segments
.p
->n
= &lv_to
->segments
;
2392 dm_list_iterate_items(seg
, &lv_to
->segments
) {
2394 seg
->status
&= ~reset_status
;
2395 seg
->status
|= set_status
;
2398 dm_list_init(&lv_from
->segments
);
2400 lv_to
->le_count
= lv_from
->le_count
;
2401 lv_to
->size
= lv_from
->size
;
2403 lv_from
->le_count
= 0;
2409 /* Remove a layer from the LV */
2410 int remove_layer_from_lv(struct logical_volume
*lv
,
2411 struct logical_volume
*layer_lv
)
2413 struct logical_volume
*parent
;
2414 struct lv_segment
*parent_seg
;
2415 struct segment_type
*segtype
;
2417 log_very_verbose("Removing layer %s for %s", layer_lv
->name
, lv
->name
);
2419 if (!(parent_seg
= get_only_segment_using_this_lv(layer_lv
))) {
2420 log_error("Failed to find layer %s in %s",
2421 layer_lv
->name
, lv
->name
);
2424 parent
= parent_seg
->lv
;
2427 * Before removal, the layer should be cleaned up,
2428 * i.e. additional segments and areas should have been removed.
2430 if (dm_list_size(&parent
->segments
) != 1 ||
2431 parent_seg
->area_count
!= 1 ||
2432 seg_type(parent_seg
, 0) != AREA_LV
||
2433 layer_lv
!= seg_lv(parent_seg
, 0) ||
2434 parent
->le_count
!= layer_lv
->le_count
)
2437 if (!lv_empty(parent
))
2440 if (!_move_lv_segments(parent
, layer_lv
, 0, 0))
2443 /* Replace the empty layer with error segment */
2444 segtype
= get_segtype_from_string(lv
->vg
->cmd
, "error");
2445 if (!lv_add_virtual_segment(layer_lv
, 0, parent
->le_count
, segtype
))
2452 * Create and insert a linear LV "above" lv_where.
2453 * After the insertion, a new LV named lv_where->name + suffix is created
2454 * and all segments of lv_where is moved to the new LV.
2455 * lv_where will have a single segment which maps linearly to the new LV.
2457 struct logical_volume
*insert_layer_for_lv(struct cmd_context
*cmd
,
2458 struct logical_volume
*lv_where
,
2460 const char *layer_suffix
)
2462 struct logical_volume
*layer_lv
;
2465 struct segment_type
*segtype
;
2466 struct lv_segment
*mapseg
;
2468 /* create an empty layer LV */
2469 len
= strlen(lv_where
->name
) + 32;
2470 if (!(name
= alloca(len
))) {
2471 log_error("layer name allocation failed. "
2472 "Remove new LV and retry.");
2476 if (dm_snprintf(name
, len
, "%s%s", lv_where
->name
, layer_suffix
) < 0) {
2477 log_error("layer name allocation failed. "
2478 "Remove new LV and retry.");
2482 if (!(layer_lv
= lv_create_empty(name
, NULL
, LVM_READ
| LVM_WRITE
,
2483 ALLOC_INHERIT
, lv_where
->vg
))) {
2484 log_error("Creation of layer LV failed");
2488 if (lv_is_active(lv_where
) && strstr(name
, "_mimagetmp")) {
2489 log_very_verbose("Creating transient LV %s for mirror conversion in VG %s.", name
, lv_where
->vg
->name
);
2491 segtype
= get_segtype_from_string(cmd
, "error");
2493 if (!lv_add_virtual_segment(layer_lv
, 0, lv_where
->le_count
, segtype
)) {
2494 log_error("Creation of transient LV %s for mirror conversion in VG %s failed.", name
, lv_where
->vg
->name
);
2498 if (!vg_write(lv_where
->vg
)) {
2499 log_error("Failed to write intermediate VG %s metadata for mirror conversion.", lv_where
->vg
->name
);
2503 if (!vg_commit(lv_where
->vg
)) {
2504 log_error("Failed to commit intermediate VG %s metadata for mirror conversion.", lv_where
->vg
->name
);
2505 vg_revert(lv_where
->vg
);
2509 if (!activate_lv(cmd
, layer_lv
)) {
2510 log_error("Failed to resume transient error LV %s for mirror conversion in VG %s.", name
, lv_where
->vg
->name
);
2515 log_very_verbose("Inserting layer %s for %s",
2516 layer_lv
->name
, lv_where
->name
);
2518 if (!_move_lv_segments(layer_lv
, lv_where
, 0, 0))
2521 if (!(segtype
= get_segtype_from_string(cmd
, "striped")))
2524 /* allocate a new linear segment */
2525 if (!(mapseg
= alloc_lv_segment(cmd
->mem
, segtype
,
2526 lv_where
, 0, layer_lv
->le_count
,
2527 status
, 0, NULL
, 1, layer_lv
->le_count
,
2531 /* map the new segment to the original underlying are */
2532 if (!set_lv_segment_area_lv(mapseg
, 0, layer_lv
, 0, 0))
2535 /* add the new segment to the layer LV */
2536 dm_list_add(&lv_where
->segments
, &mapseg
->list
);
2537 lv_where
->le_count
= layer_lv
->le_count
;
2538 lv_where
->size
= lv_where
->le_count
* lv_where
->vg
->extent_size
;
2544 * Extend and insert a linear layer LV beneath the source segment area.
2546 static int _extend_layer_lv_for_segment(struct logical_volume
*layer_lv
,
2547 struct lv_segment
*seg
, uint32_t s
,
2550 struct lv_segment
*mapseg
;
2551 struct segment_type
*segtype
;
2552 struct physical_volume
*src_pv
= seg_pv(seg
, s
);
2553 uint32_t src_pe
= seg_pe(seg
, s
);
2555 if (seg_type(seg
, s
) != AREA_PV
&& seg_type(seg
, s
) != AREA_LV
)
2558 if (!(segtype
= get_segtype_from_string(layer_lv
->vg
->cmd
, "striped")))
2561 /* FIXME Incomplete message? Needs more context */
2562 log_very_verbose("Inserting %s:%" PRIu32
"-%" PRIu32
" of %s/%s",
2563 pv_dev_name(src_pv
),
2564 src_pe
, src_pe
+ seg
->area_len
- 1,
2565 seg
->lv
->vg
->name
, seg
->lv
->name
);
2567 /* allocate a new segment */
2568 if (!(mapseg
= alloc_lv_segment(layer_lv
->vg
->cmd
->mem
, segtype
,
2569 layer_lv
, layer_lv
->le_count
,
2570 seg
->area_len
, status
, 0,
2571 NULL
, 1, seg
->area_len
, 0, 0, 0)))
2574 /* map the new segment to the original underlying are */
2575 if (!move_lv_segment_area(mapseg
, 0, seg
, s
))
2578 /* add the new segment to the layer LV */
2579 dm_list_add(&layer_lv
->segments
, &mapseg
->list
);
2580 layer_lv
->le_count
+= seg
->area_len
;
2581 layer_lv
->size
+= seg
->area_len
* layer_lv
->vg
->extent_size
;
2583 /* map the original area to the new segment */
2584 if (!set_lv_segment_area_lv(seg
, s
, layer_lv
, mapseg
->le
, 0))
2591 * Match the segment area to PEs in the pvl
2592 * (the segment area boundary should be aligned to PE ranges by
2593 * _adjust_layer_segments() so that there is no partial overlap.)
2595 static int _match_seg_area_to_pe_range(struct lv_segment
*seg
, uint32_t s
,
2596 struct pv_list
*pvl
)
2598 struct pe_range
*per
;
2599 uint32_t pe_start
, per_end
;
2604 if (seg_type(seg
, s
) != AREA_PV
|| seg_dev(seg
, s
) != pvl
->pv
->dev
)
2607 pe_start
= seg_pe(seg
, s
);
2609 /* Do these PEs match to any of the PEs in pvl? */
2610 dm_list_iterate_items(per
, pvl
->pe_ranges
) {
2611 per_end
= per
->start
+ per
->count
- 1;
2613 if ((pe_start
< per
->start
) || (pe_start
> per_end
))
2616 /* FIXME Missing context in this message - add LV/seg details */
2617 log_debug("Matched PE range %s:%" PRIu32
"-%" PRIu32
" against "
2618 "%s %" PRIu32
" len %" PRIu32
, dev_name(pvl
->pv
->dev
),
2619 per
->start
, per_end
, dev_name(seg_dev(seg
, s
)),
2620 seg_pe(seg
, s
), seg
->area_len
);
2629 * For each segment in lv_where that uses a PV in pvl directly,
2630 * split the segment if it spans more than one underlying PV.
2632 static int _align_segment_boundary_to_pe_range(struct logical_volume
*lv_where
,
2633 struct pv_list
*pvl
)
2635 struct lv_segment
*seg
;
2636 struct pe_range
*per
;
2637 uint32_t pe_start
, pe_end
, per_end
, stripe_multiplier
, s
;
2642 /* Split LV segments to match PE ranges */
2643 dm_list_iterate_items(seg
, &lv_where
->segments
) {
2644 for (s
= 0; s
< seg
->area_count
; s
++) {
2645 if (seg_type(seg
, s
) != AREA_PV
||
2646 seg_dev(seg
, s
) != pvl
->pv
->dev
)
2649 /* Do these PEs match with the condition? */
2650 dm_list_iterate_items(per
, pvl
->pe_ranges
) {
2651 pe_start
= seg_pe(seg
, s
);
2652 pe_end
= pe_start
+ seg
->area_len
- 1;
2653 per_end
= per
->start
+ per
->count
- 1;
2656 if ((pe_end
< per
->start
) ||
2657 (pe_start
> per_end
))
2660 if (seg_is_striped(seg
))
2661 stripe_multiplier
= seg
->area_count
;
2663 stripe_multiplier
= 1;
2665 if ((per
->start
!= pe_start
&&
2666 per
->start
> pe_start
) &&
2667 !lv_split_segment(lv_where
, seg
->le
+
2668 (per
->start
- pe_start
) *
2672 if ((per_end
!= pe_end
&&
2673 per_end
< pe_end
) &&
2674 !lv_split_segment(lv_where
, seg
->le
+
2675 (per_end
- pe_start
+ 1) *
2686 * Scan lv_where for segments on a PV in pvl, and for each one found
2687 * append a linear segment to lv_layer and insert it between the two.
2689 * If pvl is empty, a layer is placed under the whole of lv_where.
2690 * If the layer is inserted, lv_where is added to lvs_changed.
2692 int insert_layer_for_segments_on_pv(struct cmd_context
*cmd
,
2693 struct logical_volume
*lv_where
,
2694 struct logical_volume
*layer_lv
,
2696 struct pv_list
*pvl
,
2697 struct dm_list
*lvs_changed
)
2699 struct lv_segment
*seg
;
2700 struct lv_list
*lvl
;
2704 log_very_verbose("Inserting layer %s for segments of %s on %s",
2705 layer_lv
->name
, lv_where
->name
,
2706 pvl
? pv_dev_name(pvl
->pv
) : "any");
2708 if (!_align_segment_boundary_to_pe_range(lv_where
, pvl
))
2711 /* Work through all segments on the supplied PV */
2712 dm_list_iterate_items(seg
, &lv_where
->segments
) {
2713 for (s
= 0; s
< seg
->area_count
; s
++) {
2714 if (!_match_seg_area_to_pe_range(seg
, s
, pvl
))
2717 /* First time, add LV to list of LVs affected */
2718 if (!lv_used
&& lvs_changed
) {
2719 if (!(lvl
= dm_pool_alloc(cmd
->mem
, sizeof(*lvl
)))) {
2720 log_error("lv_list alloc failed");
2724 dm_list_add(lvs_changed
, &lvl
->list
);
2728 if (!_extend_layer_lv_for_segment(layer_lv
, seg
, s
,
2730 log_error("Failed to insert segment in layer "
2731 "LV %s under %s:%" PRIu32
"-%" PRIu32
,
2732 layer_lv
->name
, lv_where
->name
,
2733 seg
->le
, seg
->le
+ seg
->len
);
2743 * Initialize the LV with 'value'.
/*
 * set_lv: overwrite the start of logical volume 'lv' with byte 'value'
 * (value 0 => wipe/zero the head of a newly created LV so stale
 * filesystem signatures are not picked up).
 *
 * NOTE(review): this extraction is garbled — each original source line is
 * split across several lines, and some lines (locals, returns, braces,
 * preprocessor guards) are missing entirely.  The comments below annotate
 * only the visible fragments; do not treat this span as compilable code.
 */
2745 int set_lv(struct cmd_context
*cmd
, struct logical_volume
*lv
,
2746 uint64_t sectors
, int value
)
/*
 * FIXME discussion preserved from the original: the wiped region should
 * arguably be larger than 4k (e.g. reiserfs keeps its superblock 32k in).
 */
2753 * <clausen> also, more than 4k
2754 * <clausen> say, reiserfs puts it's superblock 32k in, IIRC
2755 * <ejt_> k, I'll drop a fixme to that effect
2756 * (I know the device is at least 4k, but not 32k)
/* Allocate a PATH_MAX buffer for the device path from the command's pool. */
2758 if (!(name
= dm_pool_alloc(cmd
->mem
, PATH_MAX
))) {
2759 log_error("Name allocation failed - device not cleared");
/*
 * Build "<dev_dir><vg>/r<lv>" — the "r%s" raw-device form; presumably the
 * NetBSD-specific branch (the guarding #if lines are not visible in this
 * extraction — TODO confirm against the full file).
 */
2763 if (dm_snprintf(name
, PATH_MAX
, "%s%s/r%s", cmd
->dev_dir
,
2764 lv
->vg
->name
, lv
->name
) < 0) {
2765 log_error("Name too long - device not cleared (%s)", lv
->name
);
/* Build the regular "<dev_dir><vg>/<lv>" device path. */
2769 if (dm_snprintf(name
, PATH_MAX
, "%s%s/%s", cmd
->dev_dir
,
2770 lv
->vg
->name
, lv
->name
) < 0) {
2771 log_error("Name too long - device not cleared (%s)", lv
->name
);
2775 log_verbose("Clearing start of logical volume \"%s\"", lv
->name
);
/* Look the device node up in the device cache; fail if absent. */
2777 if (!(dev
= dev_cache_get(name
, NULL
))) {
2778 log_error("%s: not found: device not cleared", name
);
/* Open without logging an error on failure. */
2782 if (!dev_open_quiet(dev
))
/* Default wipe size: 4096 bytes, expressed in sectors. */
2786 sectors
= UINT64_C(4096) >> SECTOR_SHIFT
;
/*
 * Clamp to the LV's size — NOTE(review): the assignment body of this 'if'
 * is one of the lines missing from this extraction.
 */
2788 if (sectors
> lv
->size
)
/* Write 'value' over the first (sectors << SECTOR_SHIFT) bytes. */
2791 dev_set(dev
, UINT64_C(0), (size_t) sectors
<< SECTOR_SHIFT
, value
)
;
/* Close the device straight away rather than leaving the fd cached. */
2793 dev_close_immediate(dev
);
/*
 * _create_virtual_origin: create a zero-backed LV named "<lv_name>_vorigin"
 * of 'voriginextents' extents to serve as the virtual origin of a sparse
 * snapshot, then commit the updated VG metadata.
 *
 * NOTE(review): extraction is garbled/incomplete — visible fragments are
 * annotated only; returns and closing braces are missing from this view.
 */
2799 static struct logical_volume
*_create_virtual_origin(struct cmd_context
*cmd
,
2800 struct volume_group
*vg
,
2801 const char *lv_name
,
2802 uint32_t permission
,
2803 uint64_t voriginextents
)
2805 const struct segment_type
*segtype
;
2808 struct logical_volume
*lv
;
/* The virtual origin is backed by the "zero" segment type. */
2810 if (!(segtype
= get_segtype_from_string(cmd
, "zero"))) {
2811 log_error("Zero segment type for virtual origin not found");
/* Stack-allocate the name buffer; +32 leaves room for the "_vorigin" suffix. */
2815 len
= strlen(lv_name
) + 32;
2816 if (!(vorigin_name
= alloca(len
)) ||
2817 dm_snprintf(vorigin_name
, len
, "%s_vorigin", lv_name
) < 0) {
2818 log_error("Virtual origin name allocation failed.");
/* Create the empty LV with the caller's permission and inherited allocation. */
2822 if (!(lv
= lv_create_empty(vorigin_name
, NULL
, permission
,
2823 ALLOC_INHERIT
, vg
)))
/* Give it a single zero-segment area spanning 'voriginextents' extents. */
2826 if (!lv_extend(lv
, segtype
, 1, 0, 1, voriginextents
, NULL
, 0u, 0u,
2827 NULL
, ALLOC_INHERIT
))
/* Persist the new LV: write then commit the VG metadata on disk(s). */
2830 /* store vg on disk(s) */
2831 if (!vg_write(vg
) || !vg_commit(vg
))
2839 int lv_create_single(struct volume_group
*vg
,
2840 struct lvcreate_params
*lp
)
2842 struct cmd_context
*cmd
= vg
->cmd
;
2844 uint32_t status
= 0;
2845 struct logical_volume
*lv
, *org
= NULL
;
2846 int origin_active
= 0;
2847 char lv_name_buf
[128];
2848 const char *lv_name
;
2851 if (lp
->lv_name
&& find_lv_in_vg(vg
, lp
->lv_name
)) {
2852 log_error("Logical volume \"%s\" already exists in "
2853 "volume group \"%s\"", lp
->lv_name
, lp
->vg_name
);
2857 if (vg_max_lv_reached(vg
)) {
2858 log_error("Maximum number of logical volumes (%u) reached "
2859 "in volume group %s", vg
->max_lv
, vg
->name
);
2863 if (lp
->mirrors
> 1 && !(vg
->fid
->fmt
->features
& FMT_SEGMENTS
)) {
2864 log_error("Metadata does not support mirroring.");
2868 if (lp
->read_ahead
!= DM_READ_AHEAD_AUTO
&&
2869 lp
->read_ahead
!= DM_READ_AHEAD_NONE
&&
2870 (vg
->fid
->fmt
->features
& FMT_RESTRICTED_READAHEAD
) &&
2871 (lp
->read_ahead
< 2 || lp
->read_ahead
> 120)) {
2872 log_error("Metadata only supports readahead values between 2 and 120.");
2876 if (lp
->stripe_size
> vg
->extent_size
) {
2877 log_error("Reducing requested stripe size %s to maximum, "
2878 "physical extent size %s",
2879 display_size(cmd
, (uint64_t) lp
->stripe_size
),
2880 display_size(cmd
, (uint64_t) vg
->extent_size
));
2881 lp
->stripe_size
= vg
->extent_size
;
2884 /* Need to check the vg's format to verify this - the cmd format isn't setup properly yet */
2885 if (lp
->stripes
> 1 &&
2886 !(vg
->fid
->fmt
->features
& FMT_UNLIMITED_STRIPESIZE
) &&
2887 (lp
->stripe_size
> STRIPE_SIZE_MAX
)) {
2888 log_error("Stripe size may not exceed %s",
2889 display_size(cmd
, (uint64_t) STRIPE_SIZE_MAX
));
2893 if ((size_rest
= lp
->extents
% lp
->stripes
)) {
2894 log_print("Rounding size (%d extents) up to stripe boundary "
2895 "size (%d extents)", lp
->extents
,
2896 lp
->extents
- size_rest
+ lp
->stripes
);
2897 lp
->extents
= lp
->extents
- size_rest
+ lp
->stripes
;
2900 if (lp
->zero
&& !activation()) {
2901 log_error("Can't wipe start of new LV without using "
2902 "device-mapper kernel driver");
2906 status
|= lp
->permission
| VISIBLE_LV
;
2909 if (!activation()) {
2910 log_error("Can't create snapshot without using "
2911 "device-mapper kernel driver");
2914 /* FIXME Allow exclusive activation. */
2915 if (vg_is_clustered(vg
)) {
2916 log_error("Clustered snapshots are not yet supported.");
2921 status
|= LVM_WRITE
;
2923 if (lp
->voriginsize
)
2927 if (!(org
= find_lv(vg
, lp
->origin
))) {
2928 log_error("Couldn't find origin volume '%s'.",
2932 if (lv_is_virtual_origin(org
)) {
2933 log_error("Can't share virtual origins. "
2934 "Use --virtualsize.");
2937 if (lv_is_cow(org
)) {
2938 log_error("Snapshots of snapshots are not "
2942 if (org
->status
& LOCKED
) {
2943 log_error("Snapshots of locked devices are not "
2947 if ((org
->status
& MIRROR_IMAGE
) ||
2948 (org
->status
& MIRROR_LOG
)) {
2949 log_error("Snapshots of mirror %ss "
2950 "are not supported",
2951 (org
->status
& MIRROR_LOG
) ?
2956 if (!lv_info(cmd
, org
, &info
, 0, 0)) {
2957 log_error("Check for existence of snapshot "
2958 "origin '%s' failed.", org
->name
);
2961 origin_active
= info
.exists
;
2966 log_error("Unable to create new logical volume with no extents");
2970 if (!seg_is_virtual(lp
) &&
2971 vg
->free_count
< lp
->extents
) {
2972 log_error("Insufficient free extents (%u) in volume group %s: "
2973 "%u required", vg
->free_count
, vg
->name
, lp
->extents
);
2977 if (lp
->stripes
> dm_list_size(lp
->pvh
) && lp
->alloc
!= ALLOC_ANYWHERE
) {
2978 log_error("Number of stripes (%u) must not exceed "
2979 "number of physical volumes (%d)", lp
->stripes
,
2980 dm_list_size(lp
->pvh
));
2984 if (lp
->mirrors
> 1 && !activation()) {
2985 log_error("Can't create mirror without using "
2986 "device-mapper kernel driver.");
2990 /* The snapshot segment gets created later */
2992 !(lp
->segtype
= get_segtype_from_string(cmd
, "striped")))
2999 lv_name
= lp
->lv_name
;
3001 if (!generate_lv_name(vg
, "lvol%d", lv_name_buf
, sizeof(lv_name_buf
))) {
3002 log_error("Failed to generate LV name.");
3005 lv_name
= &lv_name_buf
[0];
3009 if (!(vg
->fid
->fmt
->features
& FMT_TAGS
)) {
3010 log_error("Volume group %s does not support tags",
3016 if (lp
->mirrors
> 1) {
3017 init_mirror_in_sync(lp
->nosync
);
3020 log_warn("WARNING: New mirror won't be synchronised. "
3021 "Don't read what you didn't write!");
3022 status
|= MIRROR_NOTSYNCED
;
3026 if (!(lv
= lv_create_empty(lv_name
? lv_name
: "lvol%d", NULL
,
3027 status
, lp
->alloc
, vg
)))
3030 if (lp
->read_ahead
) {
3031 log_verbose("Setting read ahead sectors");
3032 lv
->read_ahead
= lp
->read_ahead
;
3035 if (lp
->minor
>= 0) {
3036 lv
->major
= lp
->major
;
3037 lv
->minor
= lp
->minor
;
3038 lv
->status
|= FIXED_MINOR
;
3039 log_verbose("Setting device number to (%d, %d)", lv
->major
,
3043 if (lp
->tag
&& !str_list_add(cmd
->mem
, &lv
->tags
, lp
->tag
)) {
3044 log_error("Failed to add tag %s to %s/%s",
3045 lp
->tag
, lv
->vg
->name
, lv
->name
);
3049 if (!lv_extend(lv
, lp
->segtype
, lp
->stripes
, lp
->stripe_size
,
3050 1, lp
->extents
, NULL
, 0u, 0u, lp
->pvh
, lp
->alloc
))
3053 if (lp
->mirrors
> 1) {
3054 if (!lv_add_mirrors(cmd
, lv
, lp
->mirrors
- 1, lp
->stripes
,
3055 adjusted_mirror_region_size(
3059 lp
->corelog
? 0U : 1U, lp
->pvh
, lp
->alloc
,
3061 (lp
->nosync
? MIRROR_SKIP_INIT_SYNC
: 0))) {
3067 /* store vg on disk(s) */
3068 if (!vg_write(vg
) || !vg_commit(vg
))
3074 if (!activate_lv_excl(cmd
, lv
)) {
3075 log_error("Aborting. Failed to activate snapshot "
3076 "exception store.");
3079 } else if (!activate_lv(cmd
, lv
)) {
3081 log_error("Aborting. Failed to activate new LV to wipe "
3082 "the start of it.");
3083 goto deactivate_and_revert_new_lv
;
3085 log_error("Failed to activate new LV.");
3089 if (!lp
->zero
&& !lp
->snapshot
)
3090 log_error("WARNING: \"%s\" not zeroed", lv
->name
);
3091 else if (!set_lv(cmd
, lv
, UINT64_C(0), 0)) {
3092 log_error("Aborting. Failed to wipe %s.",
3093 lp
->snapshot
? "snapshot exception store" :
3095 goto deactivate_and_revert_new_lv
;
3099 /* Reset permission after zeroing */
3100 if (!(lp
->permission
& LVM_WRITE
))
3101 lv
->status
&= ~LVM_WRITE
;
3103 /* COW area must be deactivated if origin is not active */
3104 if (!origin_active
&& !deactivate_lv(cmd
, lv
)) {
3105 log_error("Aborting. Couldn't deactivate snapshot "
3106 "COW area. Manual intervention required.");
3110 /* A virtual origin must be activated explicitly. */
3111 if (lp
->voriginsize
&&
3112 (!(org
= _create_virtual_origin(cmd
, vg
, lv
->name
,
3114 lp
->voriginextents
)) ||
3115 !activate_lv(cmd
, org
))) {
3116 log_error("Couldn't create virtual origin for LV %s",
3118 if (org
&& !lv_remove(org
))
3120 goto deactivate_and_revert_new_lv
;
3123 /* cow LV remains active and becomes snapshot LV */
3125 if (!vg_add_snapshot(org
, lv
, NULL
,
3126 org
->le_count
, lp
->chunk_size
)) {
3127 log_error("Couldn't create snapshot.");
3128 goto deactivate_and_revert_new_lv
;
3131 /* store vg on disk(s) */
3135 if (!suspend_lv(cmd
, org
)) {
3136 log_error("Failed to suspend origin %s", org
->name
);
3144 if (!resume_lv(cmd
, org
)) {
3145 log_error("Problem reactivating origin %s", org
->name
);
3149 /* FIXME out of sequence */
3152 log_print("Logical volume \"%s\" created", lv
->name
);
3155 * FIXME: as a sanity check we could try reading the
3156 * last block of the device ?
3161 deactivate_and_revert_new_lv
:
3162 if (!deactivate_lv(cmd
, lv
)) {
3163 log_error("Unable to deactivate failed new LV. "
3164 "Manual intervention required.");
3169 /* FIXME Better to revert to backup of metadata? */
3170 if (!lv_remove(lv
) || !vg_write(vg
) || !vg_commit(vg
))
3171 log_error("Manual intervention may be required to remove "
3172 "abandoned LV(s) before retrying.");