4 * Copyright (C) 1997-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
7 * This file is part of LVM2.
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* FIXME: memcpy might not be portable */
/*
 * Copy helpers for moving pool label fields between the in-memory
 * struct pool_disk and its big-endian on-disk layout: CPIN_* converts
 * disk (big-endian) to host order, CPOUT_* converts host order back to
 * disk.  The 8-bit variants are plain byte copies.
 *
 * Wrapped in do { } while (0) so each macro expands to exactly one
 * statement and a trailing semicolon is safe in an unbraced if/else
 * (the original bare { ... } form breaks there).
 */
#define CPIN_8(x, y, z) do {memcpy((x), (y), (z));} while (0)
#define CPOUT_8(x, y, z) do {memcpy((y), (x), (z));} while (0)
#define CPIN_16(x, y) do {(x) = xlate16_be((y));} while (0)
#define CPOUT_16(x, y) do {(y) = xlate16_be((x));} while (0)
#define CPIN_32(x, y) do {(x) = xlate32_be((y));} while (0)
#define CPOUT_32(x, y) do {(y) = xlate32_be((x));} while (0)
#define CPIN_64(x, y) do {(x) = xlate64_be((y));} while (0)
#define CPOUT_64(x, y) do {(y) = xlate64_be((x));} while (0)
/*
 * Read the first 512-byte sector of @dev and parse it as a pool PV label.
 *
 * @mem and @vg_name are unused here; they are kept so the signature
 * matches the common disk-reader shape used by the callers.
 * Returns 1 on success, 0 on read or label-parse failure.
 */
static int __read_pool_disk(const struct format_type *fmt, struct device *dev,
			    struct dm_pool *mem __attribute((unused)),
			    struct pool_list *pl,
			    const char *vg_name __attribute((unused)))
{
	/* Label lives in sector 0; buffer alignment helps direct I/O reads */
	char buf[512] __attribute((aligned(8)));

	/* FIXME: Need to check the cache here first */
	if (!dev_read(dev, UINT64_C(0), 512, buf)) {
		log_very_verbose("Failed to read PV data from %s",
				 dev_name(dev));
		return 0;
	}

	/* Decode the label and register the PV; no label pointer wanted */
	if (!read_pool_label(pl, fmt->labeller, dev, buf, NULL))
		return_0;

	return 1;
}
/*
 * Append @data to @head, resolving duplicate PVs: if an entry with the
 * same PV uuid is already on the list, the newcomer is either ignored
 * (ordinary partition) or replaces the existing entry when it comes
 * from a preferred device subsystem (e.g. multipath/MD).
 */
static void _add_pl_to_list(struct dm_list *head, struct pool_list *data)
{
	struct pool_list *pl;

	dm_list_iterate_items(pl, head) {
		if (id_equal(&data->pv_uuid, &pl->pv_uuid)) {
			char uuid[ID_LEN + 7] __attribute((aligned(8)));

			id_write_format(&pl->pv_uuid, uuid, ID_LEN + 7);

			/* Keep the existing entry unless the duplicate is a
			 * subsystem device, which takes precedence */
			if (!dev_subsystem_part_major(data->dev)) {
				log_very_verbose("Ignoring duplicate PV %s on "
						 "%s", uuid,
						 dev_name(data->dev));
				return;
			}
			log_very_verbose("Duplicate PV %s - using %s %s",
					 uuid, dev_subsystem_name(data->dev),
					 dev_name(data->dev));
			/* Unlink the old entry; @data is added below */
			dm_list_del(&pl->list);
			break;
		}
	}
	dm_list_add(head, &data->list);
}
/*
 * Parse a raw pool label from @buf (as read from sector 0 of @dev),
 * register the PV with lvmcache, and fill in @pl.  If @label is
 * non-NULL it is pointed at the cached label.
 * Returns 1 on success, 0 on failure.
 */
int read_pool_label(struct pool_list *pl, struct labeller *l,
		    struct device *dev, char *buf, struct label **label)
{
	struct lvmcache_info *info;
	struct id pvid;
	struct id vgid;
	char uuid[ID_LEN + 7] __attribute((aligned(8)));
	struct pool_disk *pd = &pl->pd;

	/* Convert the on-disk (big-endian) label into host byte order */
	pool_label_in(pd, buf);

	/* The pool format has no stored uuids; PV and VG uuids are
	 * synthesised from the pool/subpool/device ids */
	get_pool_pv_uuid(&pvid, pd);
	id_write_format(&pvid, uuid, ID_LEN + 7);
	log_debug("Calculated uuid %s for %s", uuid, dev_name(dev));

	get_pool_vg_uuid(&vgid, pd);
	id_write_format(&vgid, uuid, ID_LEN + 7);
	log_debug("Calculated uuid %s for %s", uuid, pd->pl_pool_name);

	if (!(info = lvmcache_add(l, (char *) &pvid, dev, pd->pl_pool_name,
				  (char *) &vgid, 0)))
		return_0;
	if (label)
		*label = info->label;

	/* NOTE(review): pl_blocks was already converted to host order by
	 * pool_label_in() above; the extra xlate32_be() here looks
	 * suspicious — confirm against the on-disk format before touching. */
	info->device_size = xlate32_be(pd->pl_blocks) << SECTOR_SHIFT;
	dm_list_init(&info->mdas);

	/* Label read successfully — clear the stale-cache flag */
	info->status &= ~CACHE_INVALID;

	pl->dev = dev;
	pl->pv = NULL;
	memcpy(&pl->pv_uuid, &pvid, sizeof(pvid));

	return 1;
}
/*
 * pool_label_out - copies a struct pool_disk into a raw char buffer
 * @pl: ptr to the in-memory (host byte order) struct pool_disk
 * @buf: ptr to raw space where label info will be copied
 *
 * This function is important because it takes care of all of
 * the endian issues when copying to disk. This way, when
 * machines of different architectures are used, they will
 * be able to interpret ondisk labels correctly. Always use
 * this function before writing to disk.
 */
void pool_label_out(struct pool_disk *pl, void *buf)
{
	struct pool_disk *bufpl = (struct pool_disk *) buf;

	/* Field-by-field copy, converting host order to big-endian */
	CPOUT_64(pl->pl_magic, bufpl->pl_magic);
	CPOUT_64(pl->pl_pool_id, bufpl->pl_pool_id);
	CPOUT_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
	CPOUT_32(pl->pl_version, bufpl->pl_version);
	CPOUT_32(pl->pl_subpools, bufpl->pl_subpools);
	CPOUT_32(pl->pl_sp_id, bufpl->pl_sp_id);
	CPOUT_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
	CPOUT_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
	CPOUT_32(pl->pl_sp_type, bufpl->pl_sp_type);
	CPOUT_64(pl->pl_blocks, bufpl->pl_blocks);
	CPOUT_32(pl->pl_striping, bufpl->pl_striping);
	CPOUT_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
	CPOUT_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
	CPOUT_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
	CPOUT_32(pl->pl_minor, bufpl->pl_minor);
	CPOUT_32(pl->pl_padding, bufpl->pl_padding);
	/* 184 reserved trailing bytes — size must match pl_reserve[] */
	CPOUT_8(pl->pl_reserve, bufpl->pl_reserve, 184);
}
/*
 * pool_label_in - copies a raw char buffer into a struct pool_disk
 * @pl: ptr to the struct pool_disk to fill in (host byte order)
 * @buf: ptr to raw space where label info is copied from
 *
 * This function is important because it takes care of all of
 * the endian issues when information from disk is about to be
 * used. This way, when machines of different architectures
 * are used, they will be able to interpret ondisk labels
 * correctly. Always use this function before using labels that
 * were read from disk.
 */
void pool_label_in(struct pool_disk *pl, void *buf)
{
	struct pool_disk *bufpl = (struct pool_disk *) buf;

	/* Field-by-field copy, converting big-endian to host order */
	CPIN_64(pl->pl_magic, bufpl->pl_magic);
	CPIN_64(pl->pl_pool_id, bufpl->pl_pool_id);
	CPIN_8(pl->pl_pool_name, bufpl->pl_pool_name, POOL_NAME_SIZE);
	CPIN_32(pl->pl_version, bufpl->pl_version);
	CPIN_32(pl->pl_subpools, bufpl->pl_subpools);
	CPIN_32(pl->pl_sp_id, bufpl->pl_sp_id);
	CPIN_32(pl->pl_sp_devs, bufpl->pl_sp_devs);
	CPIN_32(pl->pl_sp_devid, bufpl->pl_sp_devid);
	CPIN_32(pl->pl_sp_type, bufpl->pl_sp_type);
	CPIN_64(pl->pl_blocks, bufpl->pl_blocks);
	CPIN_32(pl->pl_striping, bufpl->pl_striping);
	CPIN_32(pl->pl_sp_dmepdevs, bufpl->pl_sp_dmepdevs);
	CPIN_32(pl->pl_sp_dmepid, bufpl->pl_sp_dmepid);
	CPIN_32(pl->pl_sp_weight, bufpl->pl_sp_weight);
	CPIN_32(pl->pl_minor, bufpl->pl_minor);
	CPIN_32(pl->pl_padding, bufpl->pl_padding);
	/* 184 reserved trailing bytes — size must match pl_reserve[] */
	CPIN_8(pl->pl_reserve, bufpl->pl_reserve, 184);
}
/*
 * Map a 6-bit value (0..63) to a printable character from the set
 * [0-9A-Za-z!#]; any value >= 64 yields '%' as an error marker.
 * Used when rendering pool/subpool/device ids as uuid characters.
 */
static char _calc_char(unsigned int id)
{
	/*
	 * [0-9A-Za-z!#] - 64 printable chars (6-bits)
	 */

	/* Character literals instead of raw ASCII codes (48/65/97) —
	 * identical values, self-documenting */
	if (id < 10)
		return id + '0';

	if (id < 36)
		return (id - 10) + 'A';

	if (id < 62)
		return (id - 36) + 'a';

	if (id == 62)
		return '!';

	if (id == 63)
		return '#';

	return '%';
}
/*
 * Build a synthetic ID_LEN (32) character uuid for a pool PV from its
 * 64-bit pool id, subpool id and device id: a fixed "POOL" prefix with
 * zero padding, then the ids encoded 6 bits per printable character
 * via _calc_char().
 *
 * @uuid must have room for at least ID_LEN bytes; the caller is
 * responsible for any NUL termination beyond the memset() done here.
 */
void get_pool_uuid(char *uuid, uint64_t poolid, uint32_t spid, uint32_t devid)
{
	int i;
	unsigned shifter = 0x003F;	/* mask: 6 bits at a time */

	assert(ID_LEN == 32);
	memset(uuid, 0, ID_LEN);
	strcat(uuid, "POOL0000000000");

	/* We grab the entire 64 bits (+2 that get shifted in) */
	for (i = 13; i < 24; i++) {
		uuid[i] = _calc_char(((unsigned) poolid) & shifter);
		poolid = poolid >> 6;
	}

	/* We grab the entire 32 bits (+4 that get shifted in) */
	for (i = 24; i < 30; i++) {
		uuid[i] = _calc_char((unsigned) (spid & shifter));
		spid = spid >> 6;
	}

	/*
	 * Since we can only have 128 devices, we only worry about the
	 * last 12 bits
	 */
	for (i = 30; i < 32; i++) {
		uuid[i] = _calc_char((unsigned) (devid & shifter));
		devid = devid >> 6;
	}
}
/*
 * Read the pool label from every cached device of @vginfo, adding each
 * resulting pool_list entry to @head and accumulating into @devcount
 * the total number of devices the pool's subpools expect.
 * Returns 1 if at least one valid pool label (non-empty pool name) was
 * read, 0 otherwise.
 */
static int _read_vg_pds(const struct format_type *fmt, struct dm_pool *mem,
			struct lvmcache_vginfo *vginfo, struct dm_list *head,
			uint32_t *devcount)
{
	struct lvmcache_info *info;
	struct pool_list *pl = NULL;
	struct dm_pool *tmpmem;

	uint32_t sp_count = 0;
	uint32_t *sp_devs = NULL;
	uint32_t i;

	/* FIXME: maybe should return a different error in memory
	 * allocation failure */
	if (!(tmpmem = dm_pool_create("pool read_vg", 512)))
		return_0;

	dm_list_iterate_items(info, &vginfo->infos) {
		if (info->dev &&
		    !(pl = read_pool_disk(fmt, info->dev, mem, vginfo->vgname)))
			break;
		/*
		 * We need to keep track of the total expected number
		 * of devices per subpool
		 */
		if (!sp_count) {
			/* FIXME pl left uninitialised if !info->dev */
			sp_count = pl->pd.pl_subpools;
			if (!(sp_devs =
			      dm_pool_zalloc(tmpmem,
					     sizeof(uint32_t) * sp_count))) {
				log_error("Unable to allocate %d 32-bit uints",
					  sp_count);
				dm_pool_destroy(tmpmem);
				return 0;
			}
		}
		/*
		 * watch out for a pool label with a different subpool
		 * count than the original - give up if it does
		 */
		if (sp_count != pl->pd.pl_subpools)
			break;

		_add_pl_to_list(head, pl);

		/* Record each subpool's device count the first time seen */
		if (sp_count > pl->pd.pl_sp_id && sp_devs[pl->pd.pl_sp_id] == 0)
			sp_devs[pl->pd.pl_sp_id] = pl->pd.pl_sp_devs;
	}

	/* Total expected devices = sum over all subpools */
	*devcount = 0;
	for (i = 0; i < sp_count; i++)
		*devcount += sp_devs[i];

	dm_pool_destroy(tmpmem);

	if (pl && *pl->pd.pl_pool_name)
		return 1;

	return 0;
}
/*
 * Populate @pdhead with the pool_list entries for @vg_name, driving up
 * to two lvmcache label scans (quick, then full) until either all the
 * expected devices are found or a full rescan has been accepted as a
 * partial pool.  Returns 1 on success, 0 if no devices were found.
 */
int read_pool_pds(const struct format_type *fmt, const char *vg_name,
		  struct dm_pool *mem, struct dm_list *pdhead)
{
	struct lvmcache_vginfo *vginfo;
	uint32_t totaldevs;
	int full_scan = -1;	/* -1: cache as-is, 0: scan, 1: full rescan */

	do {
		/*
		 * If the cache scanning doesn't work, this will never work
		 */
		if (vg_name && (vginfo = vginfo_from_vgname(vg_name, NULL)) &&
		    vginfo->infos.n) {

			if (_read_vg_pds(fmt, mem, vginfo, pdhead, &totaldevs)) {
				/*
				 * If we found all the devices we were
				 * expecting, return success
				 */
				if (dm_list_size(pdhead) == totaldevs)
					return 1;

				/*
				 * accept partial pool if we've done a full
				 * rescan of the cache
				 */
				if (full_scan > 0)
					return 1;
			}
		}

		/* Failed — discard partial results and try a deeper scan */
		dm_list_init(pdhead);

		full_scan++;
		if (full_scan > 1) {
			log_debug("No devices for vg %s found in cache",
				  vg_name);
			return 0;
		}

		lvmcache_label_scan(fmt->cmd, full_scan);
	} while (1);

	return 0;
}
349 struct pool_list
*read_pool_disk(const struct format_type
*fmt
,
350 struct device
*dev
, struct dm_pool
*mem
,
353 struct pool_list
*pl
;
358 if (!(pl
= dm_pool_zalloc(mem
, sizeof(*pl
)))) {
359 log_error("Unable to allocate pool list structure");
363 if (!__read_pool_disk(fmt
, dev
, mem
, pl
, vg_name
))