3 #include "kvm/disk-image.h"
4 #include "kvm/read-write.h"
16 #include <linux/byteorder.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
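/*
 * Cached L2 tables are kept in a red-black tree keyed by their file offset
 * (for fast lookup) and on an LRU list (for eviction once MAX_CACHE_NODES
 * tables are cached).
 */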
static int l2_table_insert(struct rb_root *root, struct qcow_l2_table *new)
{
	struct rb_node **link = &(root->rb_node), *parent = NULL;
	u64 offset = new->offset;

	/* search the tree */
	while (*link) {
		struct qcow_l2_table *t;

		t = rb_entry(*link, struct qcow_l2_table, node);
		parent = *link;

		if (t->offset > offset)
			link = &(*link)->rb_left;
		else if (t->offset < offset)
			link = &(*link)->rb_right;
		else
			return 0;	/* already cached at this offset */
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return 0;
}

static struct qcow_l2_table *l2_table_lookup(struct rb_root *root, u64 offset)
{
	struct rb_node *link = root->rb_node;

	while (link) {
		struct qcow_l2_table *t;

		t = rb_entry(link, struct qcow_l2_table, node);

		if (t->offset > offset)
			link = link->rb_left;
		else if (t->offset < offset)
			link = link->rb_right;
		else
			return t;
	}

	return NULL;
}

static void l1_table_free_cache(struct qcow_l1_table *l1t)
{
	struct rb_root *r = &l1t->root;
	struct list_head *pos, *n;
	struct qcow_l2_table *t;

	list_for_each_safe(pos, n, &l1t->lru_list) {
		/* Remove the cached table from the list and RB tree */
		t = list_entry(pos, struct qcow_l2_table, list);
		rb_erase(&t->node, r);
		list_del(&t->list);

		/* Free the cached node */
		free(t);
	}
}

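/* Write a cached L2 table back to the image at its recorded offset. */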
static int qcow_l2_cache_write(struct qcow *q, struct qcow_l2_table *c)
{
	struct qcow_header *header = q->header;
	u64 size;

	size = 1 << header->l2_bits;

	if (pwrite_in_full(q->fd, c->table, size * sizeof(u64), c->offset) < 0)
		return -1;

	return 0;
}

static int cache_table(struct qcow *q, struct qcow_l2_table *c)
{
	struct qcow_l1_table *l1t = &q->table;
	struct rb_root *r = &l1t->root;
	struct qcow_l2_table *lru;

	if (l1t->nr_cached == MAX_CACHE_NODES) {
		/*
		 * The node at the head of the list is the least recently used
		 * node. Remove it from the list and replace it with the new node.
		 */
		lru = list_first_entry(&l1t->lru_list, struct qcow_l2_table, list);

		/* Flush the evicted table before dropping it */
		if (qcow_l2_cache_write(q, lru) < 0)
			return -1;

		/* Remove the node from the cache */
		rb_erase(&lru->node, r);
		list_del_init(&lru->list);
		l1t->nr_cached--;

		/* Free the LRUed node */
		free(lru);
	}

	/* Add new node in RB Tree: helps in searching faster */
	if (l2_table_insert(r, c) < 0)
		return -1;

	/* Add in LRU replacement list */
	list_add_tail(&c->list, &l1t->lru_list);
	l1t->nr_cached++;

	return 0;
}

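/*
 * Look up a cached L2 table by offset; a hit is moved to the tail of the
 * LRU list so that it is evicted last.
 */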
static struct qcow_l2_table *l2_table_search(struct qcow *q, u64 offset)
{
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;

	l2t = l2_table_lookup(&l1t->root, offset);
	if (!l2t)
		return NULL;

	/* Update the LRU state, by moving the searched node to list tail */
	list_move_tail(&l2t->list, &l1t->lru_list);

	return l2t;
}

/* Allocates a new node for caching an L2 table */
static struct qcow_l2_table *new_cache_table(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;
	struct qcow_l2_table *c;
	u64 l2t_sz;
	u64 size;

	l2t_sz = 1 << header->l2_bits;
	size   = sizeof(*c) + l2t_sz * sizeof(u64);

	c = calloc(1, size);
	if (!c)
		return NULL;

	c->offset = offset;
	RB_CLEAR_NODE(&c->node);
	INIT_LIST_HEAD(&c->list);

	return c;
}

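/*
 * A guest offset decomposes into three fields: the top bits index the L1
 * table, the next l2_bits index the L2 table, and the low cluster_bits give
 * the byte offset within the cluster.
 */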
static inline u64 get_l1_index(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return offset >> (header->l2_bits + header->cluster_bits);
}

static inline u64 get_l2_index(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return (offset >> (header->cluster_bits)) & ((1 << header->l2_bits) - 1);
}

static inline u64 get_cluster_offset(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;

	return offset & ((1 << header->cluster_bits) - 1);
}

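/* Return the L2 table at 'offset', reading it from disk on a cache miss. */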
static struct qcow_l2_table *qcow_read_l2_table(struct qcow *q, u64 offset)
{
	struct qcow_header *header = q->header;
	struct qcow_l2_table *l2t;
	u64 size;

	size = 1 << header->l2_bits;

	/* search an entry for offset in cache */
	l2t = l2_table_search(q, offset);
	if (l2t)
		return l2t;

	/* allocate new node for caching l2 table */
	l2t = new_cache_table(q, offset);
	if (!l2t)
		goto error;

	/* table not cached: read from the disk */
	if (pread_in_full(q->fd, l2t->table, size * sizeof(u64), offset) < 0)
		goto error;

	/* cache the table */
	if (cache_table(q, l2t) < 0)
		goto error;

	return l2t;

error:
	free(l2t);
	return NULL;
}

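/*
 * Read at most one cluster's worth of data at 'offset'. Unallocated
 * clusters (a zero L1 or L2 entry) read back as zeros.
 */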
static ssize_t qcow_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst_len)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;
	u64 cluster_size;
	u64 clust_offset;
	u64 clust_start;
	u64 l2t_offset;
	u64 l2t_size;
	u64 l1_idx;
	u64 l2_idx;
	u64 length;

	cluster_size = 1 << header->cluster_bits;

	l1_idx = get_l1_index(q, offset);
	if (l1_idx >= l1t->table_size)
		return -1;

	clust_offset = get_cluster_offset(q, offset);
	if (clust_offset >= cluster_size)
		return -1;

	length = cluster_size - clust_offset;
	if (length > dst_len)
		length = dst_len;

	mutex_lock(&q->mutex);

	l2t_offset = be64_to_cpu(l1t->l1_table[l1_idx]);
	if (l2t_offset & QCOW_OFLAG_COMPRESSED) {
		pr_warning("compressed sectors are not supported");
		goto out_error;
	}

	l2t_offset &= QCOW_OFFSET_MASK;
	if (!l2t_offset)
		goto zero_cluster;

	l2t_size = 1 << header->l2_bits;

	/* read and cache level 2 table */
	l2t = qcow_read_l2_table(q, l2t_offset);
	if (!l2t)
		goto out_error;

	l2_idx = get_l2_index(q, offset);
	if (l2_idx >= l2t_size)
		goto out_error;

	clust_start = be64_to_cpu(l2t->table[l2_idx]);
	if (clust_start & QCOW_OFLAG_COMPRESSED) {
		pr_warning("compressed sectors are not supported");
		goto out_error;
	}

	clust_start &= QCOW_OFFSET_MASK;
	if (!clust_start)
		goto zero_cluster;

	mutex_unlock(&q->mutex);

	if (pread_in_full(q->fd, dst, length, clust_start + clust_offset) < 0)
		return -1;

	return length;

zero_cluster:
	mutex_unlock(&q->mutex);
	memset(dst, 0, length);
	return length;

out_error:
	mutex_unlock(&q->mutex);
	return -1;
}

static ssize_t qcow_read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
	struct qcow *q = disk->priv;
	struct qcow_header *header = q->header;
	u32 nr_read;
	u64 offset;
	char *buf;
	ssize_t nr;

	nr_read = 0;
	buf = dst;

	while (nr_read < dst_len) {
		offset = sector << SECTOR_SHIFT;
		if (offset >= header->size)
			return -1;

		nr = qcow_read_cluster(q, offset, buf, dst_len - nr_read);
		if (nr <= 0)
			return -1;

		nr_read += nr;
		buf += nr;
		sector += (nr >> SECTOR_SHIFT);
	}

	return dst_len;
}

static inline u64 file_size(int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0)
		return 0;

	return st.st_size;
}

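/* Write and immediately commit to disk, so metadata updates are durable. */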
static inline int qcow_pwrite_sync(int fd, void *buf, size_t count, off_t offset)
{
	if (pwrite_in_full(fd, buf, count, offset) < 0)
		return -1;

	return fdatasync(fd);
}

/* Writes a level 2 table at the end of the file. */
static u64 qcow_write_l2_table(struct qcow *q, u64 *table)
{
	struct qcow_header *header = q->header;
	u64 clust_sz;
	u64 f_sz;
	u64 off;
	u64 sz;

	f_sz = file_size(q->fd);
	if (!f_sz)
		return 0;

	sz = 1 << header->l2_bits;
	clust_sz = 1 << header->cluster_bits;
	off = ALIGN(f_sz, clust_sz);

	if (pwrite_in_full(q->fd, table, sz * sizeof(u64), off) < 0)
		return 0;

	return off;
}

static void refcount_table_free_cache(struct qcow_refcount_table *rft)
{
	struct rb_root *r = &rft->root;
	struct list_head *pos, *n;
	struct qcow_refcount_block *t;

	list_for_each_safe(pos, n, &rft->lru_list) {
		/* Remove the cached block from the list and RB tree */
		t = list_entry(pos, struct qcow_refcount_block, list);
		rb_erase(&t->node, r);
		list_del(&t->list);

		/* Free the cached node */
		free(t);
	}
}

static int refcount_block_insert(struct rb_root *root, struct qcow_refcount_block *new)
{
	struct rb_node **link = &(root->rb_node), *parent = NULL;
	u64 offset = new->offset;

	/* search the tree */
	while (*link) {
		struct qcow_refcount_block *t;

		t = rb_entry(*link, struct qcow_refcount_block, node);
		parent = *link;

		if (t->offset > offset)
			link = &(*link)->rb_left;
		else if (t->offset < offset)
			link = &(*link)->rb_right;
		else
			return 0;	/* already cached at this offset */
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);

	return 0;
}

static int write_refcount_block(struct qcow *q, struct qcow_refcount_block *rfb)
{
	if (pwrite_in_full(q->fd, rfb->entries, rfb->size * sizeof(u16), rfb->offset) < 0)
		return -1;

	return 0;
}

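/*
 * Insert a refcount block into the cache, evicting (and writing back) the
 * least recently used block when the cache is full.
 */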
static int cache_refcount_block(struct qcow *q, struct qcow_refcount_block *c)
{
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct rb_root *r = &rft->root;
	struct qcow_refcount_block *lru;

	if (rft->nr_cached == MAX_CACHE_NODES) {
		lru = list_first_entry(&rft->lru_list, struct qcow_refcount_block, list);

		if (write_refcount_block(q, lru) < 0)
			return -1;

		rb_erase(&lru->node, r);
		list_del_init(&lru->list);
		rft->nr_cached--;

		free(lru);
	}

	if (refcount_block_insert(r, c) < 0)
		return -1;

	list_add_tail(&c->list, &rft->lru_list);
	rft->nr_cached++;

	return 0;
}

static struct qcow_refcount_block *new_refcount_block(struct qcow *q, u64 rfb_offset)
{
	struct qcow_header *header = q->header;
	struct qcow_refcount_block *rfb;
	u64 cluster_size;

	cluster_size = 1 << header->cluster_bits;

	rfb = malloc(sizeof *rfb + cluster_size);
	if (!rfb)
		return NULL;

	rfb->offset = rfb_offset;
	rfb->size = cluster_size / sizeof(u16);
	RB_CLEAR_NODE(&rfb->node);
	INIT_LIST_HEAD(&rfb->list);

	return rfb;
}

static struct qcow_refcount_block *refcount_block_lookup(struct rb_root *root, u64 offset)
{
	struct rb_node *link = root->rb_node;

	while (link) {
		struct qcow_refcount_block *t;

		t = rb_entry(link, struct qcow_refcount_block, node);

		if (t->offset > offset)
			link = link->rb_left;
		else if (t->offset < offset)
			link = link->rb_right;
		else
			return t;
	}

	return NULL;
}

static struct qcow_refcount_block *refcount_block_search(struct qcow *q, u64 offset)
{
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct qcow_refcount_block *rfb;

	rfb = refcount_block_lookup(&rft->root, offset);
	if (!rfb)
		return NULL;

	/* Update the LRU state, by moving the searched node to list tail */
	list_move_tail(&rfb->list, &rft->lru_list);

	return rfb;
}

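/*
 * Return the refcount block covering cluster 'clust_idx': the refcount
 * table maps the cluster index to a block offset, which is served from the
 * cache or read from disk.
 */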
static struct qcow_refcount_block *qcow_read_refcount_block(struct qcow *q, u64 clust_idx)
{
	struct qcow_header *header = q->header;
	struct qcow_refcount_table *rft = &q->refcount_table;
	struct qcow_refcount_block *rfb;
	u64 rfb_offset;
	u64 rft_idx;

	rft_idx = clust_idx >> (header->cluster_bits - QCOW_REFCOUNT_BLOCK_SHIFT);
	if (rft_idx >= rft->rf_size)
		return NULL;

	rfb_offset = be64_to_cpu(rft->rf_table[rft_idx]);

	/* check the cache first */
	rfb = refcount_block_search(q, rfb_offset);
	if (rfb)
		return rfb;

	/* not cached: allocate a node and read the block from disk */
	rfb = new_refcount_block(q, rfb_offset);
	if (!rfb)
		return NULL;

	if (pread_in_full(q->fd, rfb->entries, rfb->size * sizeof(u16), rfb_offset) < 0)
		goto error_free_rfb;

	if (cache_refcount_block(q, rfb) < 0)
		goto error_free_rfb;

	return rfb;

error_free_rfb:
	free(rfb);
	return NULL;
}

/*
 * A QCOW file might grow during a write operation: not only data but also
 * metadata is written at the end of the file. Therefore it is necessary to
 * ensure every write is committed to disk. Hence we use qcow_pwrite_sync()
 * to synchronize the in-core state of the QCOW image with the disk.
 *
 * We also try to restore the image to a consistent state if a metadata
 * operation fails. The two metadata operations are the level 1 and level 2
 * table updates; if either of them fails, the image is truncated back to a
 * consistent state.
 */
static ssize_t qcow_write_cluster(struct qcow *q, u64 offset, void *buf, u32 src_len)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *l1t = &q->table;
	struct qcow_l2_table *l2t;
	u64 clust_start;
	u64 clust_flags;
	u64 l2t_offset;
	u64 clust_off;
	u64 l2t_size;
	u64 clust_sz;
	u64 l1t_idx;
	u64 l2t_idx;
	u64 f_sz;
	u64 len;

	l2t = NULL;
	l2t_size = 1 << header->l2_bits;
	clust_sz = 1 << header->cluster_bits;

	l1t_idx = get_l1_index(q, offset);
	if (l1t_idx >= l1t->table_size)
		return -1;

	l2t_idx = get_l2_index(q, offset);
	if (l2t_idx >= l2t_size)
		return -1;

	clust_off = get_cluster_offset(q, offset);
	if (clust_off >= clust_sz)
		return -1;

	len = clust_sz - clust_off;
	if (len > src_len)
		len = src_len;

	mutex_lock(&q->mutex);

	l2t_offset = be64_to_cpu(l1t->l1_table[l1t_idx]);
	if (l2t_offset & QCOW_OFLAG_COMPRESSED) {
		pr_warning("compressed clusters are not supported");
		goto error;
	}
	if (!(l2t_offset & QCOW_OFLAG_COPIED)) {
		pr_warning("L2 copy-on-write clusters are not supported");
		goto error;
	}

	l2t_offset &= QCOW_OFFSET_MASK;
	if (l2t_offset) {
		/* read and cache l2 table */
		l2t = qcow_read_l2_table(q, l2t_offset);
		if (!l2t)
			goto error;
	} else {
		l2t = new_cache_table(q, l2t_offset);
		if (!l2t)
			goto error;

		/* Capture the state of the consistent QCOW image */
		f_sz = file_size(q->fd);
		if (!f_sz)
			goto free_cache;

		/* Write the l2 table of 0's at the end of the file */
		l2t_offset = qcow_write_l2_table(q, l2t->table);
		if (!l2t_offset)
			goto free_cache;

		if (cache_table(q, l2t) < 0) {
			/* Restore the consistent file size on failure */
			if (ftruncate(q->fd, f_sz) < 0)
				goto free_cache;

			goto free_cache;
		}

		/* Update the in-core entry */
		l1t->l1_table[l1t_idx] = cpu_to_be64(l2t_offset);
	}

	/* Capture the state of the consistent QCOW image */
	f_sz = file_size(q->fd);
	if (!f_sz)
		goto error;

	clust_start = be64_to_cpu(l2t->table[l2t_idx]);

	clust_flags = clust_start & QCOW_OFLAGS_MASK;
	if (clust_flags & QCOW_OFLAG_COMPRESSED) {
		pr_warning("compressed clusters are not supported");
		goto error;
	}

	clust_start &= QCOW_OFFSET_MASK;
	if (!clust_start) {
		/* Allocate the data cluster at the aligned end of the file */
		clust_start = ALIGN(f_sz, clust_sz);
		l2t->table[l2t_idx] = cpu_to_be64(clust_start | QCOW_OFLAG_COPIED);
	}

	if (!(clust_flags & QCOW_OFLAG_COPIED)) {
		struct qcow_refcount_block *rfb = NULL;
		u16 clust_refcount;
		u64 clust_idx;
		u64 rfb_idx;

		clust_idx = (clust_start & QCOW_OFFSET_MASK) >> (header->cluster_bits);

		rfb = qcow_read_refcount_block(q, clust_idx);
		if (!rfb) {
			pr_warning("L1: error while reading refcount table");
			goto error;
		}

		rfb_idx = clust_idx & (((1ULL << (header->cluster_bits - QCOW_REFCOUNT_BLOCK_SHIFT)) - 1));
		if (rfb_idx >= rfb->size) {
			pr_warning("L1: refcount block index out of bounds");
			goto error;
		}

		clust_refcount = be16_to_cpu(rfb->entries[rfb_idx]);
		if (!clust_refcount) {
			/* mark the newly allocated cluster as in use */
			clust_refcount = 1;
			rfb->entries[rfb_idx] = cpu_to_be16(clust_refcount);
		}

		if (clust_refcount > 1) {
			pr_warning("L1 copy-on-write clusters are not supported");
			goto error;
		}
	}

	mutex_unlock(&q->mutex);

	/* Write actual data */
	if (pwrite_in_full(q->fd, buf, len, clust_start + clust_off) < 0)
		return -1;

	return len;

free_cache:
	free(l2t);
error:
	mutex_unlock(&q->mutex);
	return -1;
}

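/* Split a sector-based write into per-cluster writes. */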
static ssize_t qcow_write_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
	struct qcow *q = disk->priv;
	struct qcow_header *header = q->header;
	u32 nr_written;
	u64 offset;
	char *buf;
	ssize_t nr;

	buf = src;
	nr_written = 0;
	offset = sector << SECTOR_SHIFT;

	while (nr_written < src_len) {
		if (offset >= header->size)
			return -1;

		nr = qcow_write_cluster(q, offset, buf, src_len - nr_written);
		if (nr < 0)
			return -1;

		nr_written += nr;
		buf += nr;
		offset += nr;
	}

	return nr_written;
}

static ssize_t qcow_nowrite_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
	pr_info("%s: no write support\n", __func__);
	return -1;
}

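/*
 * Flush ordering: cached refcount blocks first, then cached L2 tables, each
 * followed by fdatasync(), and finally the L1 table before fsync()ing the
 * image.
 */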
static int qcow_disk_flush(struct disk_image *disk)
{
	struct qcow *q = disk->priv;
	struct qcow_refcount_table *rft;
	struct qcow_header *header;
	struct list_head *pos, *n;
	struct qcow_l1_table *l1t;

	header = q->header;
	l1t = &q->table;
	rft = &q->refcount_table;

	mutex_lock(&q->mutex);

	/* Write back all cached refcount blocks */
	list_for_each_safe(pos, n, &rft->lru_list) {
		struct qcow_refcount_block *c = list_entry(pos, struct qcow_refcount_block, list);

		if (write_refcount_block(q, c) < 0)
			goto error_unlock;
	}

	if (fdatasync(disk->fd) < 0)
		goto error_unlock;

	/* Write back all cached L2 tables */
	list_for_each_safe(pos, n, &l1t->lru_list) {
		struct qcow_l2_table *c = list_entry(pos, struct qcow_l2_table, list);

		if (qcow_l2_cache_write(q, c) < 0)
			goto error_unlock;
	}

	if (fdatasync(disk->fd) < 0)
		goto error_unlock;

	/* Finally, write the L1 table itself */
	if (pwrite_in_full(disk->fd, l1t->l1_table, l1t->table_size * sizeof(u64), header->l1_table_offset) < 0)
		goto error_unlock;

	mutex_unlock(&q->mutex);

	return fsync(disk->fd);

error_unlock:
	mutex_unlock(&q->mutex);
	return -1;
}

static int qcow_disk_close(struct disk_image *disk)
{
	struct qcow *q;

	if (!disk)
		return 0;

	q = disk->priv;

	refcount_table_free_cache(&q->refcount_table);
	l1_table_free_cache(&q->table);
	free(q->refcount_table.rf_table);
	free(q->table.l1_table);
	free(q->header);
	free(q);

	return 0;
}

static struct disk_image_operations qcow_disk_readonly_ops = {
	.read_sector	= qcow_read_sector,
	.write_sector	= qcow_nowrite_sector,
	.close		= qcow_disk_close,
};

static struct disk_image_operations qcow_disk_ops = {
	.read_sector	= qcow_read_sector,
	.write_sector	= qcow_write_sector,
	.flush		= qcow_disk_flush,
	.close		= qcow_disk_close,
};

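/* Load the refcount table from the offset recorded in the image header. */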
static int qcow_read_refcount_table(struct qcow *q)
{
	struct qcow_header *header = q->header;
	struct qcow_refcount_table *rft = &q->refcount_table;
	u64 cluster_size;

	cluster_size = 1 << header->cluster_bits;

	rft->rf_size = (header->refcount_table_size * cluster_size) / sizeof(u64);

	rft->rf_table = calloc(rft->rf_size, sizeof(u64));
	if (!rft->rf_table)
		return -1;

	rft->root = RB_ROOT;
	INIT_LIST_HEAD(&rft->lru_list);

	return pread_in_full(q->fd, rft->rf_table, sizeof(u64) * rft->rf_size, header->refcount_table_offset);
}

static int qcow_read_l1_table(struct qcow *q)
{
	struct qcow_header *header = q->header;
	struct qcow_l1_table *table = &q->table;

	table->table_size = header->l1_size;

	table->l1_table = calloc(table->table_size, sizeof(u64));
	if (!table->l1_table)
		return -1;

	return pread_in_full(q->fd, table->l1_table, sizeof(u64) * table->table_size, header->l1_table_offset);
}

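/*
 * Read the on-disk QCOW2 header, convert its big-endian fields to host
 * byte order, and translate it into the in-core qcow_header layout.
 */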
static void *qcow2_read_header(int fd)
{
	struct qcow2_header_disk f_header;
	struct qcow_header *header;

	header = malloc(sizeof(struct qcow_header));
	if (!header)
		return NULL;

	if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0) {
		free(header);
		return NULL;
	}

	be32_to_cpus(&f_header.magic);
	be32_to_cpus(&f_header.version);
	be64_to_cpus(&f_header.backing_file_offset);
	be32_to_cpus(&f_header.backing_file_size);
	be32_to_cpus(&f_header.cluster_bits);
	be64_to_cpus(&f_header.size);
	be32_to_cpus(&f_header.crypt_method);
	be32_to_cpus(&f_header.l1_size);
	be64_to_cpus(&f_header.l1_table_offset);
	be64_to_cpus(&f_header.refcount_table_offset);
	be32_to_cpus(&f_header.refcount_table_clusters);
	be32_to_cpus(&f_header.nb_snapshots);
	be64_to_cpus(&f_header.snapshots_offset);

	*header = (struct qcow_header) {
		.size			= f_header.size,
		.l1_table_offset	= f_header.l1_table_offset,
		.l1_size		= f_header.l1_size,
		.cluster_bits		= f_header.cluster_bits,
		.l2_bits		= f_header.cluster_bits - 3,
		.refcount_table_offset	= f_header.refcount_table_offset,
		.refcount_table_size	= f_header.refcount_table_clusters,
	};

	return header;
}

static struct disk_image *qcow2_probe(int fd, bool readonly)
{
	struct disk_image *disk_image;
	struct qcow_l1_table *l1t;
	struct qcow_header *h;
	struct qcow *q;

	q = calloc(1, sizeof(struct qcow));
	if (!q)
		return NULL;

	mutex_init(&q->mutex);
	q->fd = fd;

	l1t = &q->table;
	l1t->root = RB_ROOT;
	INIT_LIST_HEAD(&l1t->lru_list);

	h = q->header = qcow2_read_header(fd);
	if (!h)
		goto error;

	if (qcow_read_l1_table(q) < 0)
		goto error;

	if (qcow_read_refcount_table(q) < 0)
		goto error;

	/*
	 * Do not use mmap; use read/write instead.
	 */
	if (readonly)
		disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_NOMMAP);
	else
		disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_NOMMAP);

	if (!disk_image)
		goto error;

	disk_image->priv = q;

	return disk_image;

error:
	free(q->table.l1_table);
	free(q->header);
	free(q);

	return NULL;
}

static bool qcow2_check_image(int fd)
{
	struct qcow2_header_disk f_header;

	if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0)
		return false;

	be32_to_cpus(&f_header.magic);
	be32_to_cpus(&f_header.version);

	if (f_header.magic != QCOW_MAGIC)
		return false;

	if (f_header.version != QCOW2_VERSION)
		return false;

	return true;
}

static void *qcow1_read_header(int fd)
{
	struct qcow1_header_disk f_header;
	struct qcow_header *header;

	header = malloc(sizeof(struct qcow_header));
	if (!header)
		return NULL;

	if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0) {
		free(header);
		return NULL;
	}

	be32_to_cpus(&f_header.magic);
	be32_to_cpus(&f_header.version);
	be64_to_cpus(&f_header.backing_file_offset);
	be32_to_cpus(&f_header.backing_file_size);
	be32_to_cpus(&f_header.mtime);
	be64_to_cpus(&f_header.size);
	be32_to_cpus(&f_header.crypt_method);
	be64_to_cpus(&f_header.l1_table_offset);

	*header = (struct qcow_header) {
		.size			= f_header.size,
		.l1_table_offset	= f_header.l1_table_offset,
		.l1_size		= f_header.size / ((1 << f_header.l2_bits) * (1 << f_header.cluster_bits)),
		.cluster_bits		= f_header.cluster_bits,
		.l2_bits		= f_header.l2_bits,
	};

	return header;
}

static struct disk_image *qcow1_probe(int fd, bool readonly)
{
	struct disk_image *disk_image;
	struct qcow_l1_table *l1t;
	struct qcow_header *h;
	struct qcow *q;

	q = calloc(1, sizeof(struct qcow));
	if (!q)
		return NULL;

	mutex_init(&q->mutex);
	q->fd = fd;

	l1t = &q->table;
	l1t->root = RB_ROOT;
	INIT_LIST_HEAD(&l1t->lru_list);

	h = q->header = qcow1_read_header(fd);
	if (!h)
		goto error;

	if (qcow_read_l1_table(q) < 0)
		goto error;

	/*
	 * Do not use mmap; use read/write instead.
	 */
	if (readonly)
		disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_NOMMAP);
	else
		disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_NOMMAP);

	if (!disk_image)
		goto error;

	disk_image->priv = q;

	return disk_image;

error:
	free(q->table.l1_table);
	free(q->header);
	free(q);

	return NULL;
}

static bool qcow1_check_image(int fd)
{
	struct qcow1_header_disk f_header;

	if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0)
		return false;

	be32_to_cpus(&f_header.magic);
	be32_to_cpus(&f_header.version);

	if (f_header.magic != QCOW_MAGIC)
		return false;

	if (f_header.version != QCOW1_VERSION)
		return false;

	return true;
}

struct disk_image *qcow_probe(int fd, bool readonly)
{
	if (qcow1_check_image(fd))
		return qcow1_probe(fd, readonly);

	if (qcow2_check_image(fd))
		return qcow2_probe(fd, readonly);

	return NULL;
}