block/qed-check.c
/*
 * QEMU Enhanced Disk Format Consistency Check
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qed.h"
typedef struct {
    BDRVQEDState *s;
    BdrvCheckResult *result;
    bool fix;                           /* whether to fix invalid offsets */

    uint64_t nclusters;
    uint32_t *used_clusters;            /* referenced cluster bitmap */

    QEDRequest request;
} QEDCheck;
static bool qed_test_bit(uint32_t *bitmap, uint64_t n) {
    return !!(bitmap[n / 32] & (1 << (n % 32)));
}

static void qed_set_bit(uint32_t *bitmap, uint64_t n) {
    bitmap[n / 32] |= 1 << (n % 32);
}
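
/*
 * Example (illustrative, not part of the original file): with 64 KB
 * clusters, byte offset 0x30000 is cluster 3, which lands in word 0,
 * bit 3 of the bitmap:
 *
 *   uint64_t cluster = qed_bytes_to_clusters(s, 0x30000);  // == 3
 *   qed_set_bit(bitmap, cluster);    // bitmap[0] |= 1 << 3
 *   qed_test_bit(bitmap, cluster);   // now returns true
 */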
/**
 * Set bitmap bits for clusters
 *
 * @check:          Check structure
 * @offset:         Starting offset in bytes
 * @n:              Number of clusters
 *
 * Returns: true if all clusters were previously unreferenced, false if
 * any cluster was already marked (each duplicate counts as a corruption)
 */
static bool qed_set_used_clusters(QEDCheck *check, uint64_t offset,
                                  unsigned int n)
{
    uint64_t cluster = qed_bytes_to_clusters(check->s, offset);
    unsigned int corruptions = 0;

    while (n-- != 0) {
        /* Clusters should only be referenced once */
        if (qed_test_bit(check->used_clusters, cluster)) {
            corruptions++;
        }

        qed_set_bit(check->used_clusters, cluster);
        cluster++;
    }

    check->result->corruptions += corruptions;
    return corruptions == 0;
}
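
/*
 * For example (illustrative): a caller marks a table spanning
 * s->header.table_size clusters in a single call; a false return means
 * at least one of those clusters was already claimed elsewhere:
 *
 *   if (!qed_set_used_clusters(check, offset, s->header.table_size)) {
 *       // duplicate reference, already counted in result->corruptions
 *   }
 */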
/**
 * Check an L2 table
 *
 * @ret:            Number of invalid cluster offsets
 */
static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid = 0;
    uint64_t last_offset = 0;

    for (i = 0; i < s->table_nelems; i++) {
        uint64_t offset = table->offsets[i];

        if (qed_offset_is_unalloc_cluster(offset) ||
            qed_offset_is_zero_cluster(offset)) {
            continue;
        }

        check->result->bfi.allocated_clusters++;
        if (last_offset && (last_offset + s->header.cluster_size != offset)) {
            check->result->bfi.fragmented_clusters++;
        }
        last_offset = offset;

        /* Detect invalid cluster offset */
        if (!qed_check_cluster_offset(s, offset)) {
            if (check->fix) {
                table->offsets[i] = 0;
                check->result->corruptions_fixed++;
            } else {
                check->result->corruptions++;
            }

            num_invalid++;
            continue;
        }

        qed_set_used_clusters(check, offset, 1);
    }

    return num_invalid;
}
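
/*
 * Fragmentation accounting (illustrative): bfi.fragmented_clusters counts
 * allocated clusters whose file offset does not directly follow the
 * previous allocated entry. With 64 KB (0x10000) clusters, entry offsets
 * 0x10000, 0x20000, 0x50000 give allocated_clusters == 3 and
 * fragmented_clusters == 1, because 0x20000 + 0x10000 != 0x50000.
 */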
/**
 * Descend tables and check each cluster is referenced once only
 */
static int qed_check_l1_table(QEDCheck *check, QEDTable *table)
{
    BDRVQEDState *s = check->s;
    unsigned int i, num_invalid_l1 = 0;
    int ret, last_error = 0;

    /* Mark L1 table clusters used */
    qed_set_used_clusters(check, s->header.l1_table_offset,
                          s->header.table_size);

    for (i = 0; i < s->table_nelems; i++) {
        unsigned int num_invalid_l2;
        uint64_t offset = table->offsets[i];

        if (qed_offset_is_unalloc_cluster(offset)) {
            continue;
        }

        /* Detect invalid L2 offset */
        if (!qed_check_table_offset(s, offset)) {
            /* Clear invalid offset */
            if (check->fix) {
                table->offsets[i] = 0;
                check->result->corruptions_fixed++;
            } else {
                check->result->corruptions++;
            }

            num_invalid_l1++;
            continue;
        }

        if (!qed_set_used_clusters(check, offset, s->header.table_size)) {
            continue; /* skip an invalid table */
        }

        ret = qed_read_l2_table_sync(s, &check->request, offset);
        if (ret) {
            check->result->check_errors++;
            last_error = ret;
            continue;
        }

        num_invalid_l2 = qed_check_l2_table(check,
                                            check->request.l2_table->table);

        /* Write out fixed L2 table */
        if (num_invalid_l2 > 0 && check->fix) {
            ret = qed_write_l2_table_sync(s, &check->request, 0,
                                          s->table_nelems, false);
            if (ret) {
                check->result->check_errors++;
                last_error = ret;
                continue;
            }
        }
    }

    /* Drop reference to final table */
    qed_unref_l2_cache_entry(check->request.l2_table);
    check->request.l2_table = NULL;

    /* Write out fixed L1 table */
    if (num_invalid_l1 > 0 && check->fix) {
        ret = qed_write_l1_table_sync(s, 0, s->table_nelems);
        if (ret) {
            check->result->check_errors++;
            last_error = ret;
        }
    }

    return last_error;
}
/**
 * Check for unreferenced (leaked) clusters
 */
static void qed_check_for_leaks(QEDCheck *check)
{
    BDRVQEDState *s = check->s;
    uint64_t i;

    for (i = s->header.header_size; i < check->nclusters; i++) {
        if (!qed_test_bit(check->used_clusters, i)) {
            check->result->leaks++;
        }
    }
}
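
/*
 * Note: the first header_size clusters hold the image header and are
 * implicitly in use, so the leak scan starts past them; any remaining
 * cluster never marked in used_clusters is reported as a leak. Leaks
 * waste space but, unlike corruptions, do not endanger data.
 */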
/**
 * Mark an image clean once it passes check or has been repaired
 */
static void qed_check_mark_clean(BDRVQEDState *s, BdrvCheckResult *result)
{
    /* Skip if there were unfixable corruptions or I/O errors */
    if (result->corruptions > 0 || result->check_errors > 0) {
        return;
    }

    /* Skip if image is already marked clean */
    if (!(s->header.features & QED_F_NEED_CHECK)) {
        return;
    }

    /* Ensure fixes reach storage before clearing check bit */
    bdrv_flush(s->bs);

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header_sync(s);
}
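
/*
 * Background (descriptive, based on the QED design): QED_F_NEED_CHECK is
 * set in the header before allocating writes begin and is only cleared on
 * a clean shutdown, so after a crash the driver finds the bit set at open
 * time, runs this consistency check with fix enabled, and relies on
 * qed_check_mark_clean() to clear the bit once the image is verified or
 * repaired.
 */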
int qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix)
{
    QEDCheck check = {
        .s = s,
        .result = result,
        .nclusters = qed_bytes_to_clusters(s, s->file_size),
        .request = { .l2_table = NULL },
        .fix = fix,
    };
    int ret;

    /* One bit per cluster, rounded up to whole 32-bit words */
    check.used_clusters = g_malloc0(((check.nclusters + 31) / 32) *
                                    sizeof(check.used_clusters[0]));

    check.result->bfi.total_clusters =
        (s->header.image_size + s->header.cluster_size - 1) /
        s->header.cluster_size;
    ret = qed_check_l1_table(&check, s->l1_table);
    if (ret == 0) {
        /* Only check for leaks if entire image was scanned successfully */
        qed_check_for_leaks(&check);

        if (fix) {
            qed_check_mark_clean(s, result);
        }
    }

    g_free(check.used_clusters);
    return ret;
}
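
/*
 * Usage sketch (illustrative, not part of the original file): the QED
 * driver's bdrv_check callback forwards to this function, so a
 * report-only invocation looks roughly like:
 *
 *   BdrvCheckResult result = {0};
 *   int ret = qed_check(s, &result, false);   // report only, no repair
 *   if (ret == 0 && result.corruptions == 0 && result.leaks == 0) {
 *       // image is consistent
 *   }
 */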