block/qed-cluster.c
/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qed.h"
/**
 * Count the number of contiguous data clusters
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Maximum number of clusters
 * @offset:         Set to first cluster offset
 *
 * This function scans tables for contiguous clusters.  A contiguous run of
 * clusters may be allocated, unallocated, or zero.
 */
static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
                                                  QEDTable *table,
                                                  unsigned int index,
                                                  unsigned int n,
                                                  uint64_t *offset)
{
    unsigned int end = MIN(index + n, s->table_nelems);
    uint64_t last = table->offsets[index];
    unsigned int i;

    *offset = last;

    for (i = index + 1; i < end; i++) {
        if (qed_offset_is_unalloc_cluster(last)) {
            /* Counting unallocated clusters */
            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
                break;
            }
        } else if (qed_offset_is_zero_cluster(last)) {
            /* Counting zero clusters */
            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
                break;
            }
        } else {
            /* Counting allocated clusters */
            if (table->offsets[i] != last + s->header.cluster_size) {
                break;
            }
            last = table->offsets[i];
        }
    }
    return i - index;
}
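
/*
 * Worked example (illustrative, not part of the original file), assuming a
 * cluster size of 0x10000 bytes and an allocated run: for
 * table->offsets[index..index+3] == { 0x100000, 0x110000, 0x120000, 0x200000 }
 * and n == 4, the loop above accepts the first two steps (each offset equals
 * last + cluster_size), stops at 0x200000, and returns 3.  Unallocated and
 * zero runs are counted by cluster type rather than by adjacent file offsets.
 */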
typedef struct {
    BDRVQEDState *s;
    uint64_t pos;
    size_t len;

    QEDRequest *request;

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;
} QEDFindClusterCB;
static void qed_find_cluster_cb(void *opaque, int ret)
{
    QEDFindClusterCB *find_cluster_cb = opaque;
    BDRVQEDState *s = find_cluster_cb->s;
    QEDRequest *request = find_cluster_cb->request;
    uint64_t offset = 0;
    size_t len = 0;
    unsigned int index;
    unsigned int n;

    if (ret) {
        goto out;
    }

    index = qed_l2_index(s, find_cluster_cb->pos);
    n = qed_bytes_to_clusters(s,
                              qed_offset_into_cluster(s, find_cluster_cb->pos) +
                              find_cluster_cb->len);
    n = qed_count_contiguous_clusters(s, request->l2_table->table,
                                      index, n, &offset);

    if (qed_offset_is_unalloc_cluster(offset)) {
        ret = QED_CLUSTER_L2;
    } else if (qed_offset_is_zero_cluster(offset)) {
        ret = QED_CLUSTER_ZERO;
    } else if (qed_check_cluster_offset(s, offset)) {
        ret = QED_CLUSTER_FOUND;
    } else {
        ret = -EINVAL;
    }

    len = MIN(find_cluster_cb->len, n * s->header.cluster_size -
              qed_offset_into_cluster(s, find_cluster_cb->pos));

out:
    find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len);
    qemu_free(find_cluster_cb);
}
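
/*
 * Worked example (illustrative, not part of the original file), assuming a
 * 64 KiB (0x10000 byte) cluster size: for a request with
 * qed_offset_into_cluster(s, pos) == 0x1000, len == 0x30000, and a contiguous
 * run of n == 2 clusters, the clamp in qed_find_cluster_cb() reports
 * MIN(0x30000, 2 * 0x10000 - 0x1000) == 0x1f000 bytes, i.e. only the bytes
 * that stay within the contiguous run.
 */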
/**
 * Find the offset of a data cluster
 *
 * @s:          QED state
 * @request:    L2 cache entry
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function translates a position in the block device to an offset in the
 * image file.  It invokes the cb completion callback to report back the
 * translated offset or unallocated range in the image file.
 *
 * If the L2 table exists, request->l2_table points to the L2 table cache entry
 * and the caller must free the reference when they are finished.  The cache
 * entry is exposed in this way to avoid callers having to read the L2 table
 * again later during request processing.  If request->l2_table is non-NULL it
 * will be unreferenced before taking on the new cache entry.
 */
void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos,
                      size_t len, QEDFindClusterFunc *cb, void *opaque)
{
    QEDFindClusterCB *find_cluster_cb;
    uint64_t l2_offset;

    /* Limit length to L2 boundary.  Requests are broken up at the L2 boundary
     * so that a request acts on one L2 table at a time.
     */
    len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);

    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    if (qed_offset_is_unalloc_cluster(l2_offset)) {
        cb(opaque, QED_CLUSTER_L1, 0, len);
        return;
    }
    if (!qed_check_table_offset(s, l2_offset)) {
        cb(opaque, -EINVAL, 0, 0);
        return;
    }

    find_cluster_cb = qemu_malloc(sizeof(*find_cluster_cb));
    find_cluster_cb->s = s;
    find_cluster_cb->pos = pos;
    find_cluster_cb->len = len;
    find_cluster_cb->cb = cb;
    find_cluster_cb->opaque = opaque;
    find_cluster_cb->request = request;

    qed_read_l2_table(s, request, l2_offset,
                      qed_find_cluster_cb, find_cluster_cb);
}
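
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * QEDFindClusterFunc callback and a lookup call.  The names example_find_cb
 * and example_lookup are hypothetical; only qed_find_cluster() and the
 * QED_CLUSTER_* results come from this file and qed.h.
 */
static void example_find_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    switch (ret) {
    case QED_CLUSTER_FOUND:
        /* offset is the image file offset of the data; len bytes are valid */
        break;
    case QED_CLUSTER_ZERO:
        /* len bytes read back as zeroes; no data clusters are allocated */
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        /* len bytes are unallocated: the data cluster (L2) or the whole
         * L2 table (L1) is missing */
        break;
    default:
        /* ret is a negative errno such as -EINVAL for a corrupt offset */
        break;
    }
}

static void example_lookup(BDRVQEDState *s, QEDRequest *request,
                           uint64_t pos, size_t len)
{
    /* Per the comment above qed_find_cluster(), request->l2_table may end up
     * holding an L2 cache reference that the caller must release when done. */
    qed_find_cluster(s, request, pos, len, example_find_cb, NULL);
}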