/* $NetBSD: blkif.h,v 1.6 2007/12/25 18:33:35 perry Exp $ */
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include "ring.h"
#include "../grant_table.h"

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */
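
/*
 * Illustrative sketch (not part of the interface): how a frontend can use
 * the hold-off mechanism when pushing a request.  RING_GET_REQUEST() and
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() come from "ring.h"; the function
 * example_notify_backend() is a placeholder for whatever event-channel
 * notification primitive the OS provides.
 */
#if 0
static void
example_submit(blkif_front_ring_t *ring, const blkif_request_t *req)
{
    blkif_request_t *slot;
    int notify;

    /* Copy the request into the next private slot and advance the producer. */
    slot = RING_GET_REQUEST(ring, ring->req_prod_pvt);
    *slot = *req;
    ring->req_prod_pvt++;

    /* Publish, and only notify if the backend asked for it via req_event. */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
    if (notify)
        example_notify_backend();
}
#endif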

#ifndef blkif_vdev_t
#define blkif_vdev_t uint16_t
#endif
#define blkif_sector_t uint64_t

/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2
/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info. A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate. The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes. If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3
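
/*
 * Illustrative sketch (not part of the interface): a frontend deciding
 * whether to issue cache-flush requests.  The helper read_backend_feature()
 * is hypothetical; only BLKIF_OP_FLUSH_DISKCACHE, blkif_response_t and
 * BLKIF_RSP_EOPNOTSUPP come from this header.
 */
#if 0
static int flush_supported;   /* from "feature-flush-cache" at connect time */

static void
example_connect(void)
{
    /* A missing node reads as 0, i.e. do not bother attempting flushes. */
    flush_supported = read_backend_feature("feature-flush-cache");
}

static void
example_flush_done(const blkif_response_t *rsp)
{
    if (rsp->status == BLKIF_RSP_EOPNOTSUPP) {
        /* The backend (or the hardware behind it) cannot flush: stop trying. */
        flush_supported = 0;
    }
}
#endif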

/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(blkif_ring_t) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
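
/*
 * Illustrative arithmetic (assuming 4 KiB pages and the conventional
 * 512-byte blkif sector size, neither of which is defined in this header):
 * each segment maps one granted page, i.e. at most 8 sectors, so a single
 * request can cover at most 11 * 8 = 88 sectors = 45056 bytes (44 KiB).
 */
#if 0
#define EXAMPLE_SECTOR_SIZE      512                            /* assumption */
#define EXAMPLE_SECTORS_PER_SEG  (4096 / EXAMPLE_SECTOR_SIZE)   /* 8          */
#define EXAMPLE_MAX_BYTES_PER_REQ \
    (BLKIF_MAX_SEGMENTS_PER_REQUEST * 4096)                     /* 44 KiB     */
#endif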

struct blkif_request_segment {
    grant_ref_t gref;        /* reference to I/O buffer frame        */
    /* @first_sect: first sector in frame to transfer (inclusive).   */
    /* @last_sect: last sector in frame to transfer (inclusive).     */
    uint8_t     first_sect, last_sect;
};

/* native-type requests/responses (always used in frontends) */

struct blkif_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_request blkif_request_t;
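
/*
 * Illustrative sketch (not part of the interface): filling in a native
 * read request covering part of one granted page.  gref_of() and
 * next_req_id() are hypothetical helpers; sector units are assumed to be
 * the conventional 512 bytes.
 */
#if 0
static void
example_fill_read(blkif_request_t *req, blkif_vdev_t handle,
                  blkif_sector_t start_sector, unsigned int nsectors)
{
    req->operation     = BLKIF_OP_READ;
    req->handle        = handle;
    req->id            = next_req_id();  /* echoed back in the response   */
    req->sector_number = start_sector;
    req->nr_segments   = 1;

    req->seg[0].gref       = gref_of(/* page holding the data buffer */);
    req->seg[0].first_sect = 0;              /* start of the page          */
    req->seg[0].last_sect  = nsectors - 1;   /* inclusive; <= 7 for 4K page */
}
#endif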

struct blkif_response {
    uint64_t        id;              /* copied from request */
    uint8_t         operation;       /* copied from request */
    int16_t         status;          /* BLKIF_RSP_???       */
};
typedef struct blkif_response blkif_response_t;

/* i386 requests/responses */
struct blkif_x86_32_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       id;           /* private guest value, echoed in resp  */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __packed;
typedef struct blkif_x86_32_request blkif_x86_32_request_t;

struct blkif_x86_32_response {
    uint64_t        id;              /* copied from request */
    uint8_t         operation;       /* copied from request */
    uint8_t         _pad;
    int16_t         status;          /* BLKIF_RSP_???       */
} __packed;
typedef struct blkif_x86_32_response blkif_x86_32_response_t;

/* amd64-type requests/responses (always used in frontends) */

struct blkif_x86_64_request {
    uint8_t        operation;    /* BLKIF_OP_???                         */
    uint8_t        nr_segments;  /* number of segments                   */
    blkif_vdev_t   handle;       /* only for read/write requests         */
    uint64_t       __attribute__((__aligned__(8))) id; /* private guest value, echoed in resp */
    blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
typedef struct blkif_x86_64_request blkif_x86_64_request_t;

struct blkif_x86_64_response {
    uint64_t       __attribute__((__aligned__(8))) id; /* copied from request */
    uint8_t         operation;       /* copied from request */
    int16_t         status;          /* BLKIF_RSP_???       */
};
typedef struct blkif_x86_64_response blkif_x86_64_response_t;

/*
 * STATUS RETURN CODES.
 */
/* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
/* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
/* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0
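
/*
 * Illustrative sketch (not part of the interface): mapping a response
 * status to an errno-style result in a frontend completion handler.
 * EIO/EOPNOTSUPP come from <errno.h> or the kernel's equivalent.
 */
#if 0
static int
example_status_to_error(const blkif_response_t *rsp)
{
    switch (rsp->status) {
    case BLKIF_RSP_OKAY:
        return 0;
    case BLKIF_RSP_EOPNOTSUPP:
        return EOPNOTSUPP;   /* e.g. barrier/flush not supported */
    case BLKIF_RSP_ERROR:
    default:
        return EIO;
    }
}
#endif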

/*
 * Generate blkif ring structures and types.
 */
DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);

union blkif_back_ring_proto {
    blkif_back_ring_t        ring_n; /* native/common members */
    blkif_x86_32_back_ring_t ring_32;
    blkif_x86_64_back_ring_t ring_64;
};
typedef union blkif_back_ring_proto blkif_back_ring_proto_t;
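
/*
 * Illustrative sketch (not part of the interface): a backend initialising
 * the ring view that matches the frontend's ABI.  How the protocol is
 * discovered (e.g. via the frontend's xenbus "protocol" node) is outside
 * this header; BACK_RING_INIT() comes from "ring.h" and PAGE_SIZE from
 * the kernel environment.
 */
#if 0
static void
example_backend_ring_init(blkif_back_ring_proto_t *r, void *shared_page,
                          int proto /* 0 = native, 1 = x86_32, 2 = x86_64 */)
{
    switch (proto) {
    case 1:
        BACK_RING_INIT(&r->ring_32, (blkif_x86_32_sring_t *)shared_page,
                       PAGE_SIZE);
        break;
    case 2:
        BACK_RING_INIT(&r->ring_64, (blkif_x86_64_sring_t *)shared_page,
                       PAGE_SIZE);
        break;
    default:
        BACK_RING_INIT(&r->ring_n, (blkif_sring_t *)shared_page, PAGE_SIZE);
        break;
    }
}
#endif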

#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4
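
/*
 * Illustrative sketch (not part of the interface): interpreting the VDISK_*
 * flags, which the backend conventionally advertises via its xenbus "info"
 * node; how that value is read is outside this header.
 */
#if 0
static int
example_is_writable(unsigned int vdisk_info)
{
    return (vdisk_info & VDISK_READONLY) == 0;
}
#endif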

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */