/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>
extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256
/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment.
 */
#define XEN_PAGES_PER_SEGMENT	(PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
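/*
 * Worked example (a sketch; assumes sizeof(struct blkif_request_segment)
 * is 8 bytes and XEN_PAGE_SIZE is 4K):
 *
 *   4K guest pages:  XEN_PAGES_PER_SEGMENT = 4096/4096 = 1
 *                    XEN_PAGES_PER_INDIRECT_FRAME = 4096/8 = 512
 *                    SEGS_PER_INDIRECT_FRAME = 512/1 = 512
 *                    MAX_INDIRECT_PAGES = (256 + 511)/512 = 1
 *
 *   64K guest pages: XEN_PAGES_PER_SEGMENT = 65536/4096 = 16
 *                    SEGS_PER_INDIRECT_FRAME = 512/16 = 32
 *                    MAX_INDIRECT_PAGES = (256 + 31)/32 = 8
 *
 * i.e. a 64K guest consumes indirect frames 16 times faster, since each
 * of its segments spans 16 Xen pages.
 */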
/* Not a real protocol.  Used to generate ring structs which contain
 * the elements common to all protocols only.  This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
	char dummy;
};
/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_x86_32_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_32_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_32_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));
/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
	uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_x86_64_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	uint64_t       nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_64_request_other {
	uint8_t        _pad1;
	blkif_vdev_t   _pad2;
	uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
	uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));
struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS, this value
	 * is also exported to the guest (via xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_64_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);
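/*
 * For reference: each DEFINE_RING_TYPES(name, req_t, rsp_t) invocation
 * expands, roughly, to three types:
 *
 *	struct name_sring;       (the ring page shared with the frontend)
 *	struct name_front_ring;  (frontend bookkeeping over that page)
 *	struct name_back_ring;   (backend bookkeeping over that page)
 *
 * The three invocations above thus provide the common/x86_32/x86_64 back
 * ring types used by the union below; the native struct blkif_back_ring
 * comes from the DEFINE_RING_TYPES() in <xen/interface/io/blkif.h>.
 */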
union blkif_back_rings {
	struct blkif_back_ring        native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};
enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};
/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
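/*
 * A sketch of how the protocol is negotiated (illustrative; the actual
 * code lives in the xenbus connect path): the frontend writes a
 * "protocol" node with one of the XEN_IO_PROTO_ABI_* strings from
 * <xen/interface/io/protocols.h>, and the backend picks the matching
 * enum value, falling back to BLKIF_PROTOCOL_DEFAULT:
 *
 *	char protocol[64];
 *
 *	if (xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
 *			 "%63s", protocol) <= 0)
 *		blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
 *	else if (!strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
 *		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
 *	else if (!strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
 *		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
 *	else if (!strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
 *		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
 *	else
 *		return -ENOSYS;
 */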
struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;
/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE	2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE		0
/*
 * This persistent grant has been used recently; the flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, so we know the grant was in use
 * not long ago.
 */
#define PERSISTENT_GNT_WAS_ACTIVE	1
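/*
 * Lifecycle sketch for these flags (illustrative, not the exact backend
 * code): a grant is marked ACTIVE while a request is using it, and
 * WAS_ACTIVE is recorded on release so the LRU purge can spare it:
 *
 *	if (test_and_set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags))
 *		return NULL;		(already in use by another request)
 *	...
 *	set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);
 *	clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);
 *
 * where "gnt" is a struct persistent_gnt (defined below) whose flags
 * field is the DECLARE_BITMAP() sized by PERSISTENT_GNT_FLAGS_SIZE.
 */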
/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE		32
struct persistent_gnt {
	struct page		*page;
	grant_ref_t		gnt;
	grant_handle_t		handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node		node;
	struct list_head	remove_node;
};
/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* Private fields. */
	spinlock_t		blk_ring_lock;

	wait_queue_head_t	wq;
	atomic_t		inflight;
	/* One thread per blkif ring. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* Tree to store persistent grants. */
	spinlock_t		pers_gnts_lock;
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* statistics */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	/* Used by the kworker that offloads work from the persistent purge. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	spinlock_t		free_pages_lock;
	int			free_pages_num;
	struct list_head	free_pages;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	struct xen_blkif	*blkif;
};
struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	atomic_t		refcnt;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;

	struct work_struct	free_work;
	unsigned int		nr_ring_pages;
	/* All rings for this device. */
	struct xen_blkif_ring	*rings;
	unsigned int		nr_rings;
};
struct seg_buf {
	unsigned long	offset;
	unsigned int	nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring	*ring;
	u64			id;
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page		*unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};
#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			  get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
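/*
 * Reference counting sketch (illustrative): a reference is held for as
 * long as anything may still touch the blkif, and the final put defers
 * teardown to free_work so it never runs in atomic context:
 *
 *	xen_blkif_get(blkif);	(e.g. when a ring thread starts)
 *	... service requests ...
 *	xen_blkif_put(blkif);	(may schedule_work(&blkif->free_work))
 *
 * The do/while (0) wrapper makes xen_blkif_put() safe to use as a single
 * statement, e.g. as an unbraced if/else branch.
 */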
struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		/* Validate the copied nr_segments, not the shared page. */
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		/* Validate the copied nr_segments, not the shared page. */
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		/* Validate the copied nr_segments, not the shared page. */
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		/* Validate the copied nr_segments, not the shared page. */
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
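/*
 * Usage sketch (illustrative; the real consumer lives in the backend's
 * ring-processing loop): each request is copied out of the shared ring
 * into a native struct blkif_request before being touched, translating
 * when the frontend speaks the 32- or 64-bit x86 ABI. "rc" here stands
 * for the ring's request-consumer index:
 *
 *	struct blkif_request req;
 *
 *	switch (ring->blkif->blk_protocol) {
 *	case BLKIF_PROTOCOL_NATIVE:
 *		memcpy(&req, RING_GET_REQUEST(&ring->blk_rings.native, rc),
 *		       sizeof(req));
 *		break;
 *	case BLKIF_PROTOCOL_X86_32:
 *		blkif_get_x86_32_req(&req,
 *				RING_GET_REQUEST(&ring->blk_rings.x86_32, rc));
 *		break;
 *	case BLKIF_PROTOCOL_X86_64:
 *		blkif_get_x86_64_req(&req,
 *				RING_GET_REQUEST(&ring->blk_rings.x86_64, rc));
 *		break;
 *	}
 *
 * Copying to a private struct first matters because the shared page stays
 * writable by the frontend while the backend validates the request.
 */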
#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */