/******************************************************************************
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
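
/*
 * Note: the order is the log2 of the ring's page count, so e.g. an order
 * of 2 yields a 4-page (16KB at 4KB granularity) shared ring, allowing
 * more requests to be in flight at once than a single-page ring.
 */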

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
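
/*
 * Worked example with the defaults above: xen_blkif_max_pgrants = 1056,
 * so purge_persistent_gnt() targets (1056 / 100) * 5 = 50 grants per
 * pass, plus whatever the backend currently holds above the limit.
 */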

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
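
/*
 * Like list_for_each_entry_safe(), but for an rbtree: the next node is
 * fetched into 'n' before the loop body runs, so the body may rb_erase()
 * and free 'pos' without breaking the iteration.
 */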

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never run concurrently.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited("freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}
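
/*
 * The purge below works in passes over the rbtree of persistent grants:
 * a first pass only evicts grants that are neither in use (ACTIVE) nor
 * were used since the last purge (WAS_ACTIVE); if that does not free
 * enough entries, a second pass also takes the WAS_ACTIVE ones. A final
 * walk clears the WAS_ACTIVE bits for the next interval. Evicted grants
 * are moved to persistent_purge_list and unmapped from the workqueue
 * handler above.
 */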
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false, clean_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_busy(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if ((num_clean == 0) ||
	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
		return;

	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_grant (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug("Going to purge %u persistent grants\n", num_clean);

	BUG_ON(!list_empty(&blkif->persistent_purge_list));
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used && !clean_used) {
		pr_debug("Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	if (!clean_used) {
		pr_debug("Finished scanning for grants to clean, removing used flag\n");
		clean_used = true;
		goto purge_list;
	}

	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	schedule_work(&blkif->persistent_purge_work);
	pr_debug("Purged %u/%u\n", (total - num_clean), total);
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free structure.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
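
/*
 * The interrupt handler above only flags that work is pending and wakes
 * the per-device kernel thread; the actual ring processing happens in
 * process context, in xen_blkif_schedule() below.
 */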

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		" | ds %4llu | pg: %4u/%4d\n",
		current->comm, blkif->st_oo_req,
		blkif->st_rd_req, blkif->st_wr_req,
		blkif->st_f_req, blkif->st_ds_req,
		blkif->persistent_gnt_c,
		xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(blkif);
		if (ret > 0)
			blkif->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(blkif->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Drain pending purge work */
	flush_work(&blkif->persistent_purge_work);

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif *blkif)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif *blkif,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif *blkif = pending_req->blkif;

	/* BUG_ON used to reproduce existing behaviour,
	   but is this the best way to deal with this? */
	BUG_ON(result);

	put_free_pages(blkif, data->pages, data->count);
	make_response(blkif, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(blkif, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * is not a problem with our current implementation
	 * because we can assure there's no thread waiting on
	 * pending_free_wq if there's a drain going on, but it has
	 * to be taken into account if the current model is changed.
	 */
	if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif *blkif = req->blkif;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}
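
/*
 * Map the front-end's grant references into this domain, batching the
 * work in chunks of up to BLKIF_MAX_SEGMENTS_PER_REQUEST: each round
 * first reuses already-mapped persistent grants, collects the remaining
 * grefs into map[] for a single gnttab_map_refs() call, and then walks
 * the results to record the handles and, when there is room, promote
 * freshly mapped grants to persistent ones.
 */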
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and setup
	 * assign map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(blkif, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}
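
/*
 * Indirect descriptors: rather than embedding the segments in the ring
 * request itself, the front-end passes grant references to pages that
 * hold arrays of struct blkif_request_segment. Each such page describes
 * SEGS_PER_INDIRECT_FRAME segments, which is how a single request can
 * carry up to MAX_INDIRECT_SEGMENTS segments.
 */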
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		  BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}
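
/*
 * Wait until all in-flight I/O for this backend has drained. This is
 * used before issuing the WRITE_FLUSH for a BLKIF_OP_WRITE_BARRIER,
 * since a barrier must not overtake the writes that preceded it.
 */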
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&blkif->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		   (error == -EOPNOTSUPP)) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_error);
	bio_put(bio);
}
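
/*
 * Requests are copied out of the shared ring before being validated
 * (note the barrier() after the copy below): the front-end could still
 * modify a request that sits on the ring, so every sanity check must
 * operate on the private copy only.
 */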
/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, blkif->vbd.pdevice);
		return -EACCES;
	}

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_segs   = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);
	atomic_inc(&blkif->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}
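
/*
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY below sets 'notify' only when the
 * front-end has asked (via its rsp_event index) to be told about new
 * responses, which keeps event-channel interrupts to a minimum.
 */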
/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");