/* drivers/block/xen-blkback/blkback.c */
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
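
/*
 * Worked example of the purge sizing done in purge_persistent_gnt(): with the
 * default xen_blkif_max_pgrants of 1056, integer arithmetic gives
 * (1056 / 100) * LRU_PERCENT_CLEAN = 50 grants per pass, plus however many
 * grants the backend is currently over the limit.
 */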

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}
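
/*
 * get_free_page() prefers pages already sitting in the per-backend pool and
 * only falls back to alloc_xenballooned_pages() when the pool is empty, so
 * the common case avoids going back to the balloon driver.
 */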

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
		                                   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}
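
/*
 * Note that shrink_free_pagepool() drops free_pages_lock around each
 * free_xenballooned_pages() call; batching in groups of NUM_BATCH_FREE_PAGES
 * keeps the lock hold time bounded while the pool is trimmed.
 */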

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
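
/*
 * foreach_grant_safe() is the rbtree analogue of list_for_each_entry_safe():
 * the next node is fetched before the loop body runs, so the current entry
 * may be rb_erase()d and kfree()d during the walk, which is exactly what
 * free_persistent_gnts() and purge_persistent_gnt() below rely on.
 */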

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}
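
/*
 * The tree is keyed on the grant reference, so lookups in
 * get_persistent_gnt() are O(log n). -EBUSY from add_persistent_gnt() means
 * the max_persistent_grants limit has been reached; the caller in
 * xen_blkbk_map() then keeps using the grant as a regular, non-persistent
 * mapping.
 */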

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
		                                  struct persistent_gnt,
		                                  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}

static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false, clean_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if ((num_clean == 0) ||
	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
		return;

	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_grant (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	BUG_ON(!list_empty(&blkif->persistent_purge_list));
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used && !clean_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	if (!clean_used) {
		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
		clean_used = true;
		goto purge_list;
	}

	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}
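
/*
 * Summary of the purge above: a first pass removes grants that are neither in
 * use (PERSISTENT_GNT_ACTIVE) nor recently used (PERSISTENT_GNT_WAS_ACTIVE);
 * if that is not enough, a second pass also takes recently used ones; a final
 * pass clears PERSISTENT_GNT_WAS_ACTIVE so the next interval starts with a
 * clean slate. The actual unmapping is deferred to
 * xen_blkbk_unmap_purged_grants() via persistent_purge_work.
 */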

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(blkif);
		if (ret > 0)
			blkif->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(blkif->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Drain pending purge work */
	flush_work(&blkif->persistent_purge_work);

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}
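
/*
 * xen_blkif_schedule() is the per-backend kthread main loop: it sleeps until
 * the frontend rings the event channel (or the LRU interval expires), drains
 * the ring via do_block_io_op(), then does housekeeping (purging persistent
 * grants and shrinking the free page pool) before going back to sleep.
 */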

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif *blkif)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
			                        invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}

static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				put_free_pages(blkif, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}
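
/*
 * xen_blkbk_map() processes the segments in batches of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST grant-map operations (the "again" loop
 * above); indirect requests can carry more segments than fit in a single
 * batch, hence the last_map/map_until bookkeeping.
 */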

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_pages,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
		pending_req->segments[n]->gref = segments[i].gref;
		seg[n].nsec = segments[i].last_sect -
			segments[i].first_sect + 1;
		seg[n].offset = (segments[i].first_sect << 9);
		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (segments[i].last_sect < segments[i].first_sect)) {
			rc = -EINVAL;
			goto unmap;
		}
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}
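
/*
 * For BLKIF_OP_INDIRECT the request only carries grant references to pages
 * that themselves hold the segment descriptors; xen_blkbk_parse_indirect()
 * maps those pages read-only, copies each segment's gref/first_sect/last_sect
 * into seg[], and unmaps the indirect pages again before the data pages are
 * mapped.
 */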

static int dispatch_discard_io(struct xen_blkif *blkif,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&blkif->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bh->b_end_io()
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		struct xen_blkif *blkif = pending_req->blkif;

		xen_blkbk_unmap(blkif,
				pending_req->segments,
				pending_req->nr_pages);
		make_response(blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		free_req(blkif, pending_req);
		/*
		 * Make sure the request is freed before releasing blkif,
		 * or there could be a race between free_req and the
		 * cleanup done in xen_blkif_free during shutdown.
		 *
		 * NB: we might try to wake up pending_free_wq before
		 * drain_complete (in case there's a drain going on); this is
		 * not a problem with the current implementation because we
		 * can be sure there is no thread waiting on pending_free_wq
		 * while a drain is in progress, but it has to be taken into
		 * account if the current model is changed.
		 */
		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
			complete(&blkif->drain_complete);
		}
		xen_blkif_put(blkif);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, the number of them, grant references, etc.),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, blkif->vbd.pdevice);
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}
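
/*
 * Return value convention: __do_block_io_op() returns > 0 when it stopped
 * with requests still on the ring (the caller should come back), 0 when the
 * ring was drained, and -EACCES when the frontend published a bogus producer
 * index, in which case xen_blkif_schedule() parks the thread on shutdown_wq.
 */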

static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' (or
 * several) and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
			 req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through: a barrier is handled as a flush plus a drain */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev           = req->u.rw.handle;
		preq.sector_number = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev           = req->u.indirect.handle;
		preq.sector_number = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(blkif);
	atomic_inc(&blkif->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
	                pending_req->nr_pages);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}
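
/*
 * Note on bio sizing above: bio_add_page() is retried with a fresh bio
 * whenever the current one is full, so a single blkif request may fan out
 * into several bios; pendcnt tracks them all and the response is only sent
 * once the last bio completes in __end_block_io_op().
 */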

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");