/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a slot for the call and its
 * arguments, along with storage for any data the arguments point to
 * (for passing pointers to structures, etc).  When the multicall is
 * actually issued, all the space for the commands and their arguments
 * is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers gets full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back; the flush will BUG if any of the multicalls
 * fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
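/*
 * Usage sketch: a caller reserves a slot with __xen_mc_entry(), fills
 * in the multicall_entry it returns, and the call is issued on the
 * next flush.  The hypercall number and argument here are only
 * placeholders to show the shape of the interface:
 *
 *	struct multicall_space mcs = __xen_mc_entry(0);
 *
 *	mcs.mc->op = __HYPERVISOR_fpu_taskswitch;
 *	mcs.mc->args[0] = 0;
 *
 *	xen_mc_flush();
 */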
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
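/* Sizes of the per-cpu batch: MC_BATCH multicall slots, plus MC_ARGS
   u64s of extra argument space shared by the whole batch. */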
#define MC_BATCH	32
#define MC_ARGS		(MC_BATCH * 16 / sizeof(u64))

struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
	struct multicall_entry debug[MC_BATCH];
	u64 args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
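/*
 * Issue everything queued in this cpu's buffer: hand the batch to the
 * hypervisor, check the per-call results, reset the buffer for reuse
 * and run any registered callbacks.  Must be called with preemption
 * disabled.
 */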
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	if (b->mcidx) {
		/* Keep a copy of the entries so op/args are still
		   available for the error report below. */
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			for (i = 0; i < b->mcidx; i++) {
				printk(" call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
				       i + 1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result);
			}
		}

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	local_irq_restore(flags);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	BUG_ON(ret);
}
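/*
 * Reserve a multicall slot plus 'args' bytes of extra argument space
 * in this cpu's buffer, flushing the current batch first if either
 * would overflow.  The returned pointers are only valid until the
 * next flush.
 */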
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argspace = (args + sizeof(u64) - 1) / sizeof(u64);

	BUG_ON(preemptible());
	BUG_ON(argspace > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (b->argidx + argspace) > MC_ARGS)
		xen_mc_flush();

	ret.mc = &b->entries[b->mcidx];
	b->mcidx++;
	ret.args = &b->args[b->argidx];
	b->argidx += argspace;

	return ret;
}
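/*
 * Register a callback to be run (on this cpu) when the current batch
 * of multicalls is flushed.
 */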
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH)
		xen_mc_flush();

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}