/* arch/x86/xen/multicalls.c */
/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for space which is
 * pointed to by the arguments (for passing pointers to structures,
 * etc).  When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"
31 #define MC_BATCH 32
33 #define MC_DEBUG 1
35 #define MC_ARGS (MC_BATCH * 16)
38 struct mc_buffer {
39 struct multicall_entry entries[MC_BATCH];
40 #if MC_DEBUG
41 struct multicall_entry debug[MC_BATCH];
42 void *caller[MC_BATCH];
43 #endif
44 unsigned char args[MC_ARGS];
45 struct callback {
46 void (*fn)(void *);
47 void *data;
48 } callbacks[MC_BATCH];
49 unsigned mcidx, argidx, cbidx;
52 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
53 DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
/* flush reasons 0- slots, 1- args, 2- callbacks */
enum flush_reasons
{
	FL_SLOTS,
	FL_ARGS,
	FL_CALLBACKS,

	FL_N_REASONS		/* number of reasons; sizes the stats array */
};
65 #ifdef CONFIG_XEN_DEBUG_FS
66 #define NHYPERCALLS 40 /* not really */
68 static struct {
69 unsigned histo[MC_BATCH+1];
71 unsigned issued;
72 unsigned arg_total;
73 unsigned hypercalls;
74 unsigned histo_hypercalls[NHYPERCALLS];
76 unsigned flush[FL_N_REASONS];
77 } mc_stats;
79 static u8 zero_stats;
81 static inline void check_zero(void)
83 if (unlikely(zero_stats)) {
84 memset(&mc_stats, 0, sizeof(mc_stats));
85 zero_stats = 0;
89 static void mc_add_stats(const struct mc_buffer *mc)
91 int i;
93 check_zero();
95 mc_stats.issued++;
96 mc_stats.hypercalls += mc->mcidx;
97 mc_stats.arg_total += mc->argidx;
99 mc_stats.histo[mc->mcidx]++;
100 for(i = 0; i < mc->mcidx; i++) {
101 unsigned op = mc->entries[i].op;
102 if (op < NHYPERCALLS)
103 mc_stats.histo_hypercalls[op]++;
107 static void mc_stats_flush(enum flush_reasons idx)
109 check_zero();
111 mc_stats.flush[idx]++;
114 #else /* !CONFIG_XEN_DEBUG_FS */
116 static inline void mc_add_stats(const struct mc_buffer *mc)
120 static inline void mc_stats_flush(enum flush_reasons idx)
123 #endif /* CONFIG_XEN_DEBUG_FS */
125 void xen_mc_flush(void)
127 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
128 int ret = 0;
129 unsigned long flags;
130 int i;
132 BUG_ON(preemptible());
134 /* Disable interrupts in case someone comes in and queues
135 something in the middle */
136 local_irq_save(flags);
138 mc_add_stats(b);
140 if (b->mcidx) {
141 #if MC_DEBUG
142 memcpy(b->debug, b->entries,
143 b->mcidx * sizeof(struct multicall_entry));
144 #endif
146 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
147 BUG();
148 for (i = 0; i < b->mcidx; i++)
149 if (b->entries[i].result < 0)
150 ret++;
152 #if MC_DEBUG
153 if (ret) {
154 printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
155 ret, smp_processor_id());
156 dump_stack();
157 for (i = 0; i < b->mcidx; i++) {
158 printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
159 i+1, b->mcidx,
160 b->debug[i].op,
161 b->debug[i].args[0],
162 b->entries[i].result,
163 b->caller[i]);
166 #endif
168 b->mcidx = 0;
169 b->argidx = 0;
170 } else
171 BUG_ON(b->argidx != 0);
173 for (i = 0; i < b->cbidx; i++) {
174 struct callback *cb = &b->callbacks[i];
176 (*cb->fn)(cb->data);
178 b->cbidx = 0;
180 local_irq_restore(flags);
182 WARN_ON(ret);
185 struct multicall_space __xen_mc_entry(size_t args)
187 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
188 struct multicall_space ret;
189 unsigned argidx = roundup(b->argidx, sizeof(u64));
191 BUG_ON(preemptible());
192 BUG_ON(b->argidx > MC_ARGS);
194 if (b->mcidx == MC_BATCH ||
195 (argidx + args) > MC_ARGS) {
196 mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
197 xen_mc_flush();
198 argidx = roundup(b->argidx, sizeof(u64));
201 ret.mc = &b->entries[b->mcidx];
202 #ifdef MC_DEBUG
203 b->caller[b->mcidx] = __builtin_return_address(0);
204 #endif
205 b->mcidx++;
206 ret.args = &b->args[argidx];
207 b->argidx = argidx + args;
209 BUG_ON(b->argidx > MC_ARGS);
210 return ret;
213 struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
215 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
216 struct multicall_space ret = { NULL, NULL };
218 BUG_ON(preemptible());
219 BUG_ON(b->argidx > MC_ARGS);
221 if (b->mcidx == 0)
222 return ret;
224 if (b->entries[b->mcidx - 1].op != op)
225 return ret;
227 if ((b->argidx + size) > MC_ARGS)
228 return ret;
230 ret.mc = &b->entries[b->mcidx - 1];
231 ret.args = &b->args[b->argidx];
232 b->argidx += size;
234 BUG_ON(b->argidx > MC_ARGS);
235 return ret;
238 void xen_mc_callback(void (*fn)(void *), void *data)
240 struct mc_buffer *b = &__get_cpu_var(mc_buffer);
241 struct callback *cb;
243 if (b->cbidx == MC_BATCH) {
244 mc_stats_flush(FL_CALLBACKS);
245 xen_mc_flush();
248 cb = &b->callbacks[b->cbidx++];
249 cb->fn = fn;
250 cb->data = data;
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

/*
 * Export the multicall statistics under <xen debugfs root>/multicalls.
 * Writing a non-zero byte to 'zero_stats' resets all counters on the
 * next stats update (see check_zero).
 */
static int __init xen_mc_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mc_debug = debugfs_create_dir("multicalls", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
	debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls);
	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
				     mc_stats.histo, MC_BATCH);
	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
				     mc_stats.histo_hypercalls, NHYPERCALLS);
	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
				     mc_stats.flush, FL_N_REASONS);

	return 0;
}
fs_initcall(xen_mc_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */