/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for any data the
 * arguments point to (for passing pointers to structures, etc.).
 * When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
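/*
 * Usage sketch (illustrative only, based on callers elsewhere in
 * arch/x86/xen; the xen_mc_batch()/xen_mc_issue() helpers live in
 * multicalls.h, MULTI_update_va_mapping() in asm/xen/hypercall.h):
 *
 *	struct multicall_space mcs;
 *
 *	xen_mc_batch();
 *	mcs = __xen_mc_entry(0);
 *	MULTI_update_va_mapping(mcs.mc, vaddr, pteval, 0);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * xen_mc_issue() traps into the hypervisor immediately only when the
 * vcpu is not in the given lazy mode; otherwise the entry stays
 * queued until the next flush.
 */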
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/debugfs.h>

#include <asm/xen/hypercall.h>

#include "multicalls.h"
#include "debugfs.h"

#define MC_BATCH	32

#define MC_ARGS		(MC_BATCH * 16)
struct mc_buffer {
	struct multicall_entry entries[MC_BATCH];
	struct multicall_entry debug[MC_BATCH];
	void *caller[MC_BATCH];
	unsigned char args[MC_ARGS];
	struct callback {
		void (*fn)(void *);
		void *data;
	} callbacks[MC_BATCH];
	unsigned mcidx, argidx, cbidx;
};
static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);

/* flush reasons 0- slots, 1- args, 2- callbacks */
enum flush_reasons
{
	FL_SLOTS,
	FL_ARGS,
	FL_CALLBACKS,

	FL_N_REASONS
};
#ifdef CONFIG_XEN_DEBUG_FS
#define NHYPERCALLS	40		/* not really */

static struct {
	unsigned histo[MC_BATCH+1];

	unsigned issued;
	unsigned arg_total;
	unsigned hypercalls;
	unsigned histo_hypercalls[NHYPERCALLS];

	unsigned flush[FL_N_REASONS];
} mc_stats;

static u8 zero_stats;
static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mc_stats, 0, sizeof(mc_stats));
		zero_stats = 0;
	}
}
static void mc_add_stats(const struct mc_buffer *mc)
{
	int i;

	check_zero();

	mc_stats.issued++;
	mc_stats.hypercalls += mc->mcidx;
	mc_stats.arg_total += mc->argidx;

	mc_stats.histo[mc->mcidx]++;
	for (i = 0; i < mc->mcidx; i++) {
		unsigned op = mc->entries[i].op;
		if (op < NHYPERCALLS)
			mc_stats.histo_hypercalls[op]++;
	}
}
static void mc_stats_flush(enum flush_reasons idx)
{
	check_zero();

	mc_stats.flush[idx]++;
}
#else  /* !CONFIG_XEN_DEBUG_FS */

static inline void mc_add_stats(const struct mc_buffer *mc)
{
}

static inline void mc_stats_flush(enum flush_reasons idx)
{
}

#endif	/* CONFIG_XEN_DEBUG_FS */
void xen_mc_flush(void)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	int ret = 0;
	unsigned long flags;
	int i;

	BUG_ON(preemptible());

	/* Disable interrupts in case someone comes in and queues
	   something in the middle */
	local_irq_save(flags);

	mc_add_stats(b);

	if (b->mcidx) {
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));

		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
			BUG();
		for (i = 0; i < b->mcidx; i++)
			if (b->entries[i].result < 0)
				ret++;

		if (ret) {
			printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
			       ret, smp_processor_id());
			for (i = 0; i < b->mcidx; i++) {
				printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
				       i + 1, b->mcidx,
				       b->debug[i].op,
				       b->debug[i].args[0],
				       b->entries[i].result,
				       b->caller[i]);
			}
		}

		b->mcidx = 0;
		b->argidx = 0;
	} else
		BUG_ON(b->argidx != 0);

	for (i = 0; i < b->cbidx; i++) {
		struct callback *cb = &b->callbacks[i];

		(*cb->fn)(cb->data);
	}
	b->cbidx = 0;

	local_irq_restore(flags);

	BUG_ON(ret);
}
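/*
 * Note (illustrative): most code does not call xen_mc_flush()
 * directly; it goes through xen_mc_issue() in multicalls.h, which
 * only flushes when the vcpu isn't batching under a lazy mode.  Code
 * that must observe the side effects of everything queued so far can
 * flush explicitly, e.g.:
 *
 *	preempt_disable();
 *	xen_mc_flush();
 *	preempt_enable();
 */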
struct multicall_space __xen_mc_entry(size_t args)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret;
	unsigned argidx = roundup(b->argidx, sizeof(u64));

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == MC_BATCH ||
	    (argidx + args) > MC_ARGS) {
		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
		xen_mc_flush();
		argidx = roundup(b->argidx, sizeof(u64));
	}

	ret.mc = &b->entries[b->mcidx];
	b->caller[b->mcidx] = __builtin_return_address(0);
	b->mcidx++;
	ret.args = &b->args[argidx];
	b->argidx = argidx + args;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}
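/*
 * Example (a sketch modelled on callers in arch/x86/xen/mmu.c, not
 * part of this file): reserve an entry plus argument space, fill in
 * the request, and let xen_mc_issue() decide when to trap:
 *
 *	struct multicall_space mcs = xen_mc_entry(sizeof(struct mmu_update));
 *	struct mmu_update *u = mcs.args;
 *
 *	u->ptr = virt_to_machine(ptep).maddr;
 *	u->val = pte_val_ma(pteval);
 *	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */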
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct multicall_space ret = { NULL, NULL };

	BUG_ON(preemptible());
	BUG_ON(b->argidx > MC_ARGS);

	if (b->mcidx == 0)
		return ret;

	if (b->entries[b->mcidx - 1].op != op)
		return ret;

	if ((b->argidx + size) > MC_ARGS)
		return ret;

	ret.mc = &b->entries[b->mcidx - 1];
	ret.args = &b->args[b->argidx];
	b->argidx += size;

	BUG_ON(b->argidx > MC_ARGS);
	return ret;
}
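/*
 * Example (sketch, modelled on xen_extend_mmu_update() in
 * arch/x86/xen/mmu.c): try to append one more request to the previous
 * mmu_update multicall, falling back to a fresh entry when the
 * previous call doesn't match or argument space has run out:
 *
 *	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 *	if (mcs.mc != NULL)
 *		mcs.mc->args[1]++;
 *	else {
 *		mcs = __xen_mc_entry(sizeof(*u));
 *		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
 *	}
 *	u = mcs.args;
 *	*u = *update;
 */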
void xen_mc_callback(void (*fn)(void *), void *data)
{
	struct mc_buffer *b = &__get_cpu_var(mc_buffer);
	struct callback *cb;

	if (b->cbidx == MC_BATCH) {
		mc_stats_flush(FL_CALLBACKS);
		xen_mc_flush();
	}

	cb = &b->callbacks[b->cbidx++];
	cb->fn = fn;
	cb->data = data;
}
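/*
 * Example (sketch, modelled on the pagetable-pinning code in
 * arch/x86/xen/mmu.c): defer work until the queued multicalls have
 * actually been issued, here dropping a page-table lock only after
 * the hypervisor has seen the queued updates:
 *
 *	static void do_unlock(void *v)
 *	{
 *		spinlock_t *ptl = v;
 *		spin_unlock(ptl);
 *	}
 *	...
 *	xen_mc_callback(do_unlock, ptl);
 */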
#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_mc_debug;

static int __init xen_mc_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mc_debug = debugfs_create_dir("multicalls", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats);

	debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued);
	debugfs_create_u32("hypercalls", 0444, d_mc_debug,
			   &mc_stats.hypercalls);
	debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total);

	xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug,
				     mc_stats.histo, MC_BATCH);
	xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug,
				     mc_stats.histo_hypercalls, NHYPERCALLS);
	xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug,
				     mc_stats.flush, FL_N_REASONS);

	return 0;
}
fs_initcall(xen_mc_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */