// SPDX-License-Identifier: GPL-2.0

/*
 * Xen hypercall batching.
 *
 * Xen allows multiple hypercalls to be issued at once, using the
 * multicall interface.  This allows the cost of trapping into the
 * hypervisor to be amortized over several calls.
 *
 * This file implements a simple interface for multicalls.  There's a
 * per-cpu buffer of outstanding multicalls.  When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * call and its arguments, along with storage for space which is
 * pointed to by the arguments (for passing pointers to structures,
 * etc).  When the multicall is actually issued, all the space for the
 * commands and allocated memory is freed for reuse.
 *
 * Multicalls are flushed whenever any of the buffers get full, or
 * when explicitly requested.  There's no way to get per-multicall
 * return results back.  It will BUG if any of the multicalls fail.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
23 #include <linux/percpu.h>
24 #include <linux/hardirq.h>
25 #include <linux/debugfs.h>
27 #include <asm/xen/hypercall.h>
29 #include "multicalls.h"
36 #define MC_ARGS (MC_BATCH * 16)
40 unsigned mcidx
, argidx
, cbidx
;
41 struct multicall_entry entries
[MC_BATCH
];
43 struct multicall_entry debug
[MC_BATCH
];
44 void *caller
[MC_BATCH
];
46 unsigned char args
[MC_ARGS
];
50 } callbacks
[MC_BATCH
];
53 static DEFINE_PER_CPU(struct mc_buffer
, mc_buffer
);
54 DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags
);
56 void xen_mc_flush(void)
58 struct mc_buffer
*b
= this_cpu_ptr(&mc_buffer
);
59 struct multicall_entry
*mc
;
64 BUG_ON(preemptible());
66 /* Disable interrupts in case someone comes in and queues
67 something in the middle */
68 local_irq_save(flags
);
70 trace_xen_mc_flush(b
->mcidx
, b
->argidx
, b
->cbidx
);
73 memcpy(b
->debug
, b
->entries
,
74 b
->mcidx
* sizeof(struct multicall_entry
));
80 BUG_ON(b
->argidx
!= 0);
84 /* Singleton multicall - bypass multicall machinery
85 and just do the call directly. */
88 mc
->result
= xen_single_call(mc
->op
, mc
->args
[0], mc
->args
[1],
89 mc
->args
[2], mc
->args
[3],
95 if (HYPERVISOR_multicall(b
->entries
, b
->mcidx
) != 0)
97 for (i
= 0; i
< b
->mcidx
; i
++)
98 if (b
->entries
[i
].result
< 0)
103 pr_err("%d of %d multicall(s) failed: cpu %d\n",
104 ret
, b
->mcidx
, smp_processor_id());
105 for (i
= 0; i
< b
->mcidx
; i
++) {
106 if (b
->entries
[i
].result
< 0) {
108 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
112 b
->entries
[i
].result
,
115 pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
118 b
->entries
[i
].args
[0],
119 b
->entries
[i
].result
);
128 for (i
= 0; i
< b
->cbidx
; i
++) {
129 struct callback
*cb
= &b
->callbacks
[i
];
135 local_irq_restore(flags
);
138 struct multicall_space
__xen_mc_entry(size_t args
)
140 struct mc_buffer
*b
= this_cpu_ptr(&mc_buffer
);
141 struct multicall_space ret
;
142 unsigned argidx
= roundup(b
->argidx
, sizeof(u64
));
144 trace_xen_mc_entry_alloc(args
);
146 BUG_ON(preemptible());
147 BUG_ON(b
->argidx
>= MC_ARGS
);
149 if (unlikely(b
->mcidx
== MC_BATCH
||
150 (argidx
+ args
) >= MC_ARGS
)) {
151 trace_xen_mc_flush_reason((b
->mcidx
== MC_BATCH
) ?
152 XEN_MC_FL_BATCH
: XEN_MC_FL_ARGS
);
154 argidx
= roundup(b
->argidx
, sizeof(u64
));
157 ret
.mc
= &b
->entries
[b
->mcidx
];
159 b
->caller
[b
->mcidx
] = __builtin_return_address(0);
162 ret
.args
= &b
->args
[argidx
];
163 b
->argidx
= argidx
+ args
;
165 BUG_ON(b
->argidx
>= MC_ARGS
);
169 struct multicall_space
xen_mc_extend_args(unsigned long op
, size_t size
)
171 struct mc_buffer
*b
= this_cpu_ptr(&mc_buffer
);
172 struct multicall_space ret
= { NULL
, NULL
};
174 BUG_ON(preemptible());
175 BUG_ON(b
->argidx
>= MC_ARGS
);
177 if (unlikely(b
->mcidx
== 0 ||
178 b
->entries
[b
->mcidx
- 1].op
!= op
)) {
179 trace_xen_mc_extend_args(op
, size
, XEN_MC_XE_BAD_OP
);
183 if (unlikely((b
->argidx
+ size
) >= MC_ARGS
)) {
184 trace_xen_mc_extend_args(op
, size
, XEN_MC_XE_NO_SPACE
);
188 ret
.mc
= &b
->entries
[b
->mcidx
- 1];
189 ret
.args
= &b
->args
[b
->argidx
];
192 BUG_ON(b
->argidx
>= MC_ARGS
);
194 trace_xen_mc_extend_args(op
, size
, XEN_MC_XE_OK
);
199 void xen_mc_callback(void (*fn
)(void *), void *data
)
201 struct mc_buffer
*b
= this_cpu_ptr(&mc_buffer
);
204 if (b
->cbidx
== MC_BATCH
) {
205 trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK
);
209 trace_xen_mc_callback(fn
, data
);
211 cb
= &b
->callbacks
[b
->cbidx
++];