No empty .Rs/.Re
[netbsd-mini2440.git] / sys / kern / subr_xcall.c
blobcfff0a52178ec0ce759cc57fb079720a6d9ce627
1 /* $NetBSD: subr_xcall.c,v 1.10 2009/03/05 13:18:51 uebayasi Exp $ */
3 /*-
4 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 * Cross call support
35 * Background
37 * Sometimes it is necessary to modify hardware state that is tied
38 * directly to individual CPUs (such as a CPU's local timer), and
39 * these updates can not be done remotely by another CPU. The LWP
40 * requesting the update may be unable to guarantee that it will be
41 * running on the CPU where the update must occur, when the update
42 * occurs.
44 * Additionally, it's sometimes necessary to modify per-CPU software
45 * state from a remote CPU. Where these update operations are so
46 * rare or the access to the per-CPU data so frequent that the cost
47 * of using locking or atomic operations to provide coherency is
48 * prohibitive, another way must be found.
50 * Cross calls help to solve these types of problem by allowing
51 * any CPU in the system to request that an arbitrary function be
52 * executed on any other CPU.
54 * Implementation
56 * A slow mechanism for making 'low priority' cross calls is
57 * provided. The function to be executed runs on the remote CPU
58 * within a bound kthread. No queueing is provided, and the
59 * implementation uses global state. The function being called may
60 * block briefly on locks, but in doing so must be careful to not
61 * interfere with other cross calls in the system. The function is
62 * called with thread context and not from a soft interrupt, so it
63 * can ensure that it is not interrupting other code running on the
64 * CPU, and so has exclusive access to the CPU. Since this facility
65 * is heavyweight, it's expected that it will not be used often.
67 * Cross calls must not allocate memory, as the pagedaemon uses
68 * them (and memory allocation may need to wait on the pagedaemon).
70 * Future directions
72 * Add a low-overhead mechanism to run cross calls in interrupt
73 * context (XC_HIGHPRI).
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.10 2009/03/05 13:18:51 uebayasi Exp $");
79 #include <sys/types.h>
80 #include <sys/param.h>
81 #include <sys/xcall.h>
82 #include <sys/mutex.h>
83 #include <sys/condvar.h>
84 #include <sys/evcnt.h>
85 #include <sys/kthread.h>
86 #include <sys/cpu.h>
/* Forward declarations for the low-priority cross-call machinery. */
static void xc_thread(void *);
static uint64_t xc_lowpri(u_int, xcfunc_t, void *, void *, struct cpu_info *);

/*
 * Global cross-call state.  Only one cross call may be outstanding at a
 * time ("no queueing", see the header comment); xc_lock serializes
 * submitters and protects every field below.
 */
static kmutex_t xc_lock;		/* protects all state below */
static xcfunc_t xc_func;		/* function to run on the target CPU(s) */
static void *xc_arg1;			/* first argument passed to xc_func */
static void *xc_arg2;			/* second argument passed to xc_func */
static kcondvar_t xc_busy;		/* signalled as calls drain/complete */
static struct evcnt xc_unicast_ev;	/* statistics: unicast requests */
static struct evcnt xc_broadcast_ev;	/* statistics: broadcast requests */
static uint64_t xc_headp;		/* calls requested (ticket "head") */
static uint64_t xc_tailp;		/* calls picked up by xcall threads */
static uint64_t xc_donep;		/* calls fully completed */
103 * xc_init_cpu:
105 * Initialize the cross-call subsystem. Called once for each CPU
106 * in the system as they are attached.
108 void
109 xc_init_cpu(struct cpu_info *ci)
111 static bool again = false;
112 int error;
114 if (!again) {
115 /* Autoconfiguration will prevent re-entry. */
116 again = true;
117 mutex_init(&xc_lock, MUTEX_DEFAULT, IPL_NONE);
118 cv_init(&xc_busy, "xcallbsy");
119 evcnt_attach_dynamic(&xc_unicast_ev, EVCNT_TYPE_MISC, NULL,
120 "crosscall", "unicast");
121 evcnt_attach_dynamic(&xc_broadcast_ev, EVCNT_TYPE_MISC, NULL,
122 "crosscall", "broadcast");
125 cv_init(&ci->ci_data.cpu_xcall, "xcall");
126 error = kthread_create(PRI_XCALL, KTHREAD_MPSAFE, ci, xc_thread,
127 NULL, NULL, "xcall/%u", ci->ci_index);
128 if (error != 0)
129 panic("xc_init_cpu: error %d", error);
133 * xc_broadcast:
135 * Trigger a call on all CPUs in the system.
137 uint64_t
138 xc_broadcast(u_int flags, xcfunc_t func, void *arg1, void *arg2)
141 if ((flags & XC_HIGHPRI) != 0) {
142 panic("xc_broadcast: no high priority crosscalls yet");
143 } else {
144 return xc_lowpri(flags, func, arg1, arg2, NULL);
149 * xc_unicast:
151 * Trigger a call on one CPU.
153 uint64_t
154 xc_unicast(u_int flags, xcfunc_t func, void *arg1, void *arg2,
155 struct cpu_info *ci)
158 if ((flags & XC_HIGHPRI) != 0) {
159 panic("xc_unicast: no high priority crosscalls yet");
160 } else {
161 KASSERT(ci != NULL);
162 return xc_lowpri(flags, func, arg1, arg2, ci);
/*
 * xc_lowpri:
 *
 *	Trigger a low priority call on one or more CPUs.  ci == NULL
 *	means broadcast to all running CPUs.  Returns the ticket value
 *	(new xc_headp) that xc_wait() sleeps on.
 */
static uint64_t
xc_lowpri(u_int flags, xcfunc_t func, void *arg1, void *arg2,
    struct cpu_info *ci)
{
	CPU_INFO_ITERATOR cii;
	uint64_t where;

	mutex_enter(&xc_lock);
	/*
	 * The facility supports only one outstanding request at a time
	 * (xc_func/xc_arg* are global): wait until every previously
	 * posted call has at least been picked up by its xcall thread.
	 */
	while (xc_headp != xc_tailp)
		cv_wait(&xc_busy, &xc_lock);
	xc_arg1 = arg1;
	xc_arg2 = arg2;
	xc_func = func;
	if (ci == NULL) {
		/* Broadcast: post one call to every running CPU. */
		xc_broadcast_ev.ev_count++;
		for (CPU_INFO_FOREACH(cii, ci)) {
			/* Skip CPUs that are not (yet) running. */
			if ((ci->ci_schedstate.spc_flags & SPCF_RUNNING) == 0)
				continue;
			xc_headp += 1;
			ci->ci_data.cpu_xcall_pending = true;
			cv_signal(&ci->ci_data.cpu_xcall);
		}
	} else {
		/* Unicast: post a single call to the chosen CPU. */
		xc_unicast_ev.ev_count++;
		xc_headp += 1;
		ci->ci_data.cpu_xcall_pending = true;
		cv_signal(&ci->ci_data.cpu_xcall);
	}
	KASSERT(xc_tailp < xc_headp);
	where = xc_headp;
	mutex_exit(&xc_lock);

	/* The caller passes this ticket to xc_wait(). */
	return where;
}
207 * xc_wait:
209 * Wait for a cross call to complete.
211 void
212 xc_wait(uint64_t where)
215 if (xc_donep >= where)
216 return;
218 mutex_enter(&xc_lock);
219 while (xc_donep < where)
220 cv_wait(&xc_busy, &xc_lock);
221 mutex_exit(&xc_lock);
/*
 * xc_thread:
 *
 *	One thread per-CPU to dispatch low priority calls.  Created
 *	bound to its CPU by xc_init_cpu(); never returns.
 */
static void
xc_thread(void *cookie)
{
	void *arg1, *arg2;
	struct cpu_info *ci;
	xcfunc_t func;

	ci = curcpu();

	mutex_enter(&xc_lock);
	for (;;) {
		while (!ci->ci_data.cpu_xcall_pending) {
			/*
			 * All posted calls have been picked up: wake any
			 * submitter blocked in xc_lowpri() (and waiters
			 * in xc_wait() re-checking xc_donep).
			 */
			if (xc_headp == xc_tailp)
				cv_broadcast(&xc_busy);
			cv_wait(&ci->ci_data.cpu_xcall, &xc_lock);
			/* Thread is bound, so it must still be on its CPU. */
			KASSERT(ci == curcpu());
		}
		ci->ci_data.cpu_xcall_pending = false;
		/* Snapshot the request while still holding xc_lock. */
		func = xc_func;
		arg1 = xc_arg1;
		arg2 = xc_arg2;
		xc_tailp++;
		mutex_exit(&xc_lock);

		/* Run the call unlocked; it may block briefly on locks. */
		(*func)(arg1, arg2);

		mutex_enter(&xc_lock);
		xc_donep++;
	}
	/* NOTREACHED */
}