drivers/xen/events/events_2l.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)

/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)

/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

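/* Per-CPU bitmap of event channels currently bound to that CPU. */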
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

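/* Re-bind an event channel: clear it in old_cpu's per-CPU mask, set it in cpu's. */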
static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));
	set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

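/*
 * Accessors for the global pending and mask bitmaps in the shared_info
 * page. These use the sync_ bitops because the hypervisor and other
 * vCPUs update the same words concurrently.
 */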
static void evtchn_2l_clear_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

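/*
 * Unmask a port. A port bound to this CPU is unmasked directly; a port
 * bound to another CPU, or a pending event on an HVM domain, takes the
 * slow path via the EVTCHNOP_unmask hypercall (see below).
 */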
static void evtchn_2l_unmask(evtchn_port_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	smp_wmb();	/* All writes before unmask must be visible. */

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };

		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

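/*
 * Per-CPU position of the last event processed, so the next call to
 * evtchn_2l_handle_events() resumes the scan just after it instead of
 * always favouring low-numbered ports.
 */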
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

	/* Timer interrupt has highest priority. */
	irq = irq_from_virq(cpu, VIRQ_TIMER);
	if (irq != -1) {
		evtchn_port_t evtchn = evtchn_from_irq(irq);
		word_idx = evtchn / BITS_PER_LONG;
		bit_idx = evtchn % BITS_PER_LONG;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			evtchn_port_t port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			handle_irq_for_port(port, ctrl);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
						(word_idx+1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_l1i twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

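/* Debug interrupt handler: dump per-vCPU and global event channel state. */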
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       get_evtchn_to_irq(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

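/* Across suspend/resume, start again with cleared per-CPU binding masks. */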
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
				EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

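/* 2-level ABI implementation of the event channel operations. */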
static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels      = evtchn_2l_max_channels,
	.nr_channels       = evtchn_2l_max_channels,
	.bind_to_cpu       = evtchn_2l_bind_to_cpu,
	.clear_pending     = evtchn_2l_clear_pending,
	.set_pending       = evtchn_2l_set_pending,
	.is_pending        = evtchn_2l_is_pending,
	.test_and_set_mask = evtchn_2l_test_and_set_mask,
	.mask              = evtchn_2l_mask,
	.unmask            = evtchn_2l_unmask,
	.handle_events     = evtchn_2l_handle_events,
	.resume            = evtchn_2l_resume,
};

void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}