/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>
	.text
	.align	32

sun4v_cpu_mondo:
	/* Head offset in %g2, tail offset in %g4.
	 * If they are the same, no work.
	 */
	mov	INTRQ_CPU_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_CPU_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_cpu_mondo_queue_empty
	 nop
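
	/* Roughly what the check above does, as a C-style sketch (the
	 * queue_reg() helper is illustrative only, standing in for an
	 * ASI_QUEUE register read):
	 *
	 *	u64 head = queue_reg(INTRQ_CPU_MONDO_HEAD);
	 *	u64 tail = queue_reg(INTRQ_CPU_MONDO_TAIL);
	 *	if (head == tail)
	 *		return;		// nothing pending, just retry the trap
	 */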

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get smp_processor_id() into %g3 */
	sethi	%hi(trap_block), %g5
	or	%g5, %lo(trap_block), %g5
	sub	%g4, %g5, %g3
	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3

	/* Increment cpu_mondo_counter[smp_processor_id()] */
	sethi	%hi(cpu_mondo_counter), %g5
	or	%g5, %lo(cpu_mondo_counter), %g5
	sllx	%g3, 3, %g3
	add	%g5, %g3, %g5
	ldx	[%g5], %g3
	add	%g3, 1, %g3
	stx	%g3, [%g5]

	/* Get CPU mondo queue base phys address into %g7.  */
	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

	/* Now get the cross-call arguments and handler PC, same
	 * layout as sun4u:
	 *
	 * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
	 *                  high half is context arg to MMU flushes, into %g5
	 * 2nd 64-bit word: 64-bit arg, load into %g1
	 * 3rd 64-bit word: 64-bit arg, load into %g7
	 */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g3
	add	%g2, 0x8, %g2
	srlx	%g3, 32, %g5
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	add	%g2, 0x8, %g2
	srl	%g3, 0, %g3
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g7
	add	%g2, 0x40 - 0x8 - 0x8, %g2
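
	/* Layout of the 64-byte entry consumed above, as a sketch (field
	 * names are illustrative, not taken from a real header):
	 *
	 *	struct cpu_mondo_entry {
	 *		u64	pc_and_ctx;	// low 32 bits: handler PC (-> %g3),
	 *					// high 32 bits: MMU context (-> %g5)
	 *		u64	arg0;		// -> %g1
	 *		u64	arg1;		// -> %g7
	 *		u64	unused[5];	// rest of the 64-byte entry
	 *	};
	 */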

	/* Update queue head pointer.  */
	lduw	[%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_CPU_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync
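
	/* The head update above is a masked wrap, roughly (assuming the
	 * qmask value is queue_size_in_bytes - 1, as the HEAD/TAIL byte
	 * offsets suggest):
	 *
	 *	head = (head + 0x40) & qmask;	// one 64-byte entry consumed
	 */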

	/* Dispatch to the handler PC we loaded into %g3 above.  */
	jmpl	%g3, %g0
	 nop

sun4v_cpu_mondo_queue_empty:
	retry

sun4v_dev_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_DEVICE_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_DEVICE_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_dev_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g4.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4

	/* Get DEV mondo queue base phys address into %g5.  */
	ldx	[%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

	/* Load IVEC into %g3.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3

	/* Advance past this 64-byte queue entry.  */
	add	%g2, 0x40, %g2

	/* XXX There can be a full 64-byte block of data here.
	 * XXX This is how we can get at MSI vector data.
	 * XXX Currently we do not capture this, but when we do we'll
	 * XXX need to add a 64-byte storage area in the struct ino_bucket
	 * XXX or the struct irq_desc.
	 */

	/* Update queue head pointer, this frees up some registers.  */
	lduw	[%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
	and	%g2, %g4, %g2

	mov	INTRQ_DEVICE_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)

	/* For VIRQs, cookie is encoded as ~bucket_phys_addr  */
	brlz,pt	%g3, 1f
	 xnor	%g3, %g0, %g4

	/* Get __pa(&ivector_table[IVEC]) into %g4.  */
	sethi	%hi(ivector_table_pa), %g4
	ldx	[%g4 + %lo(ivector_table_pa)], %g4
	sllx	%g3, 4, %g3
	add	%g4, %g3, %g4

	/* Push the bucket onto the per-cpu irq work list.  */
1:	ldx	[%g1], %g2
	stxa	%g2, [%g4] ASI_PHYS_USE_EC
	stx	%g4, [%g1]

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
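
	/* A sketch of the dispatch above (names like bucket_pa and
	 * irq_work_pa describe what the registers hold; they are not
	 * symbols from this file):
	 *
	 *	if ((s64)ivec < 0)		// VIRQ: cookie is ~bucket PA
	 *		bucket_pa = ~ivec;
	 *	else				// hardware INO: index ivector_table[]
	 *		bucket_pa = ivector_table_pa + ivec * 16;
	 *	bucket->chain = *irq_work_pa;	// push bucket onto per-cpu list
	 *	*irq_work_pa = bucket_pa;	// then raise PIL_DEVICE_IRQ softint
	 */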

sun4v_dev_mondo_queue_empty:
	retry

sun4v_res_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_RESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_res_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get RES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

	/* Get RES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_res_mondo_queue_full
	 nop
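
	/* Sketch of the check above: the first u64 of the destination slot
	 * in the kernel buffer doubles as an "in use" marker (assuming the
	 * C consumer clears it once it has copied the report out):
	 *
	 *	if (kbuf_entry->first_word != 0)
	 *		goto queue_full;	// C side has not drained this slot yet
	 */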

	lduw	[%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
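
	/* The unrolled copy above is equivalent to this sketch, with both
	 * buffers accessed through their physical addresses (ASI_PHYS_USE_EC):
	 *
	 *	u64 *src = (u64 *)(mondo_queue_pa + head);
	 *	u64 *dst = (u64 *)(kbuf_pa + head);
	 *	for (int i = 0; i < 8; i++)
	 *		dst[i] = src[i];	// one 64-byte error report
	 */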

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_RESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_resum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop
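
	/* What this trap path hands off to, roughly (a sketch; the real
	 * prototype lives in the kernel's C sources, not in this file):
	 *
	 *	void sun4v_resum_error(struct pt_regs *regs, unsigned long offset);
	 *
	 * %o0 is the pt_regs area on the stack and %o1 is the queue-entry
	 * offset that etrap preserved from %g4 into %l4.
	 */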

sun4v_res_mondo_queue_empty:
	retry

sun4v_res_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_RESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_resum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo:
	/* Head offset in %g2, tail offset in %g4.  */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	ldxa	[%g2] ASI_QUEUE, %g2
	mov	INTRQ_NONRESUM_MONDO_TAIL, %g4
	ldxa	[%g4] ASI_QUEUE, %g4
	cmp	%g2, %g4
	be,pn	%xcc, sun4v_nonres_mondo_queue_empty
	 nop

	/* Get &trap_block[smp_processor_id()] into %g3.  */
	ldxa	[%g0] ASI_SCRATCHPAD, %g3
	sub	%g3, TRAP_PER_CPU_FAULT_INFO, %g3

	/* Get NONRES mondo queue base phys address into %g5.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

	/* Get NONRES kernel buffer base phys address into %g7.  */
	ldx	[%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

	/* If the first word is non-zero, queue is full.  */
	ldxa	[%g7 + %g2] ASI_PHYS_USE_EC, %g1
	brnz,pn	%g1, sun4v_nonres_mondo_queue_full
	 nop

	lduw	[%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4

	/* Remember this entry's offset in %g1.  */
	mov	%g2, %g1

	/* Copy 64-byte queue entry into kernel buffer.  */
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2
	ldxa	[%g5 + %g2] ASI_PHYS_USE_EC, %g3
	stxa	%g3, [%g7 + %g2] ASI_PHYS_USE_EC
	add	%g2, 0x08, %g2

	/* Update queue head pointer.  */
	and	%g2, %g4, %g2

	mov	INTRQ_NONRESUM_MONDO_HEAD, %g4
	stxa	%g2, [%g4] ASI_QUEUE
	membar	#Sync

	/* Disable interrupts and save register state so we can call
	 * C code.  The etrap handling will leave %g4 in %l4 for us
	 * when it's done.
	 */
	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	mov	%g1, %g4
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	/* Log the event.  */
	add	%sp, PTREGS_OFF, %o0
	call	sun4v_nonresum_error
	 mov	%l4, %o1

	/* Return from trap.  */
	ba,pt	%xcc, rtrap_irq
	 nop

sun4v_nonres_mondo_queue_empty:
	retry

sun4v_nonres_mondo_queue_full:
	/* The queue is full, consolidate our damage by setting
	 * the head equal to the tail.  We'll just trap again otherwise.
	 * Call C code to log the event.
	 */
	mov	INTRQ_NONRESUM_MONDO_HEAD, %g2
	stxa	%g4, [%g2] ASI_QUEUE
	membar	#Sync

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	ba,pt	%xcc, etrap_irq
	 rd	%pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	 nop
#endif
	call	sun4v_nonresum_overflow
	 add	%sp, PTREGS_OFF, %o0

	ba,pt	%xcc, rtrap_irq
	 nop