/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 *      Xen paravirtualization
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 *      Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 *  For each entry, the comment is as follows:
 *
 *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /          /
 *  entry number ---------/         /                  /          /
 *  size of the entry -------------/                  /          /
 *  vector name -------------------------------------/          /
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#if 0
# define PSR_DEFAULT_BITS       psr.ac
#else
# define PSR_DEFAULT_BITS       0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)   mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif
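/*
 * When the DBG_FAULT tracing above is enabled, ar.k2 behaves as an
 * eight-entry shift register of vector numbers: each fault shifts the
 * previous value left by 8 bits and adds its own vector number, so the
 * most recent vector sits in the low byte.  Illustrative example: low
 * 16 bits of 0x080b mean a Dirty-bit fault (8) was followed by a Break
 * fault (11).
 */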
#include "minstate.h"

#define FAULT(n)                                                                        \
        mov r31=pr;                                                                     \
        mov r19=n;;                     /* prepare to save predicates */                \
        br.sptk.many dispatch_to_fault_handler
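/*
 * Note the convention assumed throughout this file: before dispatching
 * to a heavyweight handler, the interrupted predicates are saved in r31
 * and the vector number is placed in r19; dispatch_to_fault_handler
 * relies on exactly this state.  All the short "reserved" entries below
 * funnel through FAULT().
 */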
        .section .text..ivt,"ax"

        .align 32768    // align on 32KB boundary
        .global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
        DBG_FAULT(0)
        /*
         * The VHPT vector is invoked when the TLB entry for the virtual page table
         * is missing.  This happens only as a result of a previous
         * (the "original") TLB miss, which may either be caused by an instruction
         * fetch or a data access (or non-access).
         *
         * What we do here is normal TLB miss handling for the _original_ miss,
         * followed by inserting the TLB entry for the virtual page table page
         * that the VHPT walker was attempting to access.  The latter gets
         * inserted as long as the page table entries above the pte level have
         * valid mappings for the faulting address.  The TLB entry for the
         * original miss gets inserted only if the pte entry indicates that the
         * page is present.
         *
         * do_page_fault gets invoked in the following cases:
         *      - the faulting virtual address uses unimplemented address bits
         *      - the faulting virtual address has no valid page table mapping
         */
        MOV_FROM_IFA(r16)                       // get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
        movl r18=PAGE_SHIFT
        MOV_FROM_ITIR(r25)
#endif
        ;;
        RSM_PSR_DT                              // use physical addressing for data
        mov r31=pr                              // save the predicate registers
        mov r19=IA64_KR(PT_BASE)                // get page table base address
        shl r21=r16,3                           // shift bit 60 into sign bit
        shr.u r17=r16,61                        // get the region number into r17
        ;;
        shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
        extr.u r26=r25,2,6
        ;;
        cmp.ne p8,p0=r18,r26
        sub r27=r26,r18
        ;;
(p8)    dep r25=r18,r25,2,6
(p8)    shr r22=r22,r27
#endif
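        /*
         * Worked example for the hugetlb adjustment above (illustrative,
         * assuming 16KB base pages, i.e. PAGE_SHIFT=14): a fault in a 4MB
         * huge page has ITIR.ps=22, so r27=22-14=8 and r22 is shifted right
         * by an extra 8 bits, which makes the pgd/pmd/pte index arithmetic
         * below come out as if the region were mapped with base pages.
         */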
        ;;
        cmp.eq p6,p7=5,r17                      // is IFA pointing into region 5?
        shr.u r18=r22,PGDIR_SHIFT               // get bottom portion of pgd index bits
        ;;
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in place

        srlz.d
        LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

        .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=pgd_offset for region[0-4]
        cmp.eq p7,p6=0,r21                      // unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
        shr.u r28=r22,PUD_SHIFT                 // shift pud index into position
#else
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
#endif
        ;;
        ld8 r17=[r17]                           // get *pgd (may be 0)
        ;;
(p7)    cmp.eq p6,p7=r17,r0                     // was pgd_present(*pgd) == NULL?
#if CONFIG_PGTABLE_LEVELS == 4
        dep r28=r28,r17,3,(PAGE_SHIFT-3)        // r28=pud_offset(pgd,addr)
        ;;
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
(p7)    ld8 r29=[r28]                           // get *pud (may be 0)
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r29,r0            // was pud_present(*pud) == NULL?
        dep r17=r18,r29,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pud,addr)
#else
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pgd,addr)
#endif
        ;;
(p7)    ld8 r20=[r17]                           // get *pmd (may be 0)
        shr.u r19=r22,PAGE_SHIFT                // shift pte index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r20,r0            // was pmd_present(*pmd) == NULL?
        dep r21=r19,r20,3,(PAGE_SHIFT-3)        // r21=pte_offset(pmd,addr)
        ;;
(p7)    ld8 r18=[r21]                           // read *pte
        MOV_FROM_ISR(r19)                       // cr.isr bit 32 tells us if this is an insn miss
        ;;
(p7)    tbit.z p6,p7=r18,_PAGE_P_BIT            // page present bit cleared?
        MOV_FROM_IHA(r22)                       // get the VHPT address that caused the TLB miss
        ;;                                      // avoid RAW on p7
(p7)    tbit.nz.unc p10,p11=r19,32              // is it an instruction TLB miss?
        dep r23=0,r20,0,PAGE_SHIFT              // clear low bits to get page address
        ;;
        ITC_I_AND_D(p10, p11, r18, r24)         // insert the instruction TLB entry and
                                                // insert the data TLB entry
(p6)    br.cond.spnt.many page_fault            // handle bad address/page not present (page fault)
        MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
        MOV_TO_ITIR(p8, r25, r24)               // change to default page-size for VHPT
#endif

        /*
         * Now compute and insert the TLB entry for the virtual page table.  We never
         * execute in a page table page so there is no need to set the exception deferral
         * bit.
         */
        adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
        ;;
        ITC_D(p7, r24, r25)
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        /*
         * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
         * between reading the pagetable and the "itc".  If so, flush the entry we
         * inserted and retry.  At this point, we have:
         *
         * r28 = equivalent of pud_offset(pgd, ifa)
         * r17 = equivalent of pmd_offset(pud, ifa)
         * r21 = equivalent of pte_offset(pmd, ifa)
         *
         * r29 = *pud
         * r20 = *pmd
         * r18 = *pte
         */
        ld8 r25=[r21]                           // read *pte again
        ld8 r26=[r17]                           // read *pmd again
#if CONFIG_PGTABLE_LEVELS == 4
        ld8 r19=[r28]                           // read *pud again
#endif
        cmp.ne p6,p7=r0,r0
        ;;
        cmp.ne.or.andcm p6,p7=r26,r20           // did *pmd change
#if CONFIG_PGTABLE_LEVELS == 4
        cmp.ne.or.andcm p6,p7=r19,r29           // did *pud change
#endif
        mov r27=PAGE_SHIFT<<2
        ;;
(p6)    ptc.l r22,r27                           // purge PTE page translation
(p7)    cmp.ne.or.andcm p6,p7=r25,r18           // did *pte change
        ;;
(p6)    ptc.l r16,r27                           // purge translation
#endif

        mov pr=r31,-1                           // restore predicate registers
        RFI
END(vhpt_miss)
        .org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
        DBG_FAULT(1)
        /*
         * The ITLB handler accesses the PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
         * mode, walk the page table, and then re-execute the PTE read and
         * go on normally after that.
         */
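        /*
         * Concretely: if the ld8 at label 1 below itself misses (no DTLB
         * entry for the linear page table page), the CPU raises the Data
         * Nested TLB fault, and nested_dtlb_miss walks the page table in
         * physical mode and branches back to r30 (= label 1) with r17
         * now holding the *physical* address of the PTE.
         */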
        MOV_FROM_IFA(r16)                       // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
.itlb_fault:
        MOV_FROM_IHA(r17)                       // get virtual address of PTE
        movl r30=1f                             // load nested fault continuation point
        ;;
1:      ld8 r18=[r17]                           // read *pte
        ;;
        mov b0=r29
        tbit.z p6,p0=r18,_PAGE_P_BIT            // page present bit cleared?
(p6)    br.cond.spnt page_fault
        ;;
        ITC_I(p0, r18, r19)
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r19=[r17]                           // read *pte again and see if same
        mov r20=PAGE_SHIFT<<2                   // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
(p7)    ptc.l r16,r20
#endif
        mov pr=r31,-1
        RFI
END(itlb_miss)
        .org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
        DBG_FAULT(2)
        /*
         * The DTLB handler accesses the PTE via the virtually mapped linear
         * page table.  If a nested TLB miss occurs, we switch into physical
         * mode, walk the page table, and then re-execute the PTE read and
         * go on normally after that.
         */
        MOV_FROM_IFA(r16)                       // get virtual address
        mov r29=b0                              // save b0
        mov r31=pr                              // save predicates
dtlb_fault:
        MOV_FROM_IHA(r17)                       // get virtual address of PTE
        movl r30=1f                             // load nested fault continuation point
        ;;
1:      ld8 r18=[r17]                           // read *pte
        ;;
        mov b0=r29
        tbit.z p6,p0=r18,_PAGE_P_BIT            // page present bit cleared?
(p6)    br.cond.spnt page_fault
        ;;
        ITC_D(p0, r18, r19)
        ;;
#ifdef CONFIG_SMP
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r19=[r17]                           // read *pte again and see if same
        mov r20=PAGE_SHIFT<<2                   // setup page size for purge
        ;;
        cmp.ne p7,p0=r18,r19
        ;;
(p7)    ptc.l r16,r20
#endif
        mov pr=r31,-1
        RFI
END(dtlb_miss)
        .org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
        DBG_FAULT(3)
        MOV_FROM_IFA(r16)       // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        MOV_FROM_IPSR(p0, r21)
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r31=pr
        ;;
#ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number into r22
        ;;
        cmp.gt p8,p0=6,r22                      // user mode
        ;;
        THASH(p8, r17, r16, r23)
        ;;
        MOV_TO_IHA(p8, r17, r23)
(p8)    mov r29=b0                              // save b0
(p8)    br.cond.dptk .itlb_fault
#endif
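        /*
         * What follows builds an identity translation for kernel accesses:
         * the PPN is taken straight from the faulting address, PAGE_KERNEL
         * supplies the control bits, and the memory attribute is forced to
         * uncacheable when the access was to region 6 (address bit 61
         * clear), cacheable for region 7.
         */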
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
        and r19=r19,r16         // clear ed, reserved bits, and PTE control bits
        shr.u r18=r16,57        // move address bit 61 to bit 4
        ;;
        andcm r18=0x10,r18      // bit 4=~address-bit(61)
        cmp.ne p8,p0=r0,r23     // psr.cpl != 0?
        or r19=r17,r19          // insert PTE control bits into r19
        ;;
        or r19=r19,r18          // set bit 4 (uncached) if the access was to region 6
(p8)    br.cond.spnt page_fault
        ;;
        ITC_I(p0, r19, r18)     // insert the TLB entry
        mov pr=r31,-1
        RFI
END(alt_itlb_miss)
        .org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
        DBG_FAULT(4)
        MOV_FROM_IFA(r16)       // get address that caused the TLB miss
        movl r17=PAGE_KERNEL
        MOV_FROM_ISR(r20)
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        MOV_FROM_IPSR(p0, r21)
        mov r31=pr
        mov r24=PERCPU_ADDR
        ;;
#ifdef CONFIG_DISABLE_VHPT
        shr.u r22=r16,61                        // get the region number into r22
        ;;
        cmp.gt p8,p0=6,r22                      // access to region 0-5
        ;;
        THASH(p8, r17, r16, r25)
        ;;
        MOV_TO_IHA(p8, r17, r25)
(p8)    mov r29=b0                              // save b0
(p8)    br.cond.dptk dtlb_fault
#endif
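        /*
         * Besides the region 6/7 identity mapping (as in alt_itlb_miss),
         * two special cases are folded into this path: accesses to the
         * per-CPU window are translated via the per-CPU base kept in ar.k3
         * (IA64_KR(PER_CPU_DATA)), and faulting speculative loads/lfetches
         * are handled by setting psr.ed instead of inserting a translation,
         * so the speculative access completes with a deferred-exception
         * (NaT) result rather than a page fault.
         */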
        cmp.ge p10,p11=r16,r24                  // access to per_cpu_data?
        tbit.z p12,p0=r16,61                    // access to region 6?
        mov r25=PERCPU_PAGE_SHIFT << 2
        mov r26=PERCPU_PAGE_SIZE
        nop.m 0
        nop.b 0
        ;;
(p10)   mov r19=IA64_KR(PER_CPU_DATA)
(p11)   and r19=r19,r16                         // clear non-ppn fields
        extr.u r23=r21,IA64_PSR_CPL0_BIT,2      // extract psr.cpl
        and r22=IA64_ISR_CODE_MASK,r20          // get the isr.code field
        tbit.nz p6,p7=r20,IA64_ISR_SP_BIT       // is speculation bit on?
        tbit.nz p9,p0=r20,IA64_ISR_NA_BIT       // is non-access bit on?
        ;;
(p10)   sub r19=r19,r26
        MOV_TO_ITIR(p10, r25, r24)
        cmp.ne p8,p0=r0,r23
(p9)    cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22  // check isr.code field
(p12)   dep r17=-1,r17,4,1                      // set ma=UC for region 6 addr
(p8)    br.cond.spnt page_fault

        dep r21=-1,r21,IA64_PSR_ED_BIT,1
        ;;
        or r19=r19,r17          // insert PTE control bits into r19
        MOV_TO_IPSR(p6, r21, r24)
        ;;
        ITC_D(p7, r19, r18)     // insert the TLB entry
        mov pr=r31,-1
        RFI
END(alt_dtlb_miss)
        .org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
        /*
         * In the absence of kernel bugs, we get here when the virtually mapped linear
         * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
         * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
         * table is missing, a nested TLB miss fault is triggered and control is
         * transferred to this point.  When this happens, we look up the pte for the
         * faulting address by walking the page table in physical mode and return to the
         * continuation point passed in register r30 (or call page_fault if the address is
         * not mapped).
         *
         * Input:       r16:    faulting address
         *              r29:    saved b0
         *              r30:    continuation address
         *              r31:    saved pr
         *
         * Output:      r17:    physical address of PTE of faulting address
         *              r29:    saved b0
         *              r30:    continuation address
         *              r31:    saved pr
         *
         * Clobbered:   b0, r18, r19, r21, r22, psr.dt (cleared)
         */
        RSM_PSR_DT                              // switch to using physical data addressing
        mov r19=IA64_KR(PT_BASE)                // get the page table base address
        shl r21=r16,3                           // shift bit 60 into sign bit
        MOV_FROM_ITIR(r18)
        ;;
        shr.u r17=r16,61                        // get the region number into r17
        extr.u r18=r18,2,6                      // get the faulting page size
        ;;
        cmp.eq p6,p7=5,r17                      // is faulting address in region 5?
        add r22=-PAGE_SHIFT,r18                 // adjustment for hugetlb address
        add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
        ;;
        shr.u r22=r16,r22
        shr.u r18=r16,r18
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3        // put region number bits in place

        srlz.d
        LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

        .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
        ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)        // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)        // r17=pgd_offset for region[0-4]
        cmp.eq p7,p6=0,r21                      // unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
        shr.u r18=r22,PUD_SHIFT                 // shift pud index into position
#else
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
#endif
        ;;
        ld8 r17=[r17]                           // get *pgd (may be 0)
        ;;
(p7)    cmp.eq p6,p7=r17,r0                     // was pgd_present(*pgd) == NULL?
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=p[u|m]d_offset(pgd,addr)
        ;;
#if CONFIG_PGTABLE_LEVELS == 4
(p7)    ld8 r17=[r17]                           // get *pud (may be 0)
        shr.u r18=r22,PMD_SHIFT                 // shift pmd index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0            // was pud_present(*pud) == NULL?
        dep r17=r18,r17,3,(PAGE_SHIFT-3)        // r17=pmd_offset(pud,addr)
        ;;
#endif
(p7)    ld8 r17=[r17]                           // get *pmd (may be 0)
        shr.u r19=r22,PAGE_SHIFT                // shift pte index into position
        ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0            // was pmd_present(*pmd) == NULL?
        dep r17=r19,r17,3,(PAGE_SHIFT-3)        // r17=pte_offset(pmd,addr);
(p6)    br.cond.spnt page_fault
        mov b0=r30
        br.sptk.many b0                         // return to continuation point
END(nested_dtlb_miss)
        .org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
        DBG_FAULT(6)
        FAULT(6)
END(ikey_miss)

        .org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
        DBG_FAULT(7)
        FAULT(7)
END(dkey_miss)
        .org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
        DBG_FAULT(8)
        /*
         * What we do here is to simply turn on the dirty bit in the PTE.  We need to
         * update both the page-table and the TLB entry.  To efficiently access the PTE,
         * we address it through the virtual page table.  Most likely, the TLB entry for
         * the relevant virtual page table page is still present in the TLB so we can
         * normally do this without additional TLB misses.  In case the necessary virtual
         * page table TLB entry isn't present, we take a nested TLB miss hit where we look
         * up the physical address of the L3 PTE and then continue at label 1 below.
         */
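        /*
         * Note on the SMP path below: cmpxchg8.acq succeeds only if the PTE
         * is still the value we loaded.  If it changed underneath us (e.g.
         * due to a concurrent ptc.g), the TLB insert is skipped and the
         * re-read/ptc.l pair after dv_serialize_data purges any stale entry,
         * so the access simply refaults and retries.
         */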
        MOV_FROM_IFA(r16)                       // get the address that caused the fault
        movl r30=1f                             // load continuation point in case of nested fault
        ;;
        THASH(p0, r17, r16, r18)                // compute virtual address of L3 PTE
        mov r29=b0                              // save b0 in case of nested fault
        mov r31=pr                              // save pr
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
        tbit.z p7,p6 = r18,_PAGE_P_BIT          // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only update if page is present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only compare if page is present
        ;;
        ITC_D(p6, r25, r18)                     // install updated PTE
        ;;
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov b0=r29                              // restore b0
        mov ar.ccv=r28
#else
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        or r18=_PAGE_D|_PAGE_A,r18              // set the dirty and accessed bits
        mov b0=r29                              // restore b0
        ;;
        st8 [r17]=r18                           // store back updated PTE
        ITC_D(p0, r18, r16)                     // install updated PTE
#endif
        mov pr=r31,-1                           // restore pr
        RFI
END(dirty_bit)
        .org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
        DBG_FAULT(9)
        // Like Entry 8, except for instruction access
        MOV_FROM_IFA(r16)                       // get the address that caused the fault
        movl r30=1f                             // load continuation point in case of nested fault
        mov r31=pr                              // save predicates
#ifdef CONFIG_ITANIUM
        /*
         * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
         */
        MOV_FROM_IPSR(p0, r17)
        ;;
        MOV_FROM_IIP(r18)
        tbit.z p6,p0=r17,IA64_PSR_IS_BIT        // IA64 instruction set?
        ;;
(p6)    mov r16=r18                             // if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
        ;;
        THASH(p0, r17, r16, r18)                // compute virtual address of L3 PTE
        mov r29=b0                              // save b0 in case of nested fault
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_A,r18                      // set the accessed bit
        tbit.z p7,p6 = r18,_PAGE_P_BIT          // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only if page present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only if page present
        ;;
        ITC_I(p6, r25, r26)                     // install updated PTE
        ;;
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data

        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov b0=r29                              // restore b0
        mov ar.ccv=r28
#else /* !CONFIG_SMP */
        ;;
1:      ld8 r18=[r17]
        ;;
        or r18=_PAGE_A,r18                      // set the accessed bit
        mov b0=r29                              // restore b0
        ;;
        st8 [r17]=r18                           // store back updated PTE
        ITC_I(p0, r18, r16)                     // install updated PTE
#endif /* !CONFIG_SMP */
        mov pr=r31,-1
        RFI
END(iaccess_bit)
        .org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
        DBG_FAULT(10)
        // Like Entry 8, except for data access
        MOV_FROM_IFA(r16)                       // get the address that caused the fault
        movl r30=1f                             // load continuation point in case of nested fault
        ;;
        THASH(p0, r17, r16, r18)                // compute virtual address of L3 PTE
        mov r31=pr
        mov r29=b0                              // save b0 in case of nested fault
#ifdef CONFIG_SMP
        mov r28=ar.ccv                          // save ar.ccv
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        mov ar.ccv=r18                          // set compare value for cmpxchg
        or r25=_PAGE_A,r18                      // set the accessed bit
        tbit.z p7,p6 = r18,_PAGE_P_BIT          // Check present bit
        ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv       // Only if page is present
        mov r24=PAGE_SHIFT<<2
        ;;
(p6)    cmp.eq p6,p7=r26,r18                    // Only if page is present
        ;;
        ITC_D(p6, r25, r26)                     // install updated PTE
        /*
         * Tell the assembler's dependency-violation checker that the above "itc" instructions
         * cannot possibly affect the following loads:
         */
        dv_serialize_data
        ;;
        ld8 r18=[r17]                           // read PTE again
        ;;
        cmp.eq p6,p7=r18,r25                    // is it same as the newly installed
        ;;
(p7)    ptc.l r16,r24
        mov ar.ccv=r28
#else
        ;;
1:      ld8 r18=[r17]
        ;;                                      // avoid RAW on r18
        or r18=_PAGE_A,r18                      // set the accessed bit
        ;;
        st8 [r17]=r18                           // store back updated PTE
        ITC_D(p0, r18, r16)                     // install updated PTE
#endif
        mov b0=r29                              // restore b0
        mov pr=r31,-1
        RFI
END(daccess_bit)
        .org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
        /*
         * The streamlined system call entry/exit paths only save/restore the initial part
         * of pt_regs.  This implies that the callers of system-calls must adhere to the
         * normal procedure calling conventions.
         *
         *   Registers to be saved & restored:
         *      CR registers: cr.ipsr, cr.iip, cr.ifs
         *      AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
         *      others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
         *   Registers to be restored only:
         *      r8-r11: output value from the system call.
         *
         * During system call exit, scratch registers (including r15) are modified/cleared
         * to prevent leaking bits from kernel to user level.
         */
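        /*
         * For reference: user level enters here via break __IA64_BREAK_SYSCALL
         * (0x100000) with the syscall number in r15.  Syscall numbers are
         * biased by 1024, which is why 1024 is subtracted from r15 below
         * before indexing sys_call_table and added back afterwards.
         */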
        DBG_FAULT(11)
        mov.m r16=IA64_KR(CURRENT)              // M2 r16 <- current task (12 cyc)
        MOV_FROM_IPSR(p0, r29)                  // M2 (12 cyc)
        mov r31=pr                              // I0 (2 cyc)

        MOV_FROM_IIM(r17)                       // M2 (2 cyc)
        mov.m r27=ar.rsc                        // M2 (12 cyc)
        mov r18=__IA64_BREAK_SYSCALL            // A

        mov.m ar.rsc=0                          // M2
        mov.m r21=ar.fpsr                       // M2 (12 cyc)
        mov r19=b6                              // I0 (2 cyc)
        ;;
        mov.m r23=ar.bspstore                   // M2 (12 cyc)
        mov.m r24=ar.rnat                       // M2 (5 cyc)
        mov.i r26=ar.pfs                        // I0 (2 cyc)

        invala                                  // M0|1
        nop.m 0                                 // M
        mov r20=r1                              // A                    save r1

        nop.m 0
        movl r30=sys_call_table                 // X

        MOV_FROM_IIP(r28)                       // M2 (2 cyc)
        cmp.eq p0,p7=r18,r17                    // I0 is this a system call?
(p7)    br.cond.spnt non_syscall                // B  no ->
        //
        // From this point on, we are definitely on the syscall-path
        // and we can use (non-banked) scratch registers.
        //
///////////////////////////////////////////////////////////////////////
        mov r1=r16                              // A    move task-pointer to "addl"-addressable reg
        mov r2=r16                              // A    setup r2 for ia64_syscall_setup
        add r9=TI_FLAGS+IA64_TASK_SIZE,r16      // A    r9 = &current_thread_info()->flags

        adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
        adds r15=-1024,r15                      // A    subtract 1024 from syscall number
        mov r3=NR_syscalls - 1
        ;;
        ld1.bias r17=[r16]                      // M0|1 r17 = current->thread.on_ustack flag
        ld4 r9=[r9]                             // M0|1 r9 = current_thread_info()->flags
        extr.u r8=r29,41,2                      // I0   extract ei field from cr.ipsr

        shladd r30=r15,3,r30                    // A    r30 = sys_call_table + 8*(syscall-1024)
        addl r22=IA64_RBS_OFFSET,r1             // A    compute base of RBS
        cmp.leu p6,p7=r15,r3                    // A    syscall number in range?
        ;;

        lfetch.fault.excl.nt1 [r22]             // M0|1 prefetch RBS
(p6)    ld8 r30=[r30]                           // M0|1 load address of syscall entry point
        tnat.nz.or p7,p0=r15                    // I0   is syscall nr a NaT?

        mov.m ar.bspstore=r22                   // M2   switch to kernel RBS
        cmp.eq p8,p9=2,r8                       // A    isr.ei==2?
        ;;

(p8)    mov r8=0                                // A    clear ei to 0
(p7)    movl r30=sys_ni_syscall                 // X

(p8)    adds r28=16,r28                         // A    switch cr.iip to next bundle
(p9)    adds r8=1,r8                            // A    increment ei to next slot
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        ;;
        mov b6=r30                              // I0   setup syscall handler branch reg early
#else
        nop.i 0
        ;;
#endif

        mov.m r25=ar.unat                       // M2 (5 cyc)
        dep r29=r8,r29,41,2                     // I0   insert new ei into cr.ipsr
        adds r15=1024,r15                       // A    restore original syscall number
        //
        // If any of the above loads miss in L1D, we'll stall here until
        // the data arrives.
        //
///////////////////////////////////////////////////////////////////////
        st1 [r16]=r0                            // M2|3 clear current->thread.on_ustack flag
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        MOV_FROM_ITC(p0, p14, r30, r18)         // M    get cycle for accounting
#else
        mov b6=r30                              // I0   setup syscall handler branch reg early
#endif
        cmp.eq pKStk,pUStk=r0,r17               // A    were we on kernel stacks already?

        and r9=_TIF_SYSCALL_TRACEAUDIT,r9       // A    mask trace or audit
        mov r18=ar.bsp                          // M2 (12 cyc)
(pKStk) br.cond.spnt .break_fixup               // B    we're already in kernel-mode -- fix up RBS
        ;;
.back_from_break_fixup:
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
        cmp.eq p14,p0=r9,r0                     // A    are syscalls being traced/audited?
        br.call.sptk.many b7=ia64_syscall_setup // B

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        // mov.m r30=ar.itc is called in advance, and r13 is current
        add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13  // A
        add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13  // A
(pKStk) br.cond.spnt .skip_accounting           // B    unlikely skip
        ;;
        ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP   // M  get last stamp
        ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE   // M  time at leave
        ;;
        ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME   // M  cumulated stime
        ld8 r21=[r17]                           // M  cumulated utime
        sub r22=r19,r18                         // A  stime before leave
        ;;
        st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP   // M  update stamp
        sub r18=r30,r19                         // A  elapsed time in user
        ;;
        add r20=r20,r22                         // A  sum stime
        add r21=r21,r18                         // A  sum utime
        ;;
        st8 [r16]=r20                           // M  update stime
        st8 [r17]=r21                           // M  update utime
        ;;
.skip_accounting:
#endif
        mov ar.rsc=0x3                          // M2   set eager mode, pl 0, LE, loadrs=0
        nop 0
        BSW_1(r2, r14)                          // B (6 cyc) regs are saved, switch to bank 1
        ;;

        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2   now it's safe to re-enable intr.-collection
                                                // M0   ensure interruption collection is on
        movl r3=ia64_ret_from_syscall           // X
        ;;
        mov rp=r3                               // I0   set the real return addr
(p10)   br.cond.spnt.many ia64_ret_from_syscall // B    return if bad call-frame or r15 is a NaT

        SSM_PSR_I(p15, p15, r16)                // M2   restore psr.i
(p14)   br.call.sptk.many b6=b6                 // B    invoke syscall-handler (ignore return addr)
        br.cond.spnt.many ia64_trace_syscall    // B    do syscall-tracing thingamagic
        // NOT REACHED
///////////////////////////////////////////////////////////////////////
        // On entry, we optimistically assumed that we're coming from user-space.
        // For the rare cases where a system-call is done from within the kernel,
        // we fix things up at this point:
.break_fixup:
        add r1=-IA64_PT_REGS_SIZE,sp            // A    allocate space for pt_regs structure
        mov ar.rnat=r24                         // M2   restore kernel's AR.RNAT
        ;;
        mov ar.bspstore=r23                     // M2   restore kernel's AR.BSPSTORE
        br.cond.sptk .back_from_break_fixup
END(break_fault)
        .org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
        /* interrupt handler has become too big to fit this area. */
        br.sptk.many __interrupt
END(interrupt)

        .org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
        DBG_FAULT(13)
        FAULT(13)

        .org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
        DBG_FAULT(14)
        FAULT(14)
        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         *
         * ia64_syscall_setup() is a separate subroutine so that it can
         *      allocate stacked registers so it can safely demine any
         *      potential NaT values from the input registers.
         *
         * On entry:
         *      - executing on bank 0 or bank 1 register set (doesn't matter)
         *      -  r1: stack pointer
         *      -  r2: current task pointer
         *      -  r3: preserved
         *      - r11: original contents (saved ar.pfs to be saved)
         *      - r12: original contents (sp to be saved)
         *      - r13: original contents (tp to be saved)
         *      - r15: original contents (syscall # to be saved)
         *      - r18: saved bsp (after switching to kernel stack)
         *      - r19: saved b6
         *      - r20: saved r1 (gp)
         *      - r21: saved ar.fpsr
         *      - r22: kernel's register backing store base (krbs_base)
         *      - r23: saved ar.bspstore
         *      - r24: saved ar.rnat
         *      - r25: saved ar.unat
         *      - r26: saved ar.pfs
         *      - r27: saved ar.rsc
         *      - r28: saved cr.iip
         *      - r29: saved cr.ipsr
         *      - r30: ar.itc for accounting (don't touch)
         *      - r31: saved pr
         *      -  b0: original contents (to be saved)
         * On exit:
         *      -  p10: TRUE if syscall is invoked with more than 8 out
         *              registers or r15's Nat is true
         *      -  r1: kernel's gp
         *      -  r3: preserved (same as on entry)
         *      -  r8: -EINVAL if p10 is true
         *      - r12: points to kernel stack
         *      - r13: points to current task
         *      - r14: preserved (same as on entry)
         *      - p13: preserved
         *      - p15: TRUE if interrupts need to be re-enabled
         *      - ar.fpsr: set to kernel settings
         *      -  b6: preserved (same as on entry)
         */
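        /*
         * The tnat.nz/(pN) mov inN=-1 pairs below "demine" the inputs: any
         * argument register whose NaT bit is set is forced to -1 so that no
         * NaT can reach the C-level syscall handler, while p10 (with
         * r8=-EINVAL) separately flags a bad call frame or a NaT syscall
         * number.
         */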
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
        st8 [r1]=r19                            // save b6
        add r16=PT(CR_IPSR),r1                  // initialize first base pointer
        add r17=PT(R11),r1                      // initialize second base pointer
        ;;
        alloc r19=ar.pfs,8,0,0,0                // ensure in0-in7 are writable
        st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)    // save cr.ipsr
        tnat.nz p8,p0=in0

        st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)  // save r11
        tnat.nz p9,p0=in1
(pKStk) mov r18=r0                              // make sure r18 isn't NaT
        ;;

        st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)     // save ar.pfs
        st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)    // save cr.iip
        mov r28=b0                              // save b0 (2 cyc)
        ;;

        st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)    // save ar.unat
        dep r19=0,r19,38,26                     // clear all bits but 0..37 [I0]
(p8)    mov in0=-1
        ;;

        st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)    // store ar.pfs.pfm in cr.ifs
        extr.u r11=r19,7,7      // I0           // get sol of ar.pfs
        and r8=0x7f,r19         // A            // get sof of ar.pfs

        st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
        tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
(p9)    mov in1=-1
        ;;

(pUStk) sub r18=r18,r22                         // r18=RSE.ndirty*8
        tnat.nz p10,p0=in2
        add r11=8,r11
        ;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16         // skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17     // skip over ar_bspstore field
        tnat.nz p11,p0=in3
        ;;
(p10)   mov in2=-1
        tnat.nz p12,p0=in4                              // [I0]
(p11)   mov in3=-1
        ;;
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)        // save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)    // save ar.bspstore
        shl r18=r18,16                          // compute ar.rsc to be used for "loadrs"
        ;;
        st8 [r16]=r31,PT(LOADRS)-PT(PR)         // save predicates
        st8 [r17]=r28,PT(R1)-PT(B0)             // save b0
        tnat.nz p13,p0=in5                              // [I0]
        ;;
        st8 [r16]=r18,PT(R12)-PT(LOADRS)        // save ar.rsc value for "loadrs"
        st8.spill [r17]=r20,PT(R13)-PT(R1)      // save original r1
(p12)   mov in4=-1
        ;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)        // save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)            // save r13
(p13)   mov in5=-1
        ;;
        st8 [r16]=r21,PT(R8)-PT(AR_FPSR)        // save ar.fpsr
        tnat.nz p13,p0=in6
        cmp.lt p10,p9=r11,r8    // frame size can't be more than local+8
        ;;
        mov r8=1
(p9)    tnat.nz p10,p0=r15
        adds r12=-16,r1         // switch to kernel memory stack (with 16 bytes of scratch)

        st8.spill [r17]=r15                     // save r15
        tnat.nz p8,p0=in7
        nop.i 0

        mov r13=r2                              // establish `current'
        movl r1=__gp                            // establish kernel global pointer
        ;;
        st8 [r16]=r8            // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)   mov in6=-1
(p8)    mov in7=-1

        cmp.eq pSys,pNonSys=r0,r0               // set pSys=1, pNonSys=0
        movl r17=FPSR_DEFAULT
        ;;
        mov.m ar.fpsr=r17                       // set ar.fpsr to kernel default value
(p10)   mov r8=-EINVAL
        br.ret.sptk.many b7
END(ia64_syscall_setup)
        .org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
        DBG_FAULT(15)
        FAULT(15)

        .org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
        DBG_FAULT(16)
        FAULT(16)
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
        /*
         * There is no particular reason for this code to be here, other than
         * that there happens to be space here that would go unused otherwise.
         * If this fault ever gets "unreserved", simply move the following
         * code to a more suitable spot...
         *
         * account_sys_enter is called from SAVE_MIN* macros if accounting is
         * enabled and if the macro is entered from user mode.
         */
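        /*
         * The arithmetic below implements, in ar.itc units (sketch in C):
         *
         *      stime += leave - stamp;     // time spent in the kernel
         *      utime += now   - leave;     // time spent at user level
         *      stamp  = now;               // r20 = ar.itc, read earlier
         *
         * where stamp/leave/stime/utime live in thread_info (the TI_AC_*
         * fields).
         */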
GLOBAL_ENTRY(account_sys_enter)
        // mov.m r20=ar.itc is called in advance, and r13 is current
        add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
        add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
        ;;
        ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP   // time at last check in kernel
        ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE   // time when we left the kernel
        ;;
        ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME   // cumulated stime
        ld8 r21=[r17]                           // cumulated utime
        sub r22=r19,r18                         // stime before leaving the kernel
        ;;
        st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP   // update stamp
        sub r18=r20,r19                         // elapsed time in user mode
        ;;
        add r23=r23,r22                         // sum stime
        add r21=r21,r18                         // sum utime
        ;;
        st8 [r16]=r23                           // update stime
        st8 [r17]=r21                           // update utime
        ;;
        br.ret.sptk.many rp
END(account_sys_enter)
#endif
        .org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
        DBG_FAULT(17)
        FAULT(17)

        .org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
        DBG_FAULT(18)
        FAULT(18)

        .org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
        DBG_FAULT(19)
        FAULT(19)
// --- End of long entries, Beginning of short entries

        .org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
        DBG_FAULT(20)
        MOV_FROM_IFA(r16)
        RSM_PSR_DT
        /*
         * The Linux page fault handler doesn't expect non-present pages to be in
         * the TLB.  Flush the existing entry now, so we meet that expectation.
         */
        mov r17=PAGE_SHIFT<<2
        ;;
        ptc.l r16,r17
        ;;
        mov r31=pr
        srlz.d
        br.sptk.many page_fault
END(page_not_present)
        .org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
        DBG_FAULT(21)
        MOV_FROM_IFA(r16)
        RSM_PSR_DT
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(key_permission)

        .org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
        DBG_FAULT(22)
        MOV_FROM_IFA(r16)
        RSM_PSR_DT
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(iaccess_rights)

        .org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
        DBG_FAULT(23)
        MOV_FROM_IFA(r16)
        RSM_PSR_DT
        mov r31=pr
        ;;
        srlz.d
        br.sptk.many page_fault
END(daccess_rights)
        .org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
        DBG_FAULT(24)
        MOV_FROM_ISR(r16)
        mov r31=pr
        ;;
        cmp4.eq p6,p0=0,r16
(p6)    br.sptk.many dispatch_illegal_op_fault
        ;;
        mov r19=24              // fault number
        br.sptk.many dispatch_to_fault_handler
END(general_exception)

        .org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
        DBG_FAULT(25)
        rsm psr.dfh             // ensure we can access fph
        ;;
        srlz.d
        mov r31=pr
        mov r19=25
        br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)
        .org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
        DBG_FAULT(26)

        MOV_FROM_IPSR(p0, r16)
        MOV_FROM_ISR(r17)
        mov r31=pr                              // save PR
        ;;
        and r18=0xf,r17                         // r18 = cr.isr.code{3:0}
        tbit.z p6,p0=r17,IA64_ISR_NA_BIT
        ;;
        cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
        dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)    br.cond.spnt 1f         // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
        ;;
        MOV_TO_IPSR(p0, r16, r18)
        mov pr=r31,-1
        ;;
        RFI

1:      mov pr=r31,-1
        ;;
        FAULT(26)
END(nat_consumption)
        .org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
        DBG_FAULT(27)
        /*
         * A [f]chk.[as] instruction needs to take the branch to the recovery code but
         * this part of the architecture is not implemented in hardware on some CPUs, such
         * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
         * the relative target (not yet sign extended).  So after sign extending it we
         * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
         * i.e., the slot to restart into.
         *
         * cr.iim contains zero_ext(imm21)
         */
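        /*
         * Illustrative example: imm21 = 0x1FFFFF encodes -1; after the
         * shl-by-43/shr-by-39 pair below, r18 = -1 << 4 = -16, i.e. the
         * sign-extended immediate scaled to whole 16-byte bundles, which is
         * then added to cr.iip.
         */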
        MOV_FROM_IIM(r18)
        ;;
        MOV_FROM_IIP(r17)
        shl r18=r18,43                  // put sign bit in position (43=64-21)
        ;;

        MOV_FROM_IPSR(p0, r16)
        shr r18=r18,39                  // sign extend (39=43-4)
        ;;

        add r17=r17,r18                 // now add the offset
        ;;
        MOV_TO_IIP(r17, r19)
        dep r16=0,r16,41,2              // clear EI
        ;;

        MOV_TO_IPSR(p0, r16, r19)
        ;;

        RFI
END(speculation_vector)
        .org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
        DBG_FAULT(28)
        FAULT(28)

        .org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
        DBG_FAULT(29)
        FAULT(29)
END(debug_vector)

        .org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
        DBG_FAULT(30)
        mov r31=pr              // prepare to save predicates
        ;;
        br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

        .org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
        DBG_FAULT(31)
        FAULT(31)
END(unsupported_data_reference)

        .org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
        DBG_FAULT(32)
        FAULT(32)
END(floating_point_fault)

        .org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
        DBG_FAULT(33)
        FAULT(33)
END(floating_point_trap)

        .org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
        DBG_FAULT(34)
        FAULT(34)
END(lower_privilege_trap)

        .org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
        DBG_FAULT(35)
        FAULT(35)
END(taken_branch_trap)

        .org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
        DBG_FAULT(36)
        FAULT(36)
END(single_step_trap)

        .org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
        DBG_FAULT(37)
        FAULT(37)

        .org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
        DBG_FAULT(38)
        FAULT(38)

        .org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
        DBG_FAULT(39)
        FAULT(39)

        .org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
        DBG_FAULT(40)
        FAULT(40)

        .org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
        DBG_FAULT(41)
        FAULT(41)

        .org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
        DBG_FAULT(42)
        FAULT(42)

        .org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
        DBG_FAULT(43)
        FAULT(43)

        .org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
        DBG_FAULT(44)
        FAULT(44)

        .org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
        DBG_FAULT(45)
        FAULT(45)
END(ia32_exception)

        .org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
ENTRY(ia32_intercept)
        DBG_FAULT(46)
        FAULT(46)
END(ia32_intercept)

        .org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
ENTRY(ia32_interrupt)
        DBG_FAULT(47)
        FAULT(47)
END(ia32_interrupt)

        .org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
        DBG_FAULT(48)
        FAULT(48)

        .org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
        DBG_FAULT(49)
        FAULT(49)

        .org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
        DBG_FAULT(50)
        FAULT(50)

        .org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
        DBG_FAULT(51)
        FAULT(51)

        .org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
        DBG_FAULT(52)
        FAULT(52)

        .org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
        DBG_FAULT(53)
        FAULT(53)

        .org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
        DBG_FAULT(54)
        FAULT(54)

        .org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
        DBG_FAULT(55)
        FAULT(55)

        .org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
        DBG_FAULT(56)
        FAULT(56)

        .org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
        DBG_FAULT(57)
        FAULT(57)

        .org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
        DBG_FAULT(58)
        FAULT(58)

        .org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
        DBG_FAULT(59)
        FAULT(59)

        .org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
        DBG_FAULT(60)
        FAULT(60)

        .org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
        DBG_FAULT(61)
        FAULT(61)

        .org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
        DBG_FAULT(62)
        FAULT(62)

        .org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
        DBG_FAULT(63)
        FAULT(63)

        .org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
        DBG_FAULT(64)
        FAULT(64)

        .org ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
        DBG_FAULT(65)
        FAULT(65)

        .org ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
        DBG_FAULT(66)
        FAULT(66)

        .org ia64_ivt+0x7f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
        DBG_FAULT(67)
        FAULT(67)

        //-----------------------------------------------------------------------------------
        // call ia64_do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
        SSM_PSR_DT_AND_SRLZ_I
        ;;
        SAVE_MIN_WITH_COVER
        alloc r15=ar.pfs,0,0,3,0
        MOV_FROM_IFA(out0)
        MOV_FROM_ISR(out1)
        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
        adds r3=8,r2                            // set up second base pointer
        SSM_PSR_I(p15, p15, r14)                // restore psr.i
        movl r14=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r14
        ;;
        adds out2=16,r12                        // out2 = pointer to pt_regs
        br.call.sptk.many b6=ia64_do_page_fault // ignore return address
END(page_fault)
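
        /*
         * The three outgoing registers set up above map onto the C page-fault
         * handler's parameters.  Sketch of the expected prototype (see
         * arch/ia64/mm/fault.c; shown here only as a reading aid):
         *
         *      void ia64_do_page_fault(unsigned long address, // out0 = cr.ifa
         *                              unsigned long isr,     // out1 = cr.isr
         *                              struct pt_regs *regs); // out2 = sp + 16
         */
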
ENTRY(non_syscall)
        mov ar.rsc=r27                  // restore ar.rsc before SAVE_MIN_WITH_COVER
        ;;
        SAVE_MIN_WITH_COVER

        // There is no particular reason for this code to be here, other than that
        // there happens to be space here that would go unused otherwise.  If this
        // fault ever gets "unreserved", simply move the following code to a more
        // suitable spot...

        alloc r14=ar.pfs,0,0,2,0
        MOV_FROM_IIM(out0)
        add out1=16,sp
        adds r3=8,r2                    // set up second base pointer for SAVE_REST

        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
                                        // guarantee that interruption collection is on
        SSM_PSR_I(p15, p15, r15)        // restore psr.i
        movl r15=ia64_leave_kernel
        ;;
        SAVE_REST
        mov rp=r15
        ;;
        br.call.sptk.many b6=ia64_bad_break     // avoid WAW on CFM and ignore return addr
END(non_syscall)
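
        /*
         * A break that is not a syscall reaches C with the break immediate
         * and the saved register frame.  Expected prototype (from
         * arch/ia64/kernel/traps.c; quoted as a reading aid only):
         *
         *      void ia64_bad_break(unsigned long break_num,    // out0 = cr.iim
         *                          struct pt_regs *regs);      // out1 = sp + 16
         */
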
ENTRY(__interrupt)
        DBG_FAULT(12)
        mov r31=pr              // prepare to save predicates
        ;;
        SAVE_MIN_WITH_COVER     // uses r31; defines r2 and r3
        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
                                // ensure everybody knows psr.ic is back on
        adds r3=8,r2            // set up second base pointer for SAVE_REST
        ;;
        SAVE_REST
        ;;
        MCA_RECOVER_RANGE(interrupt)
        alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
        MOV_FROM_IVR(out0, r8)  // pass cr.ivr as first arg
        add out1=16,sp          // pass pointer to pt_regs as second arg
        ;;
        srlz.d                  // make sure we see the effect of cr.ivr
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.call.sptk.many b6=ia64_handle_irq
END(__interrupt)
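
        /*
         * cr.ivr is read exactly once here and handed to C.  Expected
         * prototype (from arch/ia64/kernel/irq_ia64.c; a reading aid only):
         *
         *      void ia64_handle_irq(ia64_vector vector,       // out0 = cr.ivr
         *                           struct pt_regs *regs);    // out1 = sp + 16
         */
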
        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         */

ENTRY(dispatch_unaligned_handler)
        SAVE_MIN_WITH_COVER
        ;;
        alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
        MOV_FROM_IFA(out0)
        adds out1=16,sp

        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
                                                // guarantee that interruption collection is on
        SSM_PSR_I(p15, p15, r3)                 // restore psr.i
        adds r3=8,r2                            // set up second base pointer
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.sptk.many ia64_prepare_handle_unaligned
END(dispatch_unaligned_handler)
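
        /*
         * ia64_prepare_handle_unaligned is a small assembler stub (in
         * entry.S) that saves a switch_stack and then calls the C emulator;
         * the emulator's expected prototype (from
         * arch/ia64/kernel/unaligned.c; a reading aid only):
         *
         *      void ia64_handle_unaligned(unsigned long ifa,     // out0 = cr.ifa
         *                                 struct pt_regs *regs); // out1 = sp + 16
         */
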
        /*
         * There is no particular reason for this code to be here, other than that
         * there happens to be space here that would go unused otherwise.  If this
         * fault ever gets "unreserved", simply move the following code to a more
         * suitable spot...
         */

ENTRY(dispatch_to_fault_handler)
        /*
         * Input:
         *      psr.ic: off
         *      r19:    fault vector number (e.g., 24 for General Exception)
         *      r31:    contains saved predicates (pr)
         */
        SAVE_MIN_WITH_COVER_R19
        alloc r14=ar.pfs,0,0,5,0
        MOV_FROM_ISR(out1)
        MOV_FROM_IFA(out2)
        MOV_FROM_IIM(out3)
        MOV_FROM_ITIR(out4)
        ;;
        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
                                                // guarantee that interruption collection is on
        mov out0=r15
        ;;
        SSM_PSR_I(p15, p15, r3)                 // restore psr.i
        adds r3=8,r2                            // set up second base pointer for SAVE_REST
        ;;
        SAVE_REST
        movl r14=ia64_leave_kernel
        ;;
        mov rp=r14
        br.call.sptk.many b6=ia64_fault
END(dispatch_to_fault_handler)
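
        /*
         * out0..out4 above become the first five parameters of the common C
         * fault handler.  Expected prototype (from arch/ia64/kernel/traps.c,
         * trailing arguments elided; a reading aid only):
         *
         *      void ia64_fault(unsigned long vector,   // out0 = r15 (fault number from r19)
         *                      unsigned long isr,      // out1 = cr.isr
         *                      unsigned long ifa,      // out2 = cr.ifa
         *                      unsigned long iim,      // out3 = cr.iim
         *                      unsigned long itir,     // out4 = cr.itir
         *                      ...);
         */
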
        /*
         * Squatting in this space ...
         *
         * This special case dispatcher for illegal operation faults allows preserved
         * registers to be modified through a callback function (asm only) that is handed
         * back from the fault handler in r8.  Up to three arguments can be passed to the
         * callback function by returning an aggregate with the callback as its first
         * element, followed by the arguments.
         */
ENTRY(dispatch_illegal_op_fault)
        .prologue
        .body
        SAVE_MIN_WITH_COVER
        SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
                                // guarantee that interruption collection is on
        ;;
        SSM_PSR_I(p15, p15, r3) // restore psr.i
        adds r3=8,r2    // set up second base pointer for SAVE_REST
        ;;
        alloc r14=ar.pfs,0,0,1,0        // must be first in insn group
        mov out0=ar.ec
        ;;
        SAVE_REST
        PT_REGS_UNWIND_INFO(0)
        ;;
        br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:  ;;
        alloc r14=ar.pfs,0,0,3,0        // must be first in insn group
        mov out0=r9
        mov out1=r10
        mov out2=r11
        movl r15=ia64_leave_kernel
        ;;
        mov rp=r15
        mov b6=r8
        ;;
        cmp.ne p6,p0=0,r8
(p6)    br.call.dpnt.many b6=b6         // call returns to ia64_leave_kernel
        br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)
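
        /*
         * The aggregate mentioned above is returned in r8-r11 per the IA-64
         * software conventions.  A C sketch of the contract (struct and
         * names follow arch/ia64/kernel/traps.c; a reading aid only):
         *
         *      struct illegal_op_return {
         *              unsigned long fkt, arg1, arg2, arg3;    // r8, r9, r10, r11
         *      };
         *
         *      struct illegal_op_return ia64_illegal_op_fault(unsigned long ec, ...);
         *
         * If .fkt (r8) is non-zero it is called through b6 with r9-r11 as
         * arguments; otherwise control falls straight through to
         * ia64_leave_kernel.
         */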