/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *  Stephane Eranian <eranian@hpl.hp.com>
 *  David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *  Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *  Dan Magenheimer <dan.magenheimer@hp.com>
 *      Xen paravirtualization
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 *      Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 *  For each entry, the comment is as follows:
 *
 *      // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /          /
 *  entry number ---------/         /                  /          /
 *  size of the entry -------------/                  /          /
 *  vector name -------------------------------------/          /
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */


#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/export.h>

#if 0
# define PSR_DEFAULT_BITS   psr.ac
#else
# define PSR_DEFAULT_BITS   0
#endif
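
/*
 * (Editor's note, inferred from the macro name used below:
 * PSR_DEFAULT_BITS appears to be or'ed into psr along with psr.ic
 * whenever a handler re-enables interruption collection, see the
 * SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I use in break_fault; flipping
 * the #if above to 1 would additionally enforce psr.ac, i.e. alignment
 * checking, by default.)
 */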

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)   mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

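/*
 * FAULT(n) is the common stub for vectors that have no inlined handler:
 * it saves the predicates in r31, loads the vector number into r19, and
 * branches to dispatch_to_fault_handler.
 */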
#define FAULT(n)                                    \
    mov r31=pr;                                 \
    mov r19=n;;         /* prepare to save predicates */        \
    br.sptk.many dispatch_to_fault_handler

    .section .text..ivt,"ax"

    .align 32768    // align on 32KB boundary
    .global ia64_ivt
    EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
    DBG_FAULT(0)
    /*
     * The VHPT vector is invoked when the TLB entry for the virtual page table
     * is missing.  This happens only as a result of a previous
     * (the "original") TLB miss, which may either be caused by an instruction
     * fetch or a data access (or non-access).
     *
     * What we do here is normal TLB miss handling for the _original_ miss,
     * followed by inserting the TLB entry for the virtual page table page
     * that the VHPT walker was attempting to access.  The latter gets
     * inserted as long as the page table entries above the pte level have
     * valid mappings for the faulting address.  The TLB entry for the
     * original miss gets inserted only if the pte entry indicates that the
     * page is present.
     *
     * do_page_fault gets invoked in the following cases:
     *  - the faulting virtual address uses unimplemented address bits
     *  - the faulting virtual address has no valid page table mapping
     */
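    /*
     * In C terms, the walk below is roughly the following (a sketch of
     * the CONFIG_PGTABLE_LEVELS == 3 case only, ignoring the region-5
     * and hugetlb adjustments; names as used in the comments below):
     *
     *    pgd = pgd_offset(addr);          // swapper_pg_dir for region 5
     *    if (!pgd_present(*pgd))
     *        goto page_fault;
     *    pmd = pmd_offset(pgd, addr);
     *    if (!pmd_present(*pmd))
     *        goto page_fault;
     *    pte = pte_offset(pmd, addr);
     *    if (!pte_present(*pte))
     *        goto page_fault;
     *    itc(*pte);                       // insert the original TLB entry
     */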
    MOV_FROM_IFA(r16)           // get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
    movl r18=PAGE_SHIFT
    MOV_FROM_ITIR(r25)
#endif
    ;;
    RSM_PSR_DT              // use physical addressing for data
    mov r31=pr              // save the predicate registers
    mov r19=IA64_KR(PT_BASE)        // get page table base address
    shl r21=r16,3               // shift bit 60 into sign bit
    shr.u r17=r16,61            // get the region number into r17
    ;;
    shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
    extr.u r26=r25,2,6
    ;;
    cmp.ne p8,p0=r18,r26
    sub r27=r26,r18
    ;;
(p8)    dep r25=r18,r25,2,6
(p8)    shr r22=r22,r27
#endif
    ;;
    cmp.eq p6,p7=5,r17          // is IFA pointing into region 5?
    shr.u r18=r22,PGDIR_SHIFT       // get bottom portion of pgd index bits
    ;;
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3    // put region number bits in place

    srlz.d
    LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

    .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
    ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)    // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)    // r17=pgd_offset for region[0-4]
    cmp.eq p7,p6=0,r21          // unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
    shr.u r28=r22,PUD_SHIFT         // shift pud index into position
#else
    shr.u r18=r22,PMD_SHIFT         // shift pmd index into position
#endif
    ;;
    ld8 r17=[r17]               // get *pgd (may be 0)
    ;;
(p7)    cmp.eq p6,p7=r17,r0         // was pgd_present(*pgd) == NULL?
#if CONFIG_PGTABLE_LEVELS == 4
    dep r28=r28,r17,3,(PAGE_SHIFT-3)    // r28=pud_offset(pgd,addr)
    ;;
    shr.u r18=r22,PMD_SHIFT         // shift pmd index into position
(p7)    ld8 r29=[r28]               // get *pud (may be 0)
    ;;
(p7)    cmp.eq.or.andcm p6,p7=r29,r0        // was pud_present(*pud) == NULL?
    dep r17=r18,r29,3,(PAGE_SHIFT-3)    // r17=pmd_offset(pud,addr)
#else
    dep r17=r18,r17,3,(PAGE_SHIFT-3)    // r17=pmd_offset(pgd,addr)
#endif
    ;;
(p7)    ld8 r20=[r17]               // get *pmd (may be 0)
    shr.u r19=r22,PAGE_SHIFT        // shift pte index into position
    ;;
(p7)    cmp.eq.or.andcm p6,p7=r20,r0        // was pmd_present(*pmd) == NULL?
    dep r21=r19,r20,3,(PAGE_SHIFT-3)    // r21=pte_offset(pmd,addr)
    ;;
(p7)    ld8 r18=[r21]               // read *pte
    MOV_FROM_ISR(r19)           // cr.isr bit 32 tells us if this is an insn miss
    ;;
(p7)    tbit.z p6,p7=r18,_PAGE_P_BIT        // page present bit cleared?
    MOV_FROM_IHA(r22)           // get the VHPT address that caused the TLB miss
    ;;                  // avoid RAW on p7
(p7)    tbit.nz.unc p10,p11=r19,32      // is it an instruction TLB miss?
    dep r23=0,r20,0,PAGE_SHIFT      // clear low bits to get page address
    ;;
    ITC_I_AND_D(p10, p11, r18, r24)     // insert the instruction TLB entry and
                        // insert the data TLB entry
(p6)    br.cond.spnt.many page_fault        // handle bad address/page not present (page fault)
    MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
    MOV_TO_ITIR(p8, r25, r24)       // change to default page-size for VHPT
#endif

    /*
     * Now compute and insert the TLB entry for the virtual page table.  We never
     * execute in a page table page so there is no need to set the exception deferral
     * bit.
     */
    adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
    ;;
    ITC_D(p7, r24, r25)
    ;;
#ifdef CONFIG_SMP
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data

    /*
     * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
     * between reading the pagetable and the "itc".  If so, flush the entry we
     * inserted and retry.  At this point, we have:
     *
     * r28 = equivalent of pud_offset(pgd, ifa)
     * r17 = equivalent of pmd_offset(pud, ifa)
     * r21 = equivalent of pte_offset(pmd, ifa)
     *
     * r29 = *pud
     * r20 = *pmd
     * r18 = *pte
     */
    ld8 r25=[r21]               // read *pte again
    ld8 r26=[r17]               // read *pmd again
#if CONFIG_PGTABLE_LEVELS == 4
    ld8 r19=[r28]               // read *pud again
#endif
    cmp.ne p6,p7=r0,r0
    ;;
    cmp.ne.or.andcm p6,p7=r26,r20       // did *pmd change
#if CONFIG_PGTABLE_LEVELS == 4
    cmp.ne.or.andcm p6,p7=r19,r29       // did *pud change
#endif
    mov r27=PAGE_SHIFT<<2
    ;;
(p6)    ptc.l r22,r27               // purge PTE page translation
(p7)    cmp.ne.or.andcm p6,p7=r25,r18       // did *pte change
    ;;
(p6)    ptc.l r16,r27               // purge translation
#endif

    mov pr=r31,-1               // restore predicate registers
    RFI
END(vhpt_miss)

    .org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
    DBG_FAULT(1)
    /*
     * The ITLB handler accesses the PTE via the virtually mapped linear
     * page table.  If a nested TLB miss occurs, we switch into physical
     * mode, walk the page table, and then re-execute the PTE read and
     * go on normally after that.
     */
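    /*
     * Nested-miss protocol: r16 = faulting address, r29 = saved b0,
     * r31 = saved predicates, and r30 (loaded below) = restart label.
     * If the ld8 of the PTE itself misses, nested_dtlb_miss walks the
     * page table in physical mode and returns to that label with
     * r17 = physical address of the PTE.
     */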
    MOV_FROM_IFA(r16)           // get virtual address
    mov r29=b0              // save b0
    mov r31=pr              // save predicates
.itlb_fault:
    MOV_FROM_IHA(r17)           // get virtual address of PTE
    movl r30=1f             // load nested fault continuation point
    ;;
1:  ld8 r18=[r17]               // read *pte
    ;;
    mov b0=r29
    tbit.z p6,p0=r18,_PAGE_P_BIT        // page present bit cleared?
(p6)    br.cond.spnt page_fault
    ;;
    ITC_I(p0, r18, r19)
    ;;
#ifdef CONFIG_SMP
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data

    ld8 r19=[r17]               // read *pte again and see if same
    mov r20=PAGE_SHIFT<<2           // setup page size for purge
    ;;
    cmp.ne p7,p0=r18,r19
    ;;
(p7)    ptc.l r16,r20
#endif
    mov pr=r31,-1
    RFI
END(itlb_miss)

    .org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
    DBG_FAULT(2)
    /*
     * The DTLB handler accesses the PTE via the virtually mapped linear
     * page table.  If a nested TLB miss occurs, we switch into physical
     * mode, walk the page table, and then re-execute the PTE read and
     * go on normally after that.
     */
    MOV_FROM_IFA(r16)           // get virtual address
    mov r29=b0              // save b0
    mov r31=pr              // save predicates
dtlb_fault:
    MOV_FROM_IHA(r17)           // get virtual address of PTE
    movl r30=1f             // load nested fault continuation point
    ;;
1:  ld8 r18=[r17]               // read *pte
    ;;
    mov b0=r29
    tbit.z p6,p0=r18,_PAGE_P_BIT        // page present bit cleared?
(p6)    br.cond.spnt page_fault
    ;;
    ITC_D(p0, r18, r19)
    ;;
#ifdef CONFIG_SMP
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data

    ld8 r19=[r17]               // read *pte again and see if same
    mov r20=PAGE_SHIFT<<2           // setup page size for purge
    ;;
    cmp.ne p7,p0=r18,r19
    ;;
(p7)    ptc.l r16,r20
#endif
    mov pr=r31,-1
    RFI
END(dtlb_miss)

    .org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
    DBG_FAULT(3)
    MOV_FROM_IFA(r16)   // get address that caused the TLB miss
    movl r17=PAGE_KERNEL
    MOV_FROM_IPSR(p0, r21)
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    mov r31=pr
    ;;
#ifdef CONFIG_DISABLE_VHPT
    shr.u r22=r16,61            // get the region number into r22
    ;;
    cmp.gt p8,p0=6,r22          // user mode
    ;;
    THASH(p8, r17, r16, r23)
    ;;
    MOV_TO_IHA(p8, r17, r23)
(p8)    mov r29=b0              // save b0
(p8)    br.cond.dptk .itlb_fault
#endif
    extr.u r23=r21,IA64_PSR_CPL0_BIT,2  // extract psr.cpl
    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
    shr.u r18=r16,57    // move address bit 61 to bit 4
    ;;
    andcm r18=0x10,r18  // bit 4=~address-bit(61)
    cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
    or r19=r17,r19      // insert PTE control bits into r19
    ;;
    or r19=r19,r18      // set bit 4 (uncached) if the access was to region 6
(p8)    br.cond.spnt page_fault
    ;;
    ITC_I(p0, r19, r18) // insert the TLB entry
    mov pr=r31,-1
    RFI
END(alt_itlb_miss)

    .org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
    DBG_FAULT(4)
    MOV_FROM_IFA(r16)   // get address that caused the TLB miss
    movl r17=PAGE_KERNEL
    MOV_FROM_ISR(r20)
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    MOV_FROM_IPSR(p0, r21)
    mov r31=pr
    mov r24=PERCPU_ADDR
    ;;
#ifdef CONFIG_DISABLE_VHPT
    shr.u r22=r16,61            // get the region number into r22
    ;;
    cmp.gt p8,p0=6,r22          // access to region 0-5
    ;;
    THASH(p8, r17, r16, r25)
    ;;
    MOV_TO_IHA(p8, r17, r25)
(p8)    mov r29=b0              // save b0
(p8)    br.cond.dptk dtlb_fault
#endif
    cmp.ge p10,p11=r16,r24          // access to per_cpu_data?
    tbit.z p12,p0=r16,61            // access to region 6?
    mov r25=PERCPU_PAGE_SHIFT << 2
    mov r26=PERCPU_PAGE_SIZE
    nop.m 0
    nop.b 0
    ;;
(p10)   mov r19=IA64_KR(PER_CPU_DATA)
(p11)   and r19=r19,r16             // clear non-ppn fields
    extr.u r23=r21,IA64_PSR_CPL0_BIT,2  // extract psr.cpl
    and r22=IA64_ISR_CODE_MASK,r20      // get the isr.code field
    tbit.nz p6,p7=r20,IA64_ISR_SP_BIT   // is speculation bit on?
    tbit.nz p9,p0=r20,IA64_ISR_NA_BIT   // is non-access bit on?
    ;;
(p10)   sub r19=r19,r26
    MOV_TO_ITIR(p10, r25, r24)
    cmp.ne p8,p0=r0,r23
(p9)    cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22  // check isr.code field
(p12)   dep r17=-1,r17,4,1          // set ma=UC for region 6 addr
(p8)    br.cond.spnt page_fault

    dep r21=-1,r21,IA64_PSR_ED_BIT,1
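    // (psr.ed is written back to cr.ipsr under p6 only, i.e. for a
    //  speculative non-lfetch access: with psr.ed set, the RFI makes the
    //  access produce a NaT instead of a visible fault)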
    ;;
    or r19=r19,r17      // insert PTE control bits into r19
    MOV_TO_IPSR(p6, r21, r24)
    ;;
    ITC_D(p7, r19, r18) // insert the TLB entry
    mov pr=r31,-1
    RFI
END(alt_dtlb_miss)

    .org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
    /*
     * In the absence of kernel bugs, we get here when the virtually mapped linear
     * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
     * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
     * table is missing, a nested TLB miss fault is triggered and control is
     * transferred to this point.  When this happens, we look up the pte for the
     * faulting address by walking the page table in physical mode and return to the
     * continuation point passed in register r30 (or call page_fault if the address is
     * not mapped).
     *
     * Input:   r16:    faulting address
     *      r29:    saved b0
     *      r30:    continuation address
     *      r31:    saved pr
     *
     * Output:  r17:    physical address of PTE of faulting address
     *      r29:    saved b0
     *      r30:    continuation address
     *      r31:    saved pr
     *
     * Clobbered:   b0, r18, r19, r21, r22, psr.dt (cleared)
     */
    RSM_PSR_DT              // switch to using physical data addressing
    mov r19=IA64_KR(PT_BASE)        // get the page table base address
    shl r21=r16,3               // shift bit 60 into sign bit
    MOV_FROM_ITIR(r18)
    ;;
    shr.u r17=r16,61            // get the region number into r17
    extr.u r18=r18,2,6          // get the faulting page size
    ;;
    cmp.eq p6,p7=5,r17          // is faulting address in region 5?
    add r22=-PAGE_SHIFT,r18         // adjustment for hugetlb address
    add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
    ;;
    shr.u r22=r16,r22
    shr.u r18=r16,r18
(p7)    dep r17=r17,r19,(PAGE_SHIFT-3),3    // put region number bits in place

    srlz.d
    LOAD_PHYSICAL(p6, r19, swapper_pg_dir)  // region 5 is rooted at swapper_pg_dir

    .pred.rel "mutex", p6, p7
(p6)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)    shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
    ;;
(p6)    dep r17=r18,r19,3,(PAGE_SHIFT-3)    // r17=pgd_offset for region 5
(p7)    dep r17=r18,r17,3,(PAGE_SHIFT-6)    // r17=pgd_offset for region[0-4]
    cmp.eq p7,p6=0,r21          // unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
    shr.u r18=r22,PUD_SHIFT         // shift pud index into position
#else
    shr.u r18=r22,PMD_SHIFT         // shift pmd index into position
#endif
    ;;
    ld8 r17=[r17]               // get *pgd (may be 0)
    ;;
(p7)    cmp.eq p6,p7=r17,r0         // was pgd_present(*pgd) == NULL?
    dep r17=r18,r17,3,(PAGE_SHIFT-3)    // r17=p[u|m]d_offset(pgd,addr)
    ;;
#if CONFIG_PGTABLE_LEVELS == 4
(p7)    ld8 r17=[r17]               // get *pud (may be 0)
    shr.u r18=r22,PMD_SHIFT         // shift pmd index into position
    ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0        // was pud_present(*pud) == NULL?
    dep r17=r18,r17,3,(PAGE_SHIFT-3)    // r17=pmd_offset(pud,addr)
    ;;
#endif
(p7)    ld8 r17=[r17]               // get *pmd (may be 0)
    shr.u r19=r22,PAGE_SHIFT        // shift pte index into position
    ;;
(p7)    cmp.eq.or.andcm p6,p7=r17,r0        // was pmd_present(*pmd) == NULL?
    dep r17=r19,r17,3,(PAGE_SHIFT-3)    // r17=pte_offset(pmd,addr);
(p6)    br.cond.spnt page_fault
    mov b0=r30
    br.sptk.many b0             // return to continuation point
END(nested_dtlb_miss)

    .org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
    DBG_FAULT(6)
    FAULT(6)
END(ikey_miss)

    .org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
    DBG_FAULT(7)
    FAULT(7)
END(dkey_miss)

    .org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
    DBG_FAULT(8)
    /*
     * What we do here is to simply turn on the dirty bit in the PTE.  We need to
     * update both the page-table and the TLB entry.  To efficiently access the PTE,
     * we address it through the virtual page table.  Most likely, the TLB entry for
     * the relevant virtual page table page is still present in the TLB so we can
     * normally do this without additional TLB misses.  In case the necessary virtual
     * page table TLB entry isn't present, we take a nested TLB miss hit where we look
     * up the physical address of the L3 PTE and then continue at label 1 below.
     */
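    /*
     * In effect, the SMP path below does (a sketch only):
     *
     *    old = *pte;
     *    new = old | _PAGE_D | _PAGE_A;
     *    if ((old & _PAGE_P) && cmpxchg(pte, old, new) == old)
     *        itc_d(new);                  // install updated PTE
     *    if (*pte != new)                 // lost a race (e.g. a ptc.g)
     *        ptc_l(ifa);                  // purge; the access re-faults
     */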
    MOV_FROM_IFA(r16)           // get the address that caused the fault
    movl r30=1f             // load continuation point in case of nested fault
    ;;
    THASH(p0, r17, r16, r18)        // compute virtual address of L3 PTE
    mov r29=b0              // save b0 in case of nested fault
    mov r31=pr              // save pr
#ifdef CONFIG_SMP
    mov r28=ar.ccv              // save ar.ccv
    ;;
1:  ld8 r18=[r17]
    ;;                  // avoid RAW on r18
    mov ar.ccv=r18              // set compare value for cmpxchg
    or r25=_PAGE_D|_PAGE_A,r18      // set the dirty and accessed bits
    tbit.z p7,p6 = r18,_PAGE_P_BIT      // Check present bit
    ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv   // Only update if page is present
    mov r24=PAGE_SHIFT<<2
    ;;
(p6)    cmp.eq p6,p7=r26,r18            // Only compare if page is present
    ;;
    ITC_D(p6, r25, r18)         // install updated PTE
    ;;
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data

    ld8 r18=[r17]               // read PTE again
    ;;
    cmp.eq p6,p7=r18,r25            // is it same as the newly installed
    ;;
(p7)    ptc.l r16,r24
    mov b0=r29              // restore b0
    mov ar.ccv=r28
#else
    ;;
1:  ld8 r18=[r17]
    ;;                  // avoid RAW on r18
    or r18=_PAGE_D|_PAGE_A,r18      // set the dirty and accessed bits
    mov b0=r29              // restore b0
    ;;
    st8 [r17]=r18               // store back updated PTE
    ITC_D(p0, r18, r16)         // install updated PTE
#endif
    mov pr=r31,-1               // restore pr
    RFI
END(dirty_bit)

    .org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
    DBG_FAULT(9)
    // Like Entry 8, except for instruction access
    MOV_FROM_IFA(r16)           // get the address that caused the fault
    movl r30=1f             // load continuation point in case of nested fault
    mov r31=pr              // save predicates
#ifdef CONFIG_ITANIUM
    /*
     * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
     */
    MOV_FROM_IPSR(p0, r17)
    ;;
    MOV_FROM_IIP(r18)
    tbit.z p6,p0=r17,IA64_PSR_IS_BIT    // IA64 instruction set?
    ;;
(p6)    mov r16=r18             // if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
    ;;
    THASH(p0, r17, r16, r18)        // compute virtual address of L3 PTE
    mov r29=b0              // save b0 in case of nested fault
#ifdef CONFIG_SMP
    mov r28=ar.ccv              // save ar.ccv
    ;;
1:  ld8 r18=[r17]
    ;;
    mov ar.ccv=r18              // set compare value for cmpxchg
    or r25=_PAGE_A,r18          // set the accessed bit
    tbit.z p7,p6 = r18,_PAGE_P_BIT      // Check present bit
    ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv   // Only if page present
    mov r24=PAGE_SHIFT<<2
    ;;
(p6)    cmp.eq p6,p7=r26,r18            // Only if page present
    ;;
    ITC_I(p6, r25, r26)         // install updated PTE
    ;;
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data

    ld8 r18=[r17]               // read PTE again
    ;;
    cmp.eq p6,p7=r18,r25            // is it same as the newly installed
    ;;
(p7)    ptc.l r16,r24
    mov b0=r29              // restore b0
    mov ar.ccv=r28
#else /* !CONFIG_SMP */
    ;;
1:  ld8 r18=[r17]
    ;;
    or r18=_PAGE_A,r18          // set the accessed bit
    mov b0=r29              // restore b0
    ;;
    st8 [r17]=r18               // store back updated PTE
    ITC_I(p0, r18, r16)         // install updated PTE
#endif /* !CONFIG_SMP */
    mov pr=r31,-1
    RFI
END(iaccess_bit)

    .org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
    DBG_FAULT(10)
    // Like Entry 8, except for data access
    MOV_FROM_IFA(r16)           // get the address that caused the fault
    movl r30=1f             // load continuation point in case of nested fault
    ;;
    THASH(p0, r17, r16, r18)        // compute virtual address of L3 PTE
    mov r31=pr
    mov r29=b0              // save b0 in case of nested fault
#ifdef CONFIG_SMP
    mov r28=ar.ccv              // save ar.ccv
    ;;
1:  ld8 r18=[r17]
    ;;                  // avoid RAW on r18
    mov ar.ccv=r18              // set compare value for cmpxchg
    or r25=_PAGE_A,r18          // set the accessed bit
    tbit.z p7,p6 = r18,_PAGE_P_BIT      // Check present bit
    ;;
(p6)    cmpxchg8.acq r26=[r17],r25,ar.ccv   // Only if page is present
    mov r24=PAGE_SHIFT<<2
    ;;
(p6)    cmp.eq p6,p7=r26,r18            // Only if page is present
    ;;
    ITC_D(p6, r25, r26)         // install updated PTE
    /*
     * Tell the assembler's dependency-violation checker that the above "itc" instructions
     * cannot possibly affect the following loads:
     */
    dv_serialize_data
    ;;
    ld8 r18=[r17]               // read PTE again
    ;;
    cmp.eq p6,p7=r18,r25            // is it same as the newly installed
    ;;
(p7)    ptc.l r16,r24
    mov ar.ccv=r28
#else
    ;;
1:  ld8 r18=[r17]
    ;;                  // avoid RAW on r18
    or r18=_PAGE_A,r18          // set the accessed bit
    ;;
    st8 [r17]=r18               // store back updated PTE
    ITC_D(p0, r18, r16)         // install updated PTE
#endif
    mov b0=r29              // restore b0
    mov pr=r31,-1
    RFI
END(daccess_bit)

    .org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
    /*
     * The streamlined system call entry/exit paths only save/restore the initial part
     * of pt_regs.  This implies that the callers of system-calls must adhere to the
     * normal procedure calling conventions.
     *
     *   Registers to be saved & restored:
     *  CR registers: cr.ipsr, cr.iip, cr.ifs
     *  AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
     *  others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
     *   Registers to be restored only:
     *  r8-r11: output value from the system call.
     *
     * During system call exit, scratch registers (including r15) are modified/cleared
     * to prevent leaking bits from kernel to user level.
     */
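    /*
     * Note: ia64 syscall numbers start at 1024, so the syscall number
     * arrives in r15 biased by 1024; the code below subtracts 1024 to
     * index sys_call_table and restores the original value afterwards.
     */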
    DBG_FAULT(11)
    mov.m r16=IA64_KR(CURRENT)      // M2 r16 <- current task (12 cyc)
    MOV_FROM_IPSR(p0, r29)          // M2 (12 cyc)
    mov r31=pr              // I0 (2 cyc)

    MOV_FROM_IIM(r17)           // M2 (2 cyc)
    mov.m r27=ar.rsc            // M2 (12 cyc)
    mov r18=__IA64_BREAK_SYSCALL        // A

    mov.m ar.rsc=0              // M2
    mov.m r21=ar.fpsr           // M2 (12 cyc)
    mov r19=b6              // I0 (2 cyc)
    ;;
    mov.m r23=ar.bspstore           // M2 (12 cyc)
    mov.m r24=ar.rnat           // M2 (5 cyc)
    mov.i r26=ar.pfs            // I0 (2 cyc)

    invala                  // M0|1
    nop.m 0                 // M
    mov r20=r1              // A            save r1

    nop.m 0
    movl r30=sys_call_table         // X

    MOV_FROM_IIP(r28)           // M2 (2 cyc)
    cmp.eq p0,p7=r18,r17            // I0 is this a system call?
(p7)    br.cond.spnt non_syscall        // B  no ->
    //
    // From this point on, we are definitely on the syscall-path
    // and we can use (non-banked) scratch registers.
    //
///////////////////////////////////////////////////////////////////////
    mov r1=r16              // A    move task-pointer to "addl"-addressable reg
    mov r2=r16              // A    setup r2 for ia64_syscall_setup
    add r9=TI_FLAGS+IA64_TASK_SIZE,r16  // A    r9 = &current_thread_info()->flags

    adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
    adds r15=-1024,r15          // A    subtract 1024 from syscall number
    mov r3=NR_syscalls - 1
    ;;
    ld1.bias r17=[r16]          // M0|1 r17 = current->thread.on_ustack flag
    ld4 r9=[r9]             // M0|1 r9 = current_thread_info()->flags
    extr.u r8=r29,41,2          // I0   extract ei field from cr.ipsr

    shladd r30=r15,3,r30            // A    r30 = sys_call_table + 8*(syscall-1024)
    addl r22=IA64_RBS_OFFSET,r1     // A    compute base of RBS
    cmp.leu p6,p7=r15,r3            // A    syscall number in range?
    ;;

    lfetch.fault.excl.nt1 [r22]     // M0|1 prefetch RBS
(p6)    ld8 r30=[r30]               // M0|1 load address of syscall entry point
    tnat.nz.or p7,p0=r15            // I0   is syscall nr a NaT?

    mov.m ar.bspstore=r22           // M2   switch to kernel RBS
    cmp.eq p8,p9=2,r8           // A    ipsr.ei==2?
    ;;

(p8)    mov r8=0                // A    clear ei to 0
(p7)    movl r30=sys_ni_syscall         // X

(p8)    adds r28=16,r28             // A    switch cr.iip to next bundle
(p9)    adds r8=1,r8                // A    increment ei to next slot
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    ;;
    mov b6=r30              // I0   setup syscall handler branch reg early
#else
    nop.i 0
    ;;
#endif

    mov.m r25=ar.unat           // M2 (5 cyc)
    dep r29=r8,r29,41,2         // I0   insert new ei into cr.ipsr
    adds r15=1024,r15           // A    restore original syscall number
    //
    // If any of the above loads miss in L1D, we'll stall here until
    // the data arrives.
    //
///////////////////////////////////////////////////////////////////////
    st1 [r16]=r0                // M2|3 clear current->thread.on_ustack flag
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    MOV_FROM_ITC(p0, p14, r30, r18)     // M    get cycle for accounting
#else
    mov b6=r30              // I0   setup syscall handler branch reg early
#endif
    cmp.eq pKStk,pUStk=r0,r17       // A    were we on kernel stacks already?

    and r9=_TIF_SYSCALL_TRACEAUDIT,r9   // A    mask trace or audit
    mov r18=ar.bsp              // M2 (12 cyc)
(pKStk) br.cond.spnt .break_fixup       // B    we're already in kernel-mode -- fix up RBS
    ;;
.back_from_break_fixup:
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
    cmp.eq p14,p0=r9,r0         // A    are syscalls being traced/audited?
    br.call.sptk.many b7=ia64_syscall_setup // B
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
    // mov.m r30=ar.itc is called in advance, and r13 is current
    add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13  // A
    add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13  // A
(pKStk) br.cond.spnt .skip_accounting       // B    unlikely skip
    ;;
    ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP   // M  get last stamp
    ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE   // M  time at leave
    ;;
    ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME   // M  cumulated stime
    ld8 r21=[r17]               // M  cumulated utime
    sub r22=r19,r18             // A  stime before leave
    ;;
    st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP   // M  update stamp
    sub r18=r30,r19             // A  elapsed time in user
    ;;
    add r20=r20,r22             // A  sum stime
    add r21=r21,r18             // A  sum utime
    ;;
    st8 [r16]=r20               // M  update stime
    st8 [r17]=r21               // M  update utime
    ;;
.skip_accounting:
#endif
    mov ar.rsc=0x3              // M2   set eager mode, pl 0, LE, loadrs=0
    nop 0
    BSW_1(r2, r14)              // B (6 cyc) regs are saved, switch to bank 1
    ;;

    SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2   now it's safe to re-enable intr.-collection
                        // M0   ensure interruption collection is on
    movl r3=ia64_ret_from_syscall       // X
    ;;
    mov rp=r3               // I0   set the real return addr
(p10)   br.cond.spnt.many ia64_ret_from_syscall // B    return if bad call-frame or r15 is a NaT

    SSM_PSR_I(p15, p15, r16)        // M2   restore psr.i
(p14)   br.call.sptk.many b6=b6         // B    invoke syscall-handler (ignore return addr)
    br.cond.spnt.many ia64_trace_syscall    // B    do syscall-tracing thingamagic
    // NOT REACHED
///////////////////////////////////////////////////////////////////////
    // On entry, we optimistically assumed that we're coming from user-space.
    // For the rare cases where a system-call is done from within the kernel,
    // we fix things up at this point:
.break_fixup:
    add r1=-IA64_PT_REGS_SIZE,sp        // A    allocate space for pt_regs structure
    mov ar.rnat=r24             // M2   restore kernel's AR.RNAT
    ;;
    mov ar.bspstore=r23         // M2   restore kernel's AR.BSPSTORE
    br.cond.sptk .back_from_break_fixup
END(break_fault)

    .org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
    /* interrupt handler has become too big to fit this area. */
    br.sptk.many __interrupt
END(interrupt)

    .org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
    DBG_FAULT(13)
    FAULT(13)

    .org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
    DBG_FAULT(14)
    FAULT(14)

    /*
     * There is no particular reason for this code to be here, other than that
     * there happens to be space here that would go unused otherwise.  If this
     * fault ever gets "unreserved", simply move the following code to a more
     * suitable spot...
     *
     * ia64_syscall_setup() is a separate subroutine so that it can
     *  allocate stacked registers so it can safely demine any
     *  potential NaT values from the input registers.
     *
     * On entry:
     *  - executing on bank 0 or bank 1 register set (doesn't matter)
     *  -  r1: stack pointer
     *  -  r2: current task pointer
     *  -  r3: preserved
     *  - r11: original contents (saved ar.pfs to be saved)
     *  - r12: original contents (sp to be saved)
     *  - r13: original contents (tp to be saved)
     *  - r15: original contents (syscall # to be saved)
     *  - r18: saved bsp (after switching to kernel stack)
     *  - r19: saved b6
     *  - r20: saved r1 (gp)
     *  - r21: saved ar.fpsr
     *  - r22: kernel's register backing store base (krbs_base)
     *  - r23: saved ar.bspstore
     *  - r24: saved ar.rnat
     *  - r25: saved ar.unat
     *  - r26: saved ar.pfs
     *  - r27: saved ar.rsc
     *  - r28: saved cr.iip
     *  - r29: saved cr.ipsr
     *  - r30: ar.itc for accounting (don't touch)
     *  - r31: saved pr
     *  -  b0: original contents (to be saved)
     * On exit:
     *  -  p10: TRUE if syscall is invoked with more than 8 out
     *      registers or r15's NaT is true
     *  -  r1: kernel's gp
     *  -  r3: preserved (same as on entry)
     *  -  r8: -EINVAL if p10 is true
     *  - r12: points to kernel stack
     *  - r13: points to current task
     *  - r14: preserved (same as on entry)
     *  - p13: preserved
     *  - p15: TRUE if interrupts need to be re-enabled
     *  - ar.fpsr: set to kernel settings
     *  -  b6: preserved (same as on entry)
     */
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
    st8 [r1]=r19                // save b6
    add r16=PT(CR_IPSR),r1          // initialize first base pointer
    add r17=PT(R11),r1          // initialize second base pointer
    ;;
    alloc r19=ar.pfs,8,0,0,0        // ensure in0-in7 are writable
    st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)    // save cr.ipsr
    tnat.nz p8,p0=in0

    st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)  // save r11
    tnat.nz p9,p0=in1
(pKStk) mov r18=r0              // make sure r18 isn't NaT
    ;;

    st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
    st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)    // save cr.iip
    mov r28=b0              // save b0 (2 cyc)
    ;;

    st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)    // save ar.unat
    dep r19=0,r19,38,26         // clear all bits but 0..37 [I0]
(p8)    mov in0=-1
    ;;

    st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)    // store ar.pfs.pfm in cr.ifs
    extr.u r11=r19,7,7  // I0       // get sol of ar.pfs
    and r8=0x7f,r19     // A        // get sof of ar.pfs

    st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
    tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
(p9)    mov in1=-1
    ;;

(pUStk) sub r18=r18,r22             // r18=RSE.ndirty*8
    tnat.nz p10,p0=in2
    add r11=8,r11
    ;;
(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16     // skip over ar_rnat field
(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
    tnat.nz p11,p0=in3
    ;;
(p10)   mov in2=-1
    tnat.nz p12,p0=in4              // [I0]
(p11)   mov in3=-1
    ;;
(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT)    // save ar.rnat
(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)    // save ar.bspstore
    shl r18=r18,16              // compute ar.rsc to be used for "loadrs"
    ;;
    st8 [r16]=r31,PT(LOADRS)-PT(PR)     // save predicates
    st8 [r17]=r28,PT(R1)-PT(B0)     // save b0
    tnat.nz p13,p0=in5              // [I0]
    ;;
    st8 [r16]=r18,PT(R12)-PT(LOADRS)    // save ar.rsc value for "loadrs"
    st8.spill [r17]=r20,PT(R13)-PT(R1)  // save original r1
(p12)   mov in4=-1
    ;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)    // save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)        // save r13
(p13)   mov in5=-1
    ;;
    st8 [r16]=r21,PT(R8)-PT(AR_FPSR)    // save ar.fpsr
    tnat.nz p13,p0=in6
    cmp.lt p10,p9=r11,r8    // frame size can't be more than local+8
    ;;
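    // (p10 <- sof > sol+8, i.e. more than 8 output registers: a bad
    //  call-frame, for which r8 is set to -EINVAL below)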
    mov r8=1
(p9)    tnat.nz p10,p0=r15
    adds r12=-16,r1     // switch to kernel memory stack (with 16 bytes of scratch)

    st8.spill [r17]=r15         // save r15
    tnat.nz p8,p0=in7
    nop.i 0

    mov r13=r2              // establish `current'
    movl r1=__gp                // establish kernel global pointer
    ;;
    st8 [r16]=r8        // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)   mov in6=-1
(p8)    mov in7=-1

    cmp.eq pSys,pNonSys=r0,r0       // set pSys=1, pNonSys=0
    movl r17=FPSR_DEFAULT
    ;;
    mov.m ar.fpsr=r17           // set ar.fpsr to kernel default value
(p10)   mov r8=-EINVAL
    br.ret.sptk.many b7
END(ia64_syscall_setup)

    .org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
    DBG_FAULT(15)
    FAULT(15)

    .org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
    DBG_FAULT(16)
    FAULT(16)

#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
    /*
     * There is no particular reason for this code to be here, other than
     * that there happens to be space here that would go unused otherwise.
     * If this fault ever gets "unreserved", simply move the following
     * code to a more suitable spot...
     *
     * account_sys_enter is called from SAVE_MIN* macros if accounting is
     * enabled and if the macro is entered from user mode.
     */
GLOBAL_ENTRY(account_sys_enter)
    // mov.m r20=ar.itc is called in advance, and r13 is current
    add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
    add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
    ;;
    ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP   // time at last check in kernel
    ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE   // time when we left the kernel
    ;;
    ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME   // cumulated stime
    ld8 r21=[r17]               // cumulated utime
    sub r22=r19,r18             // stime before leaving the kernel
    ;;
    st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP   // update stamp
    sub r18=r20,r19             // elapsed time in user mode
    ;;
    add r23=r23,r22             // sum stime
    add r21=r21,r18             // sum utime
    ;;
    st8 [r16]=r23               // update stime
    st8 [r17]=r21               // update utime
    ;;
    br.ret.sptk.many rp
END(account_sys_enter)
#endif

    .org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
    DBG_FAULT(17)
    FAULT(17)

    .org ia64_ivt+0x4800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
    DBG_FAULT(18)
    FAULT(18)

    .org ia64_ivt+0x4c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
    DBG_FAULT(19)
    FAULT(19)

//
// --- End of long entries, Beginning of short entries
//
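// (Each short entry is 16 bundles = 256 bytes, hence the 0x100 spacing of
//  the .org directives below; the long entries above were 64 bundles and
//  thus 0x400 apart.)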

    .org ia64_ivt+0x5000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
ENTRY(page_not_present)
    DBG_FAULT(20)
    MOV_FROM_IFA(r16)
    RSM_PSR_DT
    /*
     * The Linux page fault handler doesn't expect non-present pages to be in
     * the TLB.  Flush the existing entry now, so we meet that expectation.
     */
    mov r17=PAGE_SHIFT<<2
    ;;
    ptc.l r16,r17
    ;;
    mov r31=pr
    srlz.d
    br.sptk.many page_fault
END(page_not_present)

    .org ia64_ivt+0x5100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
ENTRY(key_permission)
    DBG_FAULT(21)
    MOV_FROM_IFA(r16)
    RSM_PSR_DT
    mov r31=pr
    ;;
    srlz.d
    br.sptk.many page_fault
END(key_permission)

    .org ia64_ivt+0x5200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(iaccess_rights)
    DBG_FAULT(22)
    MOV_FROM_IFA(r16)
    RSM_PSR_DT
    mov r31=pr
    ;;
    srlz.d
    br.sptk.many page_fault
END(iaccess_rights)

    .org ia64_ivt+0x5300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(daccess_rights)
    DBG_FAULT(23)
    MOV_FROM_IFA(r16)
    RSM_PSR_DT
    mov r31=pr
    ;;
    srlz.d
    br.sptk.many page_fault
END(daccess_rights)

    .org ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(general_exception)
    DBG_FAULT(24)
    MOV_FROM_ISR(r16)
    mov r31=pr
    ;;
    cmp4.eq p6,p0=0,r16
(p6)    br.sptk.many dispatch_illegal_op_fault
    ;;
    mov r19=24      // fault number
    br.sptk.many dispatch_to_fault_handler
END(general_exception)

    .org ia64_ivt+0x5500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(disabled_fp_reg)
    DBG_FAULT(25)
    rsm psr.dfh     // ensure we can access fph
    ;;
    srlz.d
    mov r31=pr
    mov r19=25
    br.sptk.many dispatch_to_fault_handler
END(disabled_fp_reg)

    .org ia64_ivt+0x5600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
    DBG_FAULT(26)

    MOV_FROM_IPSR(p0, r16)
    MOV_FROM_ISR(r17)
    mov r31=pr              // save PR
    ;;
    and r18=0xf,r17             // r18 = cr.isr.code{3:0}
    tbit.z p6,p0=r17,IA64_ISR_NA_BIT
    ;;
    cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
    dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6)    br.cond.spnt 1f     // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
    ;;
    MOV_TO_IPSR(p0, r16, r18)
    mov pr=r31,-1
    ;;
    RFI

1:  mov pr=r31,-1
    ;;
    FAULT(26)
END(nat_consumption)

    .org ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(speculation_vector)
    DBG_FAULT(27)
    /*
     * A [f]chk.[as] instruction needs to take the branch to the recovery code but
     * this part of the architecture is not implemented in hardware on some CPUs, such
     * as Itanium.  Thus, in general we need to emulate the behavior.  IIM contains
     * the relative target (not yet sign extended).  So after sign extending it we
     * simply add it to IIP.  We also need to reset the EI field of the IPSR to zero,
     * i.e., the slot to restart into.
     *
     * cr.iim contains zero_ext(imm21)
     */
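    /*
     * Net effect of the two shifts below: r18 = sign_ext(imm21) << 4,
     * i.e. the branch displacement in bytes (imm21 counts 16-byte
     * bundles).
     */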
    MOV_FROM_IIM(r18)
    ;;
    MOV_FROM_IIP(r17)
    shl r18=r18,43          // put sign bit in position (43=64-21)
    ;;

    MOV_FROM_IPSR(p0, r16)
    shr r18=r18,39          // sign extend (39=43-4)
    ;;

    add r17=r17,r18         // now add the offset
    ;;
    MOV_TO_IIP(r17, r19)
    dep r16=0,r16,41,2      // clear EI
    ;;

    MOV_TO_IPSR(p0, r16, r19)
    ;;

    RFI
END(speculation_vector)

    .org ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
    DBG_FAULT(28)
    FAULT(28)

    .org ia64_ivt+0x5900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(debug_vector)
    DBG_FAULT(29)
    FAULT(29)
END(debug_vector)

    .org ia64_ivt+0x5a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
    DBG_FAULT(30)
    mov r31=pr      // prepare to save predicates
    ;;
    br.sptk.many dispatch_unaligned_handler
END(unaligned_access)

    .org ia64_ivt+0x5b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(unsupported_data_reference)
    DBG_FAULT(31)
    FAULT(31)
END(unsupported_data_reference)

    .org ia64_ivt+0x5c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
ENTRY(floating_point_fault)
    DBG_FAULT(32)
    FAULT(32)
END(floating_point_fault)

    .org ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(floating_point_trap)
    DBG_FAULT(33)
    FAULT(33)
END(floating_point_trap)

    .org ia64_ivt+0x5e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(lower_privilege_trap)
    DBG_FAULT(34)
    FAULT(34)
END(lower_privilege_trap)

    .org ia64_ivt+0x5f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(taken_branch_trap)
    DBG_FAULT(35)
    FAULT(35)
END(taken_branch_trap)

    .org ia64_ivt+0x6000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(single_step_trap)
    DBG_FAULT(36)
    FAULT(36)
END(single_step_trap)

    .org ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Reserved
    DBG_FAULT(37)
    FAULT(37)

    .org ia64_ivt+0x6200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
    DBG_FAULT(38)
    FAULT(38)

    .org ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
    DBG_FAULT(39)
    FAULT(39)

    .org ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
    DBG_FAULT(40)
    FAULT(40)

    .org ia64_ivt+0x6500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
    DBG_FAULT(41)
    FAULT(41)

    .org ia64_ivt+0x6600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
    DBG_FAULT(42)
    FAULT(42)

    .org ia64_ivt+0x6700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
    DBG_FAULT(43)
    FAULT(43)

    .org ia64_ivt+0x6800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
    DBG_FAULT(44)
    FAULT(44)

    .org ia64_ivt+0x6900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(ia32_exception)
    DBG_FAULT(45)
    FAULT(45)
END(ia32_exception)

    .org ia64_ivt+0x6a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
ENTRY(ia32_intercept)
    DBG_FAULT(46)
    FAULT(46)
END(ia32_intercept)

    .org ia64_ivt+0x6b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt  (74)
ENTRY(ia32_interrupt)
    DBG_FAULT(47)
    FAULT(47)
END(ia32_interrupt)

    .org ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
    DBG_FAULT(48)
    FAULT(48)

    .org ia64_ivt+0x6d00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
    DBG_FAULT(49)
    FAULT(49)

    .org ia64_ivt+0x6e00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
    DBG_FAULT(50)
    FAULT(50)

    .org ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
    DBG_FAULT(51)
    FAULT(51)

    .org ia64_ivt+0x7000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7000 Entry 52 (size 16 bundles) Reserved
    DBG_FAULT(52)
    FAULT(52)

    .org ia64_ivt+0x7100
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
    DBG_FAULT(53)
    FAULT(53)

    .org ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
    DBG_FAULT(54)
    FAULT(54)

    .org ia64_ivt+0x7300
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
    DBG_FAULT(55)
    FAULT(55)

    .org ia64_ivt+0x7400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
    DBG_FAULT(56)
    FAULT(56)

    .org ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
    DBG_FAULT(57)
    FAULT(57)

    .org ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
    DBG_FAULT(58)
    FAULT(58)

    .org ia64_ivt+0x7700
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
    DBG_FAULT(59)
    FAULT(59)

    .org ia64_ivt+0x7800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
    DBG_FAULT(60)
    FAULT(60)

    .org ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
    DBG_FAULT(61)
    FAULT(61)

    .org ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
    DBG_FAULT(62)
    FAULT(62)

    .org ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
    DBG_FAULT(63)
    FAULT(63)

    .org ia64_ivt+0x7c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
1497     DBG_FAULT(64)
1498     FAULT(64)
1499 
1500     .org ia64_ivt+0x7d00
1501 /////////////////////////////////////////////////////////////////////////////////////////
1502 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1503     DBG_FAULT(65)
1504     FAULT(65)
1505 
1506     .org ia64_ivt+0x7e00
1507 /////////////////////////////////////////////////////////////////////////////////////////
1508 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1509     DBG_FAULT(66)
1510     FAULT(66)
1511 
1512     .org ia64_ivt+0x7f00
1513 /////////////////////////////////////////////////////////////////////////////////////////
1514 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1515     DBG_FAULT(67)
1516     FAULT(67)
1517 
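    /*
     * For reference: the .org directives above pin each entry at its
     * architected offset.  Entries 0..19 are 64 bundles (64 * 16 bytes =
     * 0x400) each, entries 20..67 are 16 bundles (0x100 bytes) each, so
     * entry n >= 20 sits at offset 0x5000 + (n - 20) * 0x100.  The C
     * helper below is illustrative only -- ivt_entry_offset() is not a
     * kernel function, merely a restatement of that arithmetic:
     *
     *  static inline unsigned long ivt_entry_offset(unsigned int n)
     *  {
     *      return (n < 20) ? n * 0x400                   // long entries
     *                      : 0x5000 + (n - 20) * 0x100;  // short entries
     *  }
     *
     * ivt_entry_offset(35) == 0x5f00, ivt_entry_offset(45) == 0x6900, and
     * ivt_entry_offset(67) == 0x7f00; adding the final 0x100 gives 0x8000,
     * i.e. the table ends exactly at the 32KB mark.
     */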
1518     //-----------------------------------------------------------------------------------
1519     // call ia64_do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
1520 ENTRY(page_fault)
1521     SSM_PSR_DT_AND_SRLZ_I
1522     ;;
1523     SAVE_MIN_WITH_COVER
1524     alloc r15=ar.pfs,0,0,3,0
1525     MOV_FROM_IFA(out0)
1526     MOV_FROM_ISR(out1)
1527     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
1528     adds r3=8,r2                // set up second base pointer
1529     SSM_PSR_I(p15, p15, r14)        // restore psr.i
1530     movl r14=ia64_leave_kernel
1531     ;;
1532     SAVE_REST
1533     mov rp=r14
1534     ;;
1535     adds out2=16,r12            // out2 = pointer to pt_regs
1536     br.call.sptk.many b6=ia64_do_page_fault // ignore return address
1537 END(page_fault)
1538 
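    /*
     * The marshalling above implies the C interface sketched below:
     * out0 = cr.ifa (faulting address), out1 = cr.isr, out2 = &pt_regs.
     * This assumes the handler's signature in arch/ia64/mm/fault.c; the
     * body shown is a placeholder, not the kernel's implementation:
     *
     *  #include <asm/ptrace.h>          // struct pt_regs
     *
     *  void ia64_do_page_fault (unsigned long address, unsigned long isr,
     *                           struct pt_regs *regs)
     *  {
     *      // resolve the fault against the current address space, or
     *      // deliver a signal / run the exception-table fixup
     *  }
     */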
1539 ENTRY(non_syscall)
1540     mov ar.rsc=r27          // restore ar.rsc before SAVE_MIN_WITH_COVER
1541     ;;
1542     SAVE_MIN_WITH_COVER
1543 
1544     // There is no particular reason for this code to be here, other than that
1545     // there happens to be space here that would go unused otherwise.  If this
1546     // fault ever gets "unreserved", simply move the following code to a more
1547     // suitable spot...
1548 
1549     alloc r14=ar.pfs,0,0,2,0
1550     MOV_FROM_IIM(out0)
1551     add out1=16,sp
1552     adds r3=8,r2            // set up second base pointer for SAVE_REST
1553 
1554     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
1555                     // guarantee that interruption collection is on
1556     SSM_PSR_I(p15, p15, r15)    // restore psr.i
1557     movl r15=ia64_leave_kernel
1558     ;;
1559     SAVE_REST
1560     mov rp=r15
1561     ;;
1562     br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1563 END(non_syscall)
1564 
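    /*
     * non_syscall reaches C with out0 = cr.iim (the immediate operand of
     * the offending break instruction) and out1 = &pt_regs.  A sketch of
     * the callee, assuming the signature used in arch/ia64/kernel/traps.c
     * (body elided to a summary):
     *
     *  void ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
     *  {
     *      // map the break immediate to a signal, e.g. a debugger
     *      // breakpoint becomes SIGTRAP and unknown values SIGILL
     *  }
     */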
1565 ENTRY(__interrupt)
1566     DBG_FAULT(12)
1567     mov r31=pr      // prepare to save predicates
1568     ;;
1569     SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
1570     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
1571                 // ensure everybody knows psr.ic is back on
1572     adds r3=8,r2        // set up second base pointer for SAVE_REST
1573     ;;
1574     SAVE_REST
1575     ;;
1576     MCA_RECOVER_RANGE(interrupt)
1577     alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
1578     MOV_FROM_IVR(out0, r8)  // pass cr.ivr as first arg
1579     add out1=16,sp      // pass pointer to pt_regs as second arg
1580     ;;
1581     srlz.d          // make sure we see the effect of cr.ivr
1582     movl r14=ia64_leave_kernel
1583     ;;
1584     mov rp=r14
1585     br.call.sptk.many b6=ia64_handle_irq
1586 END(__interrupt)
1587 
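    /*
     * __interrupt hands the C irq code the vector read from cr.ivr plus a
     * pointer to pt_regs.  Sketch, assuming the signature in
     * arch/ia64/kernel/irq_ia64.c, where ia64_vector is a small-integer
     * typedef (asm/hw_irq.h):
     *
     *  typedef unsigned char ia64_vector;   // assumption: u8 typedef
     *
     *  void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
     *  {
     *      // dispatch this vector, then keep re-reading cr.ivr and
     *      // dispatching until it returns the spurious-interrupt vector
     *  }
     */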
1588     /*
1589      * There is no particular reason for this code to be here, other than that
1590      * there happens to be space here that would go unused otherwise.  If this
1591      * fault ever gets "unreserved", simply move the following code to a more
1592      * suitable spot...
1593      */
1594 
1595 ENTRY(dispatch_unaligned_handler)
1596     SAVE_MIN_WITH_COVER
1597     ;;
1598     alloc r14=ar.pfs,0,0,2,0        // now it's safe (must be first in insn group!)
1599     MOV_FROM_IFA(out0)
1600     adds out1=16,sp
1601 
1602     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1603                         // guarantee that interruption collection is on
1604     SSM_PSR_I(p15, p15, r3)         // restore psr.i
1605     adds r3=8,r2                // set up second base pointer
1606     ;;
1607     SAVE_REST
1608     movl r14=ia64_leave_kernel
1609     ;;
1610     mov rp=r14
1611     br.sptk.many ia64_prepare_handle_unaligned
1612 END(dispatch_unaligned_handler)
1613 
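    /*
     * This dispatcher branches to an assembly thunk
     * (ia64_prepare_handle_unaligned) rather than calling C directly; the
     * thunk eventually invokes the C emulator with out0 = cr.ifa and
     * out1 = &pt_regs.  Sketch, assuming the signature in
     * arch/ia64/kernel/unaligned.c:
     *
     *  void ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
     *  {
     *      // decode the faulting bundle and emulate the misaligned
     *      // load/store, or post SIGBUS when emulation is not allowed
     *  }
     */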
1614     /*
1615      * There is no particular reason for this code to be here, other than that
1616      * there happens to be space here that would go unused otherwise.  If this
1617      * fault ever gets "unreserved", simply move the following code to a more
1618      * suitable spot...
1619      */
1620 
1621 ENTRY(dispatch_to_fault_handler)
1622     /*
1623      * Input:
1624      *  psr.ic: off
1625      *  r19:    fault vector number (e.g., 24 for General Exception)
1626      *  r31:    contains saved predicates (pr)
1627      */
1628     SAVE_MIN_WITH_COVER_R19
1629     alloc r14=ar.pfs,0,0,5,0
1630     MOV_FROM_ISR(out1)
1631     MOV_FROM_IFA(out2)
1632     MOV_FROM_IIM(out3)
1633     MOV_FROM_ITIR(out4)
1634     ;;
1635     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
1636                         // guarantee that interruption collection is on
1637     mov out0=r15
1638     ;;
1639     SSM_PSR_I(p15, p15, r3)         // restore psr.i
1640     adds r3=8,r2                // set up second base pointer for SAVE_REST
1641     ;;
1642     SAVE_REST
1643     movl r14=ia64_leave_kernel
1644     ;;
1645     mov rp=r14
1646     br.call.sptk.many b6=ia64_fault
1647 END(dispatch_to_fault_handler)
1648 
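    /*
     * Every FAULT(n) stub above funnels into this dispatcher, which in
     * turn calls a single C entry point: out0 = vector number (r19,
     * carried into r15 by SAVE_MIN_WITH_COVER_R19), out1..out4 =
     * cr.isr/cr.ifa/cr.iim/cr.itir.  Sketch of the first five parameters,
     * assuming the declaration in arch/ia64/kernel/traps.c (the real
     * function declares further dummy arguments plus an on-stack pt_regs,
     * omitted here):
     *
     *  void ia64_fault (unsigned long vector, unsigned long isr,
     *                   unsigned long ifa, unsigned long iim,
     *                   unsigned long itir)
     *  {
     *      // translate the vector number into a signal or die()
     *  }
     */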
1649     /*
1650      * Squatting in this space ...
1651      *
1652      * This special-case dispatcher for illegal-operation faults allows preserved
1653      * registers to be modified through a callback function (asm only) that is handed
1654      * back from the fault handler in r8. Up to three arguments can be passed to the
1655      * callback function by returning an aggregate with the callback as its first
1656      * element, followed by the arguments.
1657      */
1658 ENTRY(dispatch_illegal_op_fault)
1659     .prologue
1660     .body
1661     SAVE_MIN_WITH_COVER
1662     SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1663                 // guarantee that interruption collection is on
1664     ;;
1665     SSM_PSR_I(p15, p15, r3) // restore psr.i
1666     adds r3=8,r2    // set up second base pointer for SAVE_REST
1667     ;;
1668     alloc r14=ar.pfs,0,0,1,0    // must be first in insn group
1669     mov out0=ar.ec
1670     ;;
1671     SAVE_REST
1672     PT_REGS_UNWIND_INFO(0)
1673     ;;
1674     br.call.sptk.many rp=ia64_illegal_op_fault
1675 .ret0:  ;;
1676     alloc r14=ar.pfs,0,0,3,0    // must be first in insn group
1677     mov out0=r9
1678     mov out1=r10
1679     mov out2=r11
1680     movl r15=ia64_leave_kernel
1681     ;;
1682     mov rp=r15
1683     mov b6=r8
1684     ;;
1685     cmp.ne p6,p0=0,r8
1686 (p6)    br.call.dpnt.many b6=b6     // call returns to ia64_leave_kernel
1687     br.sptk.many ia64_leave_kernel
1688 END(dispatch_illegal_op_fault)
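    /*
     * The callback protocol above leans on the ia64 software conventions:
     * a small aggregate is returned in r8..r11, so the struct sketched
     * below comes back with .fkt in r8 (moved to b6) and .arg1..arg3 in
     * r9..r11 (moved to out0..out2).  Names follow
     * arch/ia64/kernel/traps.c; the handler's remaining register/stack
     * arguments are elided from this sketch:
     *
     *  struct illegal_op_return {
     *      unsigned long fkt;              // callback, or 0 for none
     *      unsigned long arg1, arg2, arg3; // arguments handed to it
     *  };
     *
     *  struct illegal_op_return
     *  ia64_illegal_op_fault (unsigned long ec);  // further args elided
     */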