/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>

    .section    .text..tlbmiss
    .balign     4

    .globl      __entry_insn_mmu_miss
__entry_insn_mmu_miss:
    break
    nop

    .globl      __entry_insn_mmu_exception
__entry_insn_mmu_exception:
    break
    nop

    .globl      __entry_data_mmu_miss
__entry_data_mmu_miss:
    break
    nop

    .globl      __entry_data_mmu_exception
__entry_data_mmu_exception:
    break
    nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
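# In outline, the check below amounts to this (illustrative pseudocode only;
# PCSR/PSR are the exception shadow PC/status registers, and
# __break_tlb_miss_real_return_info holds a saved {PCSR, PSR} pair):
#
#	if (pcsr == &__break_tlb_miss_return_break) {
#		/* the miss interrupted a single-stepped instruction; recover
#		 * the real return PC and PSR before taking the slow path */
#		pcsr = __break_tlb_miss_real_return_info[0];
#		psr  = __break_tlb_miss_real_return_info[1];
#	}
#	ccr  = scr2;				/* restore the saved CCR */
#	scr2 = gr29;				/* stash the EAR0 value */
#	gr29 = __kernel_current_task;		/* restore GR29 */
#	goto __entry_kernel_handle_mmu_fault[_sstep];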
    .type       __tlb_kernel_fault,@function
__tlb_kernel_fault:
    # see if we're supposed to re-enable single-step mode upon return
    sethi.p     %hi(__break_tlb_miss_return_break),gr30
    setlo       %lo(__break_tlb_miss_return_break),gr30
    movsg       pcsr,gr31

    subcc       gr31,gr30,gr0,icc0
    beq     icc0,#0,__tlb_kernel_fault_sstep

    movsg       scr2,gr30
    movgs       gr30,ccr
    movgs       gr29,scr2           /* save EAR0 value */
    sethi.p     %hi(__kernel_current_task),gr29
    setlo       %lo(__kernel_current_task),gr29
    ldi.p       @(gr29,#0),gr29         /* restore GR29 */

    bra     __entry_kernel_handle_mmu_fault

    # we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
    sethi.p     %hi(__break_tlb_miss_real_return_info),gr30
    setlo       %lo(__break_tlb_miss_real_return_info),gr30
    lddi        @(gr30,0),gr30
    movgs       gr30,pcsr
    movgs       gr31,psr

    movsg       scr2,gr30
    movgs       gr30,ccr
    movgs       gr29,scr2           /* save EAR0 value */
    sethi.p     %hi(__kernel_current_task),gr29
    setlo       %lo(__kernel_current_task),gr29
    ldi.p       @(gr29,#0),gr29         /* restore GR29 */
    bra     __entry_kernel_handle_mmu_fault_sstep

    .size       __tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
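# The userspace analogue of __tlb_kernel_fault; GR28 (not GR29) carries the
# faulting address, and there is no kernel task pointer to restore. Roughly
# (illustrative pseudocode):
#
#	if (pcsr == &__break_tlb_miss_return_break)
#		{pcsr, psr} = __break_tlb_miss_real_return_info;
#	ccr = scr2;
#	goto __entry_uspace_handle_mmu_fault[_sstep];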
    .type       __tlb_user_fault,@function
__tlb_user_fault:
    # see if we're supposed to re-enable single-step mode upon return
    sethi.p     %hi(__break_tlb_miss_return_break),gr30
    setlo       %lo(__break_tlb_miss_return_break),gr30
    movsg       pcsr,gr31
    subcc       gr31,gr30,gr0,icc0
    beq     icc0,#0,__tlb_user_fault_sstep

    movsg       scr2,gr30
    movgs       gr30,ccr
    bra     __entry_uspace_handle_mmu_fault

    # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
    sethi.p     %hi(__break_tlb_miss_real_return_info),gr30
    setlo       %lo(__break_tlb_miss_real_return_info),gr30
    lddi        @(gr30,0),gr30
    movgs       gr30,pcsr
    movgs       gr31,psr
    movsg       scr2,gr30
    movgs       gr30,ccr
    bra     __entry_uspace_handle_mmu_fault_sstep

    .size       __tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
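# The fast-path test below relies on GR31 arriving as EAR0 ^ SCR0: the top
# six bits of that XOR are all zero iff the faulting address lies in the
# 64MB region whose page table is already mapped through DAMR4. In rough C
# (illustrative only):
#
#	if ((ear0 ^ scr0) >> 26)		/* different 64MB region? */
#		goto __itlb_k_PTD_miss;		/* remap the right PTD first */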
    .globl      __entry_kernel_insn_tlb_miss
    .type       __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
    sethi.p     %hi(0xe1200004),gr30
    setlo       %lo(0xe1200004),gr30
    st      gr0,@(gr30,gr0)
    sethi.p     %hi(0xffc00100),gr30
    setlo       %lo(0xffc00100),gr30
    sth     gr30,@(gr30,gr0)
    membar
#endif

    movsg       ccr,gr30            /* save CCR */
    movgs       gr30,scr2

    # see if the cached page table mapping is appropriate
    srlicc.p    gr31,#26,gr0,icc0
    setlos      0x3ffc,gr30
    srli.p      gr29,#12,gr31           /* use EAR0[25:14] as PTE index */
    bne     icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
    # access the PTD with EAR0[25:14]
    # - DAMLR4 points to the virtual address of the appropriate page table
    # - the PTD holds 4096 PTEs
    # - the PTD must be accessed uncached
    # - the PTE must be marked accessed if it was valid
    #
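    # Index arithmetic for the lookup below, in rough C (illustrative only):
    #
    #	offset = (ear0 >> 12) & 0x3ffc;	/* == ((ear0 >> 14) & 0xfff) << 2 */
    #	pte = *(uint32_t *)(damlr4 + offset);
    #
    # i.e. EAR0[25:14] picks one of the 4096 word-sized PTEs in the 16KB PTD.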
    and     gr31,gr30,gr31
    movsg       damlr4,gr30
    add     gr30,gr31,gr31
    ldi     @(gr31,#0),gr30         /* fetch the PTE */
    andicc      gr30,#_PAGE_PRESENT,gr0,icc0
    ori.p       gr30,#_PAGE_ACCESSED,gr30
    beq     icc0,#0,__tlb_kernel_fault  /* jump if PTE invalid */
    sti.p       gr30,@(gr31,#0)         /* update the PTE */
    andi        gr30,#~_PAGE_ACCESSED,gr30

    # we're using IAMR1 as an extra TLB entry
    # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
    # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
    # - IAMPR1 has no WP bit, and we mustn't lose WP information
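    # The punt, in outline (illustrative pseudocode; per the comments below,
    # tlbpr #4 deletes matching translations and tlbpr #2 saves TPLR/TPPR to
    # the TLB proper; DAMPR1 supplies TPPR since IAMPR1 lacks the WP bit):
    #
    #	if (iampr1 & xAMPRx_V) {
    #		tlbpr_delete(iamlr1);			/* tlbpr #4 */
    #		tlbpr_save(iamlr1, dampr1 | xAMPRx_V);	/* tlbpr #2 */
    #		if (tpxr & TPXR_E)
    #			goto __tlb_kernel_fault;	/* TLB write error */
    #	}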
    movsg       iampr1,gr31
    andicc      gr31,#xAMPRx_V,gr0,icc0
    setlos.p    0xfffff000,gr31
    beq     icc0,#0,__itlb_k_nopunt     /* punt not required */

    movsg       iamlr1,gr31
    movgs       gr31,tplr           /* set TPLR.CXN */
    tlbpr       gr31,gr0,#4,#0          /* delete matches from TLB, IAMR1, DAMR1 */

    movsg       dampr1,gr31
    ori     gr31,#xAMPRx_V,gr31     /* entry was invalidated by tlbpr #4 */
    movgs       gr31,tppr
    movsg       iamlr1,gr31         /* set TPLR.CXN */
    movgs       gr31,tplr
    tlbpr       gr31,gr0,#2,#0          /* save to the TLB */
    movsg       tpxr,gr31           /* check the TLB write error flag */
    andicc.p    gr31,#TPXR_E,gr0,icc0
    setlos      #0xfffff000,gr31
    bne     icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

    # assemble the new TLB entry
    and     gr29,gr31,gr29
    movsg       cxnr,gr31
    or      gr29,gr31,gr29
    movgs       gr29,iamlr1         /* xAMLR = address | context number */
    movgs       gr30,iampr1
    movgs       gr29,damlr1
    movgs       gr30,dampr1

    # return, restoring registers
    movsg       scr2,gr30
    movgs       gr30,ccr
    sethi.p     %hi(__kernel_current_task),gr29
    setlo       %lo(__kernel_current_task),gr29
    ldi     @(gr29,#0),gr29
    rett        #0
    beq     icc0,#3,0           /* prevent icache prefetch */

    # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
    # appropriate page table and map that instead
    #   - access the PGD with EAR0[31:26]
    #   - DAMLR3 points to the virtual address of the page directory
    #   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
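    # In rough C, the PGD walk below (illustrative only):
    #
    #	pge = *(uint32_t *)(damlr3 + (ear0 >> 26) * 256);
    #	if (!(pge & _PAGE_PRESENT))
    #		goto __tlb_kernel_fault;
    #	dampr4 = pge | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V;
    #	scr0 = ear0 & 0xfc000000;	/* 64MB coverage base for next time */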
__itlb_k_PTD_miss:
    srli        gr29,#26,gr31           /* calculate PGE offset */
    slli        gr31,#8,gr31            /* and clear bottom bits */

    movsg       damlr3,gr30
    ld      @(gr31,gr30),gr30       /* access the PGE */

    andicc.p    gr30,#_PAGE_PRESENT,gr0,icc0
    andicc      gr30,#xAMPRx_SS,gr0,icc1

    # map this PTD instead and record coverage address
    ori.p       gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
    beq     icc0,#0,__tlb_kernel_fault  /* jump if PGE not present */
    slli.p      gr31,#18,gr31
    bne     icc1,#0,__itlb_k_bigpage
    movgs       gr30,dampr4
    movgs       gr31,scr0

    # we can now resume normal service
    setlos      0x3ffc,gr30
    srli.p      gr29,#12,gr31           /* use EAR0[25:14] as PTE index */
    bra     __itlb_k_PTD_mapped

__itlb_k_bigpage:
    break
    nop

    .size       __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
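# This handler mirrors __entry_kernel_insn_tlb_miss step for step; only the
# resources differ: the cached PTD is mapped through DAMR5/DAMLR5, the
# coverage key lives in SCR1, and the punt check reads DAMR1/DAMLR1 rather
# than IAMR1/IAMLR1. The index arithmetic sketched above applies unchanged.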
    .globl      __entry_kernel_data_tlb_miss
    .type       __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
    sethi.p     %hi(0xe1200004),gr30
    setlo       %lo(0xe1200004),gr30
    st      gr0,@(gr30,gr0)
    sethi.p     %hi(0xffc00100),gr30
    setlo       %lo(0xffc00100),gr30
    sth     gr30,@(gr30,gr0)
    membar
#endif

    movsg       ccr,gr30            /* save CCR */
    movgs       gr30,scr2

    # see if the cached page table mapping is appropriate
    srlicc.p    gr31,#26,gr0,icc0
    setlos      0x3ffc,gr30
    srli.p      gr29,#12,gr31           /* use EAR0[25:14] as PTE index */
    bne     icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
    # access the PTD with EAR0[25:14]
    # - DAMLR5 points to the virtual address of the appropriate page table
    # - the PTD holds 4096 PTEs
    # - the PTD must be accessed uncached
    # - the PTE must be marked accessed if it was valid
    #
    and     gr31,gr30,gr31
    movsg       damlr5,gr30
    add     gr30,gr31,gr31
    ldi     @(gr31,#0),gr30         /* fetch the PTE */
    andicc      gr30,#_PAGE_PRESENT,gr0,icc0
    ori.p       gr30,#_PAGE_ACCESSED,gr30
    beq     icc0,#0,__tlb_kernel_fault  /* jump if PTE invalid */
    sti.p       gr30,@(gr31,#0)         /* update the PTE */
    andi        gr30,#~_PAGE_ACCESSED,gr30

    # we're using DAMR1 as an extra TLB entry
    # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
    # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
    movsg       dampr1,gr31
    andicc      gr31,#xAMPRx_V,gr0,icc0
    setlos.p    0xfffff000,gr31
    beq     icc0,#0,__dtlb_k_nopunt     /* punt not required */

    movsg       damlr1,gr31
    movgs       gr31,tplr           /* set TPLR.CXN */
    tlbpr       gr31,gr0,#4,#0          /* delete matches from TLB, IAMR1, DAMR1 */

    movsg       dampr1,gr31
    ori     gr31,#xAMPRx_V,gr31     /* entry was invalidated by tlbpr #4 */
    movgs       gr31,tppr
    movsg       damlr1,gr31         /* set TPLR.CXN */
    movgs       gr31,tplr
    tlbpr       gr31,gr0,#2,#0          /* save to the TLB */
    movsg       tpxr,gr31           /* check the TLB write error flag */
    andicc.p    gr31,#TPXR_E,gr0,icc0
    setlos      #0xfffff000,gr31
    bne     icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

    # assemble the new TLB entry
    and     gr29,gr31,gr29
    movsg       cxnr,gr31
    or      gr29,gr31,gr29
    movgs       gr29,iamlr1         /* xAMLR = address | context number */
    movgs       gr30,iampr1
    movgs       gr29,damlr1
    movgs       gr30,dampr1

    # return, restoring registers
    movsg       scr2,gr30
    movgs       gr30,ccr
    sethi.p     %hi(__kernel_current_task),gr29
    setlo       %lo(__kernel_current_task),gr29
    ldi     @(gr29,#0),gr29
    rett        #0
    beq     icc0,#3,0           /* prevent icache prefetch */

    # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
    # appropriate page table and map that instead
    #   - access the PGD with EAR0[31:26]
    #   - DAMLR3 points to the virtual address of the page directory
    #   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
    srli        gr29,#26,gr31           /* calculate PGE offset */
    slli        gr31,#8,gr31            /* and clear bottom bits */

    movsg       damlr3,gr30
    ld      @(gr31,gr30),gr30       /* access the PGE */

    andicc.p    gr30,#_PAGE_PRESENT,gr0,icc0
    andicc      gr30,#xAMPRx_SS,gr0,icc1

    # map this PTD instead and record coverage address
    ori.p       gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
    beq     icc0,#0,__tlb_kernel_fault  /* jump if PGE not present */
    slli.p      gr31,#18,gr31
    bne     icc1,#0,__dtlb_k_bigpage
    movgs       gr30,dampr5
    movgs       gr31,scr1

    # we can now resume normal service
    setlos      0x3ffc,gr30
    srli.p      gr29,#12,gr31           /* use EAR0[25:14] as PTE index */
    bra     __dtlb_k_PTD_mapped

__dtlb_k_bigpage:
    break
    nop

    .size       __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
    .globl      __entry_user_insn_tlb_miss
    .type       __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
    sethi.p     %hi(0xe1200004),gr30
    setlo       %lo(0xe1200004),gr30
    st      gr0,@(gr30,gr0)
    sethi.p     %hi(0xffc00100),gr30
    setlo       %lo(0xffc00100),gr30
    sth     gr30,@(gr30,gr0)
    membar
#endif

    movsg       ccr,gr30            /* save CCR */
    movgs       gr30,scr2

    # see if the cached page table mapping is appropriate
    srlicc.p    gr31,#26,gr0,icc0
    setlos      0x3ffc,gr30
    srli.p      gr28,#12,gr31           /* use EAR0[25:14] as PTE index */
    bne     icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
    # access the PTD with EAR0[25:14]
    # - DAMLR4 points to the virtual address of the appropriate page table
    # - the PTD holds 4096 PTEs
    # - the PTD must be accessed uncached
    # - the PTE must be marked accessed if it was valid
    #
    and     gr31,gr30,gr31
    movsg       damlr4,gr30
    add     gr30,gr31,gr31
    ldi     @(gr31,#0),gr30         /* fetch the PTE */
    andicc      gr30,#_PAGE_PRESENT,gr0,icc0
    ori.p       gr30,#_PAGE_ACCESSED,gr30
    beq     icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
    sti.p       gr30,@(gr31,#0)         /* update the PTE */
    andi        gr30,#~_PAGE_ACCESSED,gr30

    # we're using IAMR1/DAMR1 as an extra TLB entry
    # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
    movsg       dampr1,gr31
    andicc      gr31,#xAMPRx_V,gr0,icc0
    setlos.p    0xfffff000,gr31
    beq     icc0,#0,__itlb_u_nopunt     /* punt not required */

    movsg       dampr1,gr31
    movgs       gr31,tppr
    movsg       damlr1,gr31         /* set TPLR.CXN */
    movgs       gr31,tplr
    tlbpr       gr31,gr0,#2,#0          /* save to the TLB */
    movsg       tpxr,gr31           /* check the TLB write error flag */
    andicc.p    gr31,#TPXR_E,gr0,icc0
    setlos      #0xfffff000,gr31
    bne     icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

    # assemble the new TLB entry
    and     gr28,gr31,gr28
    movsg       cxnr,gr31
    or      gr28,gr31,gr28
    movgs       gr28,iamlr1         /* xAMLR = address | context number */
    movgs       gr30,iampr1
    movgs       gr28,damlr1
    movgs       gr30,dampr1
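
    # The entry just assembled above is, in effect (sketch; CXNR is the
    # current context number register):
    #
    #	amlr = (ear0 & 0xfffff000) | cxnr;	/* xAMLR = address | CXN */
    #	iamlr1 = damlr1 = amlr;
    #	iampr1 = dampr1 = pte;	/* PTE with _PAGE_ACCESSED cleared again */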

    # return, restoring registers
    movsg       scr2,gr30
    movgs       gr30,ccr
    rett        #0
    beq     icc0,#3,0           /* prevent icache prefetch */

    # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
    # appropriate page table and map that instead
    #   - access the PGD with EAR0[31:26]
    #   - DAMLR3 points to the virtual address of the page directory
    #   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
    srli        gr28,#26,gr31           /* calculate PGE offset */
    slli        gr31,#8,gr31            /* and clear bottom bits */

    movsg       damlr3,gr30
    ld      @(gr31,gr30),gr30       /* access the PGE */

    andicc.p    gr30,#_PAGE_PRESENT,gr0,icc0
    andicc      gr30,#xAMPRx_SS,gr0,icc1

    # map this PTD instead and record coverage address
    ori.p       gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
    beq     icc0,#0,__tlb_user_fault    /* jump if PGE not present */
    slli.p      gr31,#18,gr31
    bne     icc1,#0,__itlb_u_bigpage
    movgs       gr30,dampr4
    movgs       gr31,scr0

    # we can now resume normal service
    setlos      0x3ffc,gr30
    srli.p      gr28,#12,gr31           /* use EAR0[25:14] as PTE index */
    bra     __itlb_u_PTD_mapped

__itlb_u_bigpage:
    break
    nop

    .size       __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
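# This handler matches __entry_user_insn_tlb_miss, with one addition: on a
# PTD miss it first consults the insn-PGE cache (SCR0/DAMR4) before walking
# the PGD, since instruction fetches may well have mapped the page table
# covering this address already - see the sketch at __dtlb_u_PTD_miss below.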
    .globl      __entry_user_data_tlb_miss
    .type       __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
    sethi.p     %hi(0xe1200004),gr30
    setlo       %lo(0xe1200004),gr30
    st      gr0,@(gr30,gr0)
    sethi.p     %hi(0xffc00100),gr30
    setlo       %lo(0xffc00100),gr30
    sth     gr30,@(gr30,gr0)
    membar
#endif

    movsg       ccr,gr30            /* save CCR */
    movgs       gr30,scr2

    # see if the cached page table mapping is appropriate
    srlicc.p    gr31,#26,gr0,icc0
    setlos      0x3ffc,gr30
    srli.p      gr28,#12,gr31           /* use EAR0[25:14] as PTE index */
    bne     icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
    # access the PTD with EAR0[25:14]
    # - DAMLR5 points to the virtual address of the appropriate page table
    # - the PTD holds 4096 PTEs
    # - the PTD must be accessed uncached
    # - the PTE must be marked accessed if it was valid
    #
    and     gr31,gr30,gr31
    movsg       damlr5,gr30

__dtlb_u_using_iPTD:
    add     gr30,gr31,gr31
    ldi     @(gr31,#0),gr30         /* fetch the PTE */
    andicc      gr30,#_PAGE_PRESENT,gr0,icc0
    ori.p       gr30,#_PAGE_ACCESSED,gr30
    beq     icc0,#0,__tlb_user_fault    /* jump if PTE invalid */
    sti.p       gr30,@(gr31,#0)         /* update the PTE */
    andi        gr30,#~_PAGE_ACCESSED,gr30

    # we're using DAMR1 as an extra TLB entry
    # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
    movsg       dampr1,gr31
    andicc      gr31,#xAMPRx_V,gr0,icc0
    setlos.p    0xfffff000,gr31
    beq     icc0,#0,__dtlb_u_nopunt     /* punt not required */

    movsg       dampr1,gr31
    movgs       gr31,tppr
    movsg       damlr1,gr31         /* set TPLR.CXN */
    movgs       gr31,tplr
    tlbpr       gr31,gr0,#2,#0          /* save to the TLB */
    movsg       tpxr,gr31           /* check the TLB write error flag */
    andicc.p    gr31,#TPXR_E,gr0,icc0
    setlos      #0xfffff000,gr31
    bne     icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

    # assemble the new TLB entry
    and     gr28,gr31,gr28
    movsg       cxnr,gr31
    or      gr28,gr31,gr28
    movgs       gr28,iamlr1         /* xAMLR = address | context number */
    movgs       gr30,iampr1
    movgs       gr28,damlr1
    movgs       gr30,dampr1

    # return, restoring registers
    movsg       scr2,gr30
    movgs       gr30,ccr
    rett        #0
    beq     icc0,#3,0           /* prevent icache prefetch */

    # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
    # appropriate page table and map that instead
    #   - first of all, check the insn PGE cache - we may well get a hit there
    #   - access the PGD with EAR0[31:26]
    #   - DAMLR3 points to the virtual address of the page directory
    #   - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
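    # In rough C (illustrative only):
    #
    #	if (!((ear0 ^ scr0) >> 26)) {
    #		/* the insn PTD (DAMR4) already covers this address */
    #		gr31 = (ear0 >> 12) & 0x3ffc;	/* PTE byte offset */
    #		goto __dtlb_u_using_iPTD;	/* with gr30 = damlr4 */
    #	}
    #	/* otherwise walk the PGD just as the other handlers do */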
__dtlb_u_PTD_miss:
    movsg       scr0,gr31           /* consult the insn-PGE-cache key */
    xor     gr28,gr31,gr31
    srlicc      gr31,#26,gr0,icc0
    srli        gr28,#12,gr31           /* use EAR0[25:14] as PTE index */
    bne     icc0,#0,__dtlb_u_iPGE_miss

    # what we're looking for is covered by the insn-PGE-cache
    setlos      0x3ffc,gr30
    and     gr31,gr30,gr31
    movsg       damlr4,gr30
    bra     __dtlb_u_using_iPTD

__dtlb_u_iPGE_miss:
    srli        gr28,#26,gr31           /* calculate PGE offset */
    slli        gr31,#8,gr31            /* and clear bottom bits */

    movsg       damlr3,gr30
    ld      @(gr31,gr30),gr30       /* access the PGE */

    andicc.p    gr30,#_PAGE_PRESENT,gr0,icc0
    andicc      gr30,#xAMPRx_SS,gr0,icc1

    # map this PTD instead and record coverage address
    ori.p       gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
    beq     icc0,#0,__tlb_user_fault    /* jump if PGE not present */
    slli.p      gr31,#18,gr31
    bne     icc1,#0,__dtlb_u_bigpage
    movgs       gr30,dampr5
    movgs       gr31,scr1

    # we can now resume normal service
    setlos      0x3ffc,gr30
    srli.p      gr28,#12,gr31           /* use EAR0[25:14] as PTE index */
    bra     __dtlb_u_PTD_mapped

__dtlb_u_bigpage:
    break
    nop

    .size       __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss