/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/tsb.h>

    .text
    .align      32

kvmap_itlb:
    /* g6: TAG TARGET */
    mov     TLB_TAG_ACCESS, %g4
    ldxa        [%g4] ASI_IMMU, %g4

    /* The kernel executes in context zero, therefore we do not
     * need to clear the context ID bits out of %g4 here.
     */

    /* sun4v_itlb_miss branches here with the missing virtual
     * address already loaded into %g4
     */
kvmap_itlb_4v:

    /* Catch kernel NULL pointer calls.  */
    sethi       %hi(PAGE_SIZE), %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_itlb_longpath
     nop

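    /* Probe the kernel TSB.  On a hit the PTE ends up in %g5 and we
     * branch straight to kvmap_itlb_load; on a miss we fall through
     * to the classification below, with the TSB entry pointer left
     * in %g1 for the refill paths.
     */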
    KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
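    /* Not in the kernel TSB.  Addresses below LOW_OBP_ADDRESS are
     * module/vmalloc mappings and take the kernel page table walk;
     * those in [LOW_OBP_ADDRESS, 1 << 32) are OpenBoot PROM
     * mappings; anything higher falls through to the walk as well.
     */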
    sethi       %hi(LOW_OBP_ADDRESS), %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_itlb_vmalloc_addr
     mov        0x1, %g5
    sllx        %g5, 32, %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_itlb_obp
     nop

kvmap_itlb_vmalloc_addr:
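    /* Walk the kernel page tables for the vaddr in %g4; a missing
     * translation takes the long path.  On success the PTE lands in
     * %g5 and is written into the kernel TSB entry (still pointed
     * to by %g1) before falling into the TLB load below.
     */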
    KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

    TSB_LOCK_TAG(%g1, %g2, %g7)
    TSB_WRITE(%g1, %g5, %g6)

    /* fallthrough to TLB load */

kvmap_itlb_load:

661:    stxa        %g5, [%g0] ASI_ITLB_DATA_IN
    retry
    .section    .sun4v_2insn_patch, "ax"
    .word       661b
    nop
    nop
    .previous

    /* For sun4v the ASI_ITLB_DATA_IN store and the retry
     * instruction get nop'd out and we get here to branch
     * to the sun4v tlb load code.  The registers are set up
     * as follows:
     *
     * %g4: vaddr
     * %g5: PTE
     * %g6: TAG
     *
     * The sun4v TLB load wants the PTE in %g3 so we fix that
     * up here.
     */
    ba,pt       %xcc, sun4v_itlb_load
     mov        %g5, %g3

kvmap_itlb_longpath:

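    /* Slow path: select the trap globals used by the fault code
     * (sun4u toggles PSTATE_AG | PSTATE_MG, the sun4v patch uses
     * SET_GL(1)), record the faulting PC and FAULT_CODE_ITLB, then
     * hand off to the common fault handler.
     */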
661:    rdpr    %pstate, %g5
    wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
    .section .sun4v_2insn_patch, "ax"
    .word   661b
    SET_GL(1)
    nop
    .previous

    rdpr    %tpc, %g5
    ba,pt   %xcc, sparc64_realfault_common
     mov    FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
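    /* Look the address up in the cached OpenBoot PROM translations
     * (likely the prom_trans[] table); a miss takes the long path.
     * A hit leaves the PTE in %g5, which is written into the kernel
     * TSB before branching to the TLB load.
     */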
    OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

    TSB_LOCK_TAG(%g1, %g2, %g7)

    TSB_WRITE(%g1, %g5, %g6)

    ba,pt       %xcc, kvmap_itlb_load
     nop

kvmap_dtlb_obp:
    OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

    TSB_LOCK_TAG(%g1, %g2, %g7)

    TSB_WRITE(%g1, %g5, %g6)

    ba,pt       %xcc, kvmap_dtlb_load
     nop

kvmap_linear_early:
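    /* Early boot handling of linear mapping misses: XOR-ing the
     * vaddr with kern_linear_pte_xor yields the PTE for the linear
     * mapping directly, which is then inserted via the 4MB TSB
     * load path below.
     */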
    sethi       %hi(kern_linear_pte_xor), %g7
    ldx     [%g7 + %lo(kern_linear_pte_xor)], %g2
    ba,pt       %xcc, kvmap_dtlb_tsb4m_load
     xor        %g2, %g4, %g5

    .align      32
kvmap_dtlb_tsb4m_load:
    TSB_LOCK_TAG(%g1, %g2, %g7)
    TSB_WRITE(%g1, %g5, %g6)
    ba,pt       %xcc, kvmap_dtlb_load
     nop

kvmap_dtlb:
    /* %g6: TAG TARGET */
    mov     TLB_TAG_ACCESS, %g4
    ldxa        [%g4] ASI_DMMU, %g4

    /* The kernel executes in context zero, therefore we do not
     * need to clear the context ID bits out of %g4 here.
     */

    /* sun4v_dtlb_miss branches here with the missing virtual
     * address already loaded into %g4
     */
kvmap_dtlb_4v:
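    /* Kernel linear mapping addresses have bit 63 set, so a
     * non-negative vaddr cannot be a linear mapping.
     */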
    brgez,pn    %g4, kvmap_dtlb_nonlinear
     nop

#ifdef CONFIG_DEBUG_PAGEALLOC
    /* Index through the base page size TSB even for linear
     * mappings when using page allocation debugging.
     */
    KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
    /* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
    KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
    /* Linear mapping TSB lookup failed.  Fallthrough to kernel
     * page table based lookup.
     */
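    /* This branch is presumably patched to a nop at boot (hence the
     * .globl) once the kernel page tables cover the linear mapping,
     * so that later misses fall through to the page table walk.
     */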
    .globl      kvmap_linear_patch
kvmap_linear_patch:
    ba,a,pt     %xcc, kvmap_linear_early

kvmap_dtlb_vmalloc_addr:
    KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

    TSB_LOCK_TAG(%g1, %g2, %g7)
    TSB_WRITE(%g1, %g5, %g6)

    /* fallthrough to TLB load */

kvmap_dtlb_load:

661:    stxa        %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
    retry
    .section    .sun4v_2insn_patch, "ax"
    .word       661b
    nop
    nop
    .previous

    /* For sun4v the ASI_DTLB_DATA_IN store and the retry
     * instruction get nop'd out and we get here to branch
     * to the sun4v tlb load code.  The registers are set up
     * as follows:
     *
     * %g4: vaddr
     * %g5: PTE
     * %g6: TAG
     *
     * The sun4v TLB load wants the PTE in %g3 so we fix that
     * up here.
     */
    ba,pt       %xcc, sun4v_dtlb_load
     mov        %g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
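    /* vmemmap (the virtually mapped struct page array) is backed
     * only by the kernel page tables, so resolve it with a direct
     * walk and skip the TSB.
     */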
    KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
    ba,a,pt     %xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
    /* Catch kernel NULL pointer derefs.  */
    sethi       %hi(PAGE_SIZE), %g5
    cmp     %g4, %g5
    bleu,pn     %xcc, kvmap_dtlb_longpath
     nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
    /* Do not use the TSB for vmemmap.  */
    sethi       %hi(VMEMMAP_BASE), %g5
    ldx     [%g5 + %lo(VMEMMAP_BASE)], %g5
    cmp     %g4,%g5
    bgeu,pn     %xcc, kvmap_vmemmap
     nop
#endif

    KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
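    /* Only module/vmalloc addresses remain: anything below
     * MODULES_VADDR or at/above VMALLOC_END is a bogus kernel
     * access and takes the long path.
     */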
    sethi       %hi(MODULES_VADDR), %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_dtlb_longpath
     sethi      %hi(VMALLOC_END), %g5
    ldx     [%g5 + %lo(VMALLOC_END)], %g5
    cmp     %g4, %g5
    bgeu,pn     %xcc, kvmap_dtlb_longpath
     nop

kvmap_check_obp:
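    /* Same OBP window test as kvmap_itlb_tsb_miss above: addresses
     * in [LOW_OBP_ADDRESS, 1 << 32) use the OBP translations,
     * anything else goes through the kernel page table walk.
     */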
    sethi       %hi(LOW_OBP_ADDRESS), %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_dtlb_vmalloc_addr
     mov        0x1, %g5
    sllx        %g5, 32, %g5
    cmp     %g4, %g5
    blu,pn      %xcc, kvmap_dtlb_obp
     nop
    ba,pt       %xcc, kvmap_dtlb_vmalloc_addr
     nop

kvmap_dtlb_longpath:

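    /* Slow path: switch to the trap globals (SET_GL(1) on sun4v),
     * then recover the faulting address; sun4u rereads
     * TLB_TAG_ACCESS via ASI_DMMU, while the sun4v patch reads it
     * from the fault status block addressed by the scratchpad
     * register.  At TL 1 this is an ordinary fault handled by
     * sparc64_realfault_common; at deeper trap levels the miss hit
     * inside another trap handler and is routed through
     * winfix_trampoline.
     */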
661:    rdpr    %pstate, %g5
    wrpr    %g5, PSTATE_AG | PSTATE_MG, %pstate
    .section .sun4v_2insn_patch, "ax"
    .word   661b
    SET_GL(1)
    ldxa        [%g0] ASI_SCRATCHPAD, %g5
    .previous

    rdpr    %tl, %g3
    cmp %g3, 1

661:    mov TLB_TAG_ACCESS, %g4
    ldxa    [%g4] ASI_DMMU, %g5
    .section .sun4v_2insn_patch, "ax"
    .word   661b
    ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
    nop
    .previous

    /* The kernel executes in context zero, therefore we do not
     * need to clear the context ID bits out of %g5 here.
     */

    be,pt   %xcc, sparc64_realfault_common
     mov    FAULT_CODE_DTLB, %g4
    ba,pt   %xcc, winfix_trampoline
     nop