Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* tsb.S: Sparc64 TSB table handling.
0003  *
0004  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
0005  */
0006 
0007 
0008 #include <asm/tsb.h>
0009 #include <asm/hypervisor.h>
0010 #include <asm/page.h>
0011 #include <asm/cpudata.h>
0012 #include <asm/mmu.h>
0013 
0014     .text
0015     .align  32
0016 
0017     /* Invoked from TLB miss handler, we are in the
0018      * MMU global registers and they are setup like
0019      * this:
0020      *
0021      * %g1: TSB entry pointer
0022      * %g2: available temporary
0023      * %g3: FAULT_CODE_{D,I}TLB
0024      * %g4: available temporary
0025      * %g5: available temporary
0026      * %g6: TAG TARGET
0027      * %g7: available temporary, will be loaded by us with
0028      *      the physical address base of the linux page
0029      *      tables for the current address space
0030      */
0031 tsb_miss_dtlb:
     /* Read the faulting virtual address from the D-MMU TLB Tag
      * Access register, then clear everything below PAGE_SHIFT
      * (the context ID bits) with the srlx/sllx pair so %g4 holds
      * the page-aligned missing vaddr on entry to the walk.
      */
0032     mov     TLB_TAG_ACCESS, %g4
0033     ldxa        [%g4] ASI_DMMU, %g4
0034     srlx        %g4, PAGE_SHIFT, %g4
0035     ba,pt       %xcc, tsb_miss_page_table_walk
0036      sllx       %g4, PAGE_SHIFT, %g4    ! delay slot: %g4 = page-aligned vaddr
0037 
0038 tsb_miss_itlb:
     /* Same as tsb_miss_dtlb, but the tag access register is read
      * via the I-MMU ASI.  Both paths join at
      * tsb_miss_page_table_walk with %g4 = page-aligned vaddr.
      */
0039     mov     TLB_TAG_ACCESS, %g4
0040     ldxa        [%g4] ASI_IMMU, %g4
0041     srlx        %g4, PAGE_SHIFT, %g4
0042     ba,pt       %xcc, tsb_miss_page_table_walk
0043      sllx       %g4, PAGE_SHIFT, %g4    ! delay slot: %g4 = page-aligned vaddr
0044 
0045     /* At this point we have:
0046      * %g1 --   PAGE_SIZE TSB entry address
0047      * %g3 --   FAULT_CODE_{D,I}TLB
0048      * %g4 --   missing virtual address
0049      * %g6 --   TAG TARGET (vaddr >> 22)
0050      */
0051 tsb_miss_page_table_walk:
0052     TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
0053 
0054     /* Before committing to a full page table walk,
0055      * check the huge page TSB.
0056      */
0057 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
0058 
     /* Load the huge-page TSB config.  On sun4v the two
      * instructions at 661 are patched at runtime (via the
      * .sun4v_2insn_patch table below) into the scratchpad-register
      * read instead of the trap-block load.
      */
0059 661:    ldx     [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
0060     nop
0061     .section    .sun4v_2insn_patch, "ax"
0062     .word       661b
0063     mov     SCRATCHPAD_UTSBREG2, %g5
0064     ldxa        [%g5] ASI_SCRATCHPAD, %g5
0065     .previous
0066 
     /* -1 means no huge page TSB has been allocated; skip the probe. */
0067     cmp     %g5, -1
0068     be,pt       %xcc, 80f
0069      nop
0070 
0071     /* We need an aligned pair of registers containing 2 values
0072      * which can be easily rematerialized.  %g6 and %g7 foot the
0073      * bill just nicely.  We'll save %g6 away into %g2 for the
0074      * huge page TSB TAG comparison.
0075      *
0076      * Perform a huge page TSB lookup.
0077      */
0078     mov     %g6, %g2            ! stash TAG TARGET for the compare below
0079     and     %g5, 0x7, %g6       ! low 3 bits of config = TSB size field
0080     mov     512, %g7
0081     andn        %g5, 0x7, %g5   ! strip size field -> TSB base address
0082     sllx        %g7, %g6, %g7   ! nentries = 512 << size_field
0083     srlx        %g4, REAL_HPAGE_SHIFT, %g6
0084     sub     %g7, 1, %g7         ! index mask = nentries - 1
0085     and     %g6, %g7, %g6       ! index = (vaddr >> REAL_HPAGE_SHIFT) & mask
0086     sllx        %g6, 4, %g6     ! each TSB entry is 16 bytes
0087     add     %g5, %g6, %g5       ! %g5 = address of candidate entry
0088 
     /* Quad load fills the aligned %g6/%g7 pair: %g6 = tag, %g7 = PTE. */
0089     TSB_LOAD_QUAD(%g5, %g6)
0090     cmp     %g6, %g2
0091     be,a,pt     %xcc, tsb_tlb_reload
0092      mov        %g7, %g5        ! annulled delay slot: PTE -> %g5 on match
0093 
0094     /* No match, remember the huge page TSB entry address,
0095      * and restore %g6 and %g7.
0096      */
0097     TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
0098     srlx        %g4, 22, %g6    ! rematerialize TAG TARGET
0099 80: stx     %g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]  ! entry addr, or -1 if no huge TSB
0100 
0101 #endif
0102 
0103     ldx     [%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
0104 
0105     /* At this point we have:
0106      * %g1 --   TSB entry address
0107      * %g3 --   FAULT_CODE_{D,I}TLB
0108      * %g4 --   missing virtual address
0109      * %g6 --   TAG TARGET (vaddr >> 22)
0110      * %g7 --   page table physical address
0111      *
0112      * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
0113      * TSB both lack a matching entry.
0114      */
0115 tsb_miss_page_table_walk_sun4v_fastpath:
     /* Walk the user page tables at trap level 1; branches to
      * tsb_do_fault if no valid mapping exists.
      */
0116     USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
0117 
0118     /* Valid PTE is now in %g5.  */
0119 
0120 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
     /* Build the (_PAGE_PMD_HUGE | _PAGE_PUD_HUGE) mask in the upper
      * 32 bits of %g7 and test whether the PTE maps a huge page.
      * Not huge -> skip straight to the normal TSB reload at 60.
      */
0121     sethi       %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7
0122     sllx        %g7, 32, %g7
0123 
0124     andcc       %g5, %g7, %g0
0125     be,pt       %xcc, 60f
0126      nop
0127 
0128     /* It is a huge page, use huge page TSB entry address we
0129      * calculated above.  If the huge page TSB has not been
0130      * allocated, setup a trap stack and call hugetlb_setup()
0131      * to do so, then return from the trap to replay the TLB
0132      * miss.
0133      *
0134      * This is necessary to handle the case of transparent huge
0135      * pages where we don't really have a non-atomic context
0136      * in which to allocate the hugepage TSB hash table.  When
0137      * the 'mm' faults in the hugepage for the first time, we
0138      * thus handle it here.  This also makes sure that we can
0139      * allocate the TSB hash table on the correct NUMA node.
0140      */
0141     TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
0142     ldx     [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
0143     cmp     %g1, -1                 ! -1 == huge TSB not yet allocated
0144     bne,pt      %xcc, 60f
0145      nop
0146 
     /* Leave the MMU globals before trapping into C: on sun4u by
      * toggling AG/MG out of %pstate, on sun4v (2-insn patched)
      * via SET_GL(1).
      */
0147 661:    rdpr        %pstate, %g5
0148     wrpr        %g5, PSTATE_AG | PSTATE_MG, %pstate
0149     .section    .sun4v_2insn_patch, "ax"
0150     .word       661b
0151     SET_GL(1)
0152     nop
0153     .previous
0154 
     /* Only a TL==1 trap can take the normal etrap path; deeper
      * trap levels must go through the window-fixup trampoline.
      */
0155     rdpr    %tl, %g7
0156     cmp %g7, 1
0157     bne,pn  %xcc, winfix_trampoline
0158      mov    %g3, %g4                ! delay slot: fault code -> %g4 for etrap
0159     ba,pt   %xcc, etrap
0160      rd %pc, %g7                    ! delay slot: etrap return address base
0161     call    hugetlb_setup
0162      add    %sp, PTREGS_OFF, %o0    ! delay slot: pt_regs argument
0163     ba,pt   %xcc, rtrap             ! return from trap; the TLB miss replays
0164      nop
0165 
0166 60:
0167 #endif
0168 
0169     /* At this point we have:
0170      * %g1 --   TSB entry address
0171      * %g3 --   FAULT_CODE_{D,I}TLB
0172      * %g5 --   valid PTE
0173      * %g6 --   TAG TARGET (vaddr >> 22)
0174      */
0175 tsb_reload:
     /* Lock the TSB entry's tag, then write PTE + tag (TSB_WRITE
      * stores the PTE first, and the tag store unlocks the entry).
      */
0176     TSB_LOCK_TAG(%g1, %g2, %g7)
0177     TSB_WRITE(%g1, %g5, %g6)
0178 
0179     /* Finally, load TLB and return from trap.  */
0180 tsb_tlb_reload:
0181     cmp     %g3, FAULT_CODE_DTLB
0182     bne,pn      %xcc, tsb_itlb_load
0183      nop
0184 
0185 tsb_dtlb_load:
0186 
     /* sun4u: push the TTE straight into the D-TLB and retry the
      * faulting instruction.  On sun4v both instructions are
      * patched to nops and execution falls through to the branch
      * below (see comment).
      */
0187 661:    stxa        %g5, [%g0] ASI_DTLB_DATA_IN
0188     retry
0189     .section    .sun4v_2insn_patch, "ax"
0190     .word       661b
0191     nop
0192     nop
0193     .previous
0194 
0195     /* For sun4v the ASI_DTLB_DATA_IN store and the retry
0196      * instruction get nop'd out and we get here to branch
0197      * to the sun4v tlb load code.  The registers are setup
0198      * as follows:
0199      *
0200      * %g4: vaddr
0201      * %g5: PTE
0202      * %g6: TAG
0203      *
0204      * The sun4v TLB load wants the PTE in %g3 so we fix that
0205      * up here.
0206      */
0207     ba,pt       %xcc, sun4v_dtlb_load
0208      mov        %g5, %g3           ! delay slot: PTE -> %g3 per sun4v convention
0209 
0210 tsb_itlb_load:
0211     /* Executable bit must be set.  */
     /* sun4u tests _PAGE_EXEC_4U (needs a sethi to build the
      * constant); the sun4v 2-insn patch replaces the pair with a
      * direct andcc against _PAGE_EXEC_4V plus a nop.
      */
0212 661:    sethi       %hi(_PAGE_EXEC_4U), %g4
0213     andcc       %g5, %g4, %g4
0214     .section    .sun4v_2insn_patch, "ax"
0215     .word       661b
0216     andcc       %g5, _PAGE_EXEC_4V, %g0
0217     nop
0218     .previous
0219 
0220     be,pn       %xcc, tsb_do_fault     ! not executable -> real fault
0221      nop
0222 
     /* sun4u: push the TTE into the I-TLB and retry; on sun4v
      * both instructions are nop'd and we fall through below.
      */
0223 661:    stxa        %g5, [%g0] ASI_ITLB_DATA_IN
0224     retry
0225     .section    .sun4v_2insn_patch, "ax"
0226     .word       661b
0227     nop
0228     nop
0229     .previous
0230 
0231     /* For sun4v the ASI_ITLB_DATA_IN store and the retry
0232      * instruction get nop'd out and we get here to branch
0233      * to the sun4v tlb load code.  The registers are setup
0234      * as follows:
0235      *
0236      * %g4: vaddr
0237      * %g5: PTE
0238      * %g6: TAG
0239      *
0240      * The sun4v TLB load wants the PTE in %g3 so we fix that
0241      * up here.
0242      */
0243     ba,pt       %xcc, sun4v_itlb_load
0244      mov        %g5, %g3           ! delay slot: PTE -> %g3 per sun4v convention
0245 
0246     /* No valid entry in the page tables, do full fault
0247      * processing.
0248      */
0249 
0250     .globl      tsb_do_fault
0251 tsb_do_fault:
0252     cmp     %g3, FAULT_CODE_DTLB
0253 
     /* Leave the MMU globals: sun4u toggles AG/MG out of %pstate;
      * the sun4v patch instead does SET_GL(1) and loads %g4 from
      * the scratchpad (fault info base, presumably the fault
      * status area -- see the HV_FAULT_D_ADDR_OFFSET use below).
      * Neither sequence touches the condition codes, so the cmp
      * above is still live at the bne below.
      */
0254 661:    rdpr        %pstate, %g5
0255     wrpr        %g5, PSTATE_AG | PSTATE_MG, %pstate
0256     .section    .sun4v_2insn_patch, "ax"
0257     .word       661b
0258     SET_GL(1)
0259     ldxa        [%g0] ASI_SCRATCHPAD, %g4
0260     .previous
0261 
0262     bne,pn      %xcc, tsb_do_itlb_fault
0263      nop
0264 
0265 tsb_do_dtlb_fault:
0266     rdpr    %tl, %g3
0267     cmp %g3, 1                      ! TL==1? consumed by the be below
0268 
     /* Fetch the fault address: sun4u reads the D-MMU tag access
      * register; the sun4v patch loads it from the fault status
      * area whose base was placed in %g4 at tsb_do_fault.
      */
0269 661:    mov TLB_TAG_ACCESS, %g4
0270     ldxa    [%g4] ASI_DMMU, %g5
0271     .section .sun4v_2insn_patch, "ax"
0272     .word   661b
0273     ldx [%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
0274     nop
0275     .previous
0276 
0277     /* Clear context ID bits.  */
0278     srlx        %g5, PAGE_SHIFT, %g5
0279     sllx        %g5, PAGE_SHIFT, %g5
0280 
     /* TL==1: take the common fault path with code/addr in %g4/%g5.
      * Deeper trap level: must go via the window-fixup trampoline.
      */
0281     be,pt   %xcc, sparc64_realfault_common
0282      mov    FAULT_CODE_DTLB, %g4   ! delay slot: executes on both paths
0283     ba,pt   %xcc, winfix_trampoline
0284      nop
0285 
0286 tsb_do_itlb_fault:
     /* For instruction faults the faulting address is simply %tpc. */
0287     rdpr    %tpc, %g5
0288     ba,pt   %xcc, sparc64_realfault_common
0289      mov    FAULT_CODE_ITLB, %g4   ! delay slot: fault code
0290 
0291     .globl  sparc64_realfault_common
0292 sparc64_realfault_common:
0293     /* fault code in %g4, fault address in %g5, etrap will
0294      * preserve these two values in %l4 and %l5 respectively
0295      */
     /* etrap uses %g7 (set by the rd %pc in the delay slot) as the
      * return point, so control resumes at the stb below with a
      * full trap frame set up.  The TI_* stores go through %g6,
      * which holds the thread_info pointer after etrap -- TODO
      * confirm against etrap.S, not visible here.
      */
0296     ba,pt   %xcc, etrap         ! Save trap state
0297 1:   rd %pc, %g7            ! delay slot: return address for etrap
0298     stb %l4, [%g6 + TI_FAULT_CODE]  ! Save fault code
0299     stx %l5, [%g6 + TI_FAULT_ADDR]  ! Save fault address
0300     call    do_sparc64_fault        ! Call fault handler
0301      add    %sp, PTREGS_OFF, %o0        ! Compute pt_regs arg
0302     ba,pt   %xcc, rtrap         ! Restore cpu state
0303      nop                    ! Delay slot (fill me)
0304 
0305 winfix_trampoline:
     /* Redirect the trap return: point %tnpc at offset 0x7c inside
      * the current trap vector entry (%tpc is vector-aligned here,
      * presumably -- the or merely sets the low offset bits) and
      * 'done' resumes there, landing in the window-fixup code.
      */
0306     rdpr    %tpc, %g3           ! Prepare winfixup TNPC
0307     or  %g3, 0x7c, %g3          ! Compute branch offset
0308     wrpr    %g3, %tnpc          ! Write it into TNPC
0309     done                    ! Trap return
0310 
0311     /* Insert an entry into the TSB.
0312      *
0313      * %o0: TSB entry pointer (virt or phys address)
0314      * %o1: tag
0315      * %o2: pte
0316      *
0317      * Runs with interrupts disabled around the lock/write pair so
0318      * the locked tag cannot be observed across an interrupt.
0319      */
0317     .align  32
0318     .globl  __tsb_insert
0319 __tsb_insert:
0320     rdpr    %pstate, %o5
0321     wrpr    %o5, PSTATE_IE, %pstate    ! toggle IE off
0322     TSB_LOCK_TAG(%o0, %g2, %g3)
0323     TSB_WRITE(%o0, %o2, %o1)
0324     wrpr    %o5, %pstate               ! restore saved %pstate
0325     retl
0326      nop
0327     .size   __tsb_insert, .-__tsb_insert
0328 
0329     /* Flush the given TSB entry if it has the matching
0330      * tag.
0331      *
0332      * %o0: TSB entry pointer (virt or phys address)
0333      * %o1: tag
0334      */
0335     .align  32
0336     .globl  tsb_flush
0337     .type   tsb_flush,#function
0338 tsb_flush:
0339     sethi   %hi(TSB_TAG_LOCK_HIGH), %g2
     /* Spin while the entry's lock bit (in the upper 32 bits of
      * the tag) is set -- another cpu is mid-update.
      */
0340 1:  TSB_LOAD_TAG(%o0, %g1)
0341     srlx    %g1, 32, %o3
0342     andcc   %o3, %g2, %g0
0343     bne,pn  %icc, 1b
0344      nop
     /* Tag mismatch -> nothing to flush.  Otherwise CAS the tag to
      * the invalid pattern; if the CAS loses (tag changed under
      * us), go back and retry from the top.
      */
0345     cmp %g1, %o1
0346     mov 1, %o3
0347     bne,pt  %xcc, 2f
0348      sllx   %o3, TSB_TAG_INVALID_BIT, %o3   ! delay slot: build invalid tag
0349     TSB_CAS_TAG(%o0, %g1, %o3)
0350     cmp %g1, %o3
0351     bne,pn  %xcc, 1b
0352      nop
0353 2:  retl
0354      nop
0355     .size   tsb_flush, .-tsb_flush
0357     /* Reload MMU related context switch state at
0358      * schedule() time.
0359      *
0360      * %o0: page table physical address
0361      * %o1: TSB base config pointer
0362      * %o2: TSB huge config pointer, or NULL if none
0363      * %o3: Hypervisor TSB descriptor physical address
0364      * %o4: Secondary context to load, if non-zero
0365      *
0366      * We have to run this whole thing with interrupts
0367      * disabled so that the current cpu doesn't change
0368      * due to preemption.
0369      */
0370     .align  32
0371     .globl  __tsb_context_switch
0372     .type   __tsb_context_switch,#function
0373 __tsb_context_switch:
0374     rdpr    %pstate, %g1
0375     wrpr    %g1, PSTATE_IE, %pstate    ! toggle IE off; restored at 9:
0376 
     /* Optionally load the secondary context register.  The single
      * instruction at 661 is patched on sun4v to use ASI_MMU.
      */
0377     brz,pn  %o4, 1f
0378      mov    SECONDARY_CONTEXT, %o5
0379 
0380 661:    stxa    %o4, [%o5] ASI_DMMU
0381     .section .sun4v_1insn_patch, "ax"
0382     .word   661b
0383     stxa    %o4, [%o5] ASI_MMU
0384     .previous
0385     flush   %g6
0386 
0387 1:
0388     TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
0389 
0390     stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
0391 
     /* %o0 = base TSB config register value; %g3 = huge TSB config
      * register value, or -1 when no huge config was supplied.
      */
0392     ldx [%o1 + TSB_CONFIG_REG_VAL], %o0
0393     brz,pt  %o2, 1f
0394      mov    -1, %g3
0395 
0396     ldx [%o2 + TSB_CONFIG_REG_VAL], %g3
0397 
0398 1:  stx %g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
0399 
     /* tlb_type == 3 selects the hypervisor (sun4v) switch path. */
0400     sethi   %hi(tlb_type), %g2
0401     lduw    [%g2 + %lo(tlb_type)], %g2
0402     cmp %g2, 3
0403     bne,pt  %icc, 50f
0404      nop
0405 
0406     /* Hypervisor TSB switch. */
0407     mov SCRATCHPAD_UTSBREG1, %o5
0408     stxa    %o0, [%o5] ASI_SCRATCHPAD
0409     mov SCRATCHPAD_UTSBREG2, %o5
0410     stxa    %g3, [%o5] ASI_SCRATCHPAD
0411 
     /* Descriptor count for the hypervisor call: 2 TSBs, or just 1
      * when there is no huge config (%g3 == -1); 'move' is the
      * conditional move-on-equal.
      */
0412     mov 2, %o0
0413     cmp %g3, -1
0414     move    %xcc, 1, %o0
0415 
0416     mov HV_FAST_MMU_TSB_CTXNON0, %o5
0417     mov %o3, %o1               ! descriptor physical address
0418     ta  HV_FAST_TRAP
0419 
0420     /* Finish up.  */
0421     ba,pt   %xcc, 9f
0422      nop
0423 
0424     /* SUN4U TSB switch.  */
0425 50: mov TSB_REG, %o5
0426     stxa    %o0, [%o5] ASI_DMMU
0427     membar  #Sync
0428     stxa    %o0, [%o5] ASI_IMMU
0429     membar  #Sync
0430 
     /* If the base TSB needs a locked TLB mapping (MAP_VADDR set),
      * install it in the highest unlocked D-TLB entry (entry index
      * scaled by 8 for the data-access ASI).
      */
0431 2:  ldx [%o1 + TSB_CONFIG_MAP_VADDR], %o4
0432     brz %o4, 9f
0433      ldx    [%o1 + TSB_CONFIG_MAP_PTE], %o5
0434 
0435     sethi   %hi(sparc64_highest_unlocked_tlb_ent), %g2
0436     mov TLB_TAG_ACCESS, %g3
0437     lduw    [%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
0438     stxa    %o4, [%g3] ASI_DMMU
0439     membar  #Sync
0440     sllx    %g2, 3, %g2
0441     stxa    %o5, [%g2] ASI_DTLB_DATA_ACCESS
0442     membar  #Sync
0443 
     /* Likewise for the huge TSB mapping, one TLB entry below. */
0444     brz,pt  %o2, 9f
0445      nop
0446 
0447     ldx [%o2 + TSB_CONFIG_MAP_VADDR], %o4
0448     ldx [%o2 + TSB_CONFIG_MAP_PTE], %o5
0449     mov TLB_TAG_ACCESS, %g3
0450     stxa    %o4, [%g3] ASI_DMMU
0451     membar  #Sync
0452     sub %g2, (1 << 3), %g2
0453     stxa    %o5, [%g2] ASI_DTLB_DATA_ACCESS
0454     membar  #Sync
0455 
0456 9:
0457     wrpr    %g1, %pstate       ! restore interrupts
0458 
0459     retl
0460      nop
0461     .size   __tsb_context_switch, .-__tsb_context_switch
0462 
0463 #define TSB_PASS_BITS   ((1 << TSB_TAG_LOCK_BIT) | \
0464              (1 << TSB_TAG_INVALID_BIT))
0465 
0466     .align  32
0467     .globl  copy_tsb
0468     .type   copy_tsb,#function
0469 copy_tsb:       /* %o0=old_tsb_base, %o1=old_tsb_size
0470              * %o2=new_tsb_base, %o3=new_tsb_size
0471              * %o4=page_size_shift
0472              *
0473              * Copies every live entry (neither LOCK nor INVALID
0474              * set) of the old TSB into the new one, rehashing by
0475              * the reconstructed vaddr.  Entries are 16 bytes:
0476              * tag at +0, TTE at +8.
0477              */
0478     sethi       %uhi(TSB_PASS_BITS), %g7
0479     srlx        %o3, 4, %o3     /* bytes -> entry count */
0480     add     %o0, %o1, %o1   /* end of old tsb */
0481     sllx        %g7, 32, %g7    /* skip-mask into upper 32 bits */
0482     sub     %o3, 1, %o3 /* %o3 == new tsb hash mask */
0483 
0484     mov     %o4, %g1    /* page_size_shift */
0485 
     /* Prefetch of the old TSB; the .tsb_phys_patch table swaps
      * the ASI when the TSB is accessed by physical address.
      */
0481 661:    prefetcha   [%o0] ASI_N, #one_read
0482     .section    .tsb_phys_patch, "ax"
0483     .word       661b
0484     prefetcha   [%o0] ASI_PHYS_USE_EC, #one_read
0485     .previous
0486 
     /* On each 64-byte boundary, prefetch the next cache line. */
0487 90: andcc       %o0, (64 - 1), %g0
0488     bne     1f
0489      add        %o0, 64, %o5
0490 
0491 661:    prefetcha   [%o5] ASI_N, #one_read
0492     .section    .tsb_phys_patch, "ax"
0493     .word       661b
0494     prefetcha   [%o5] ASI_PHYS_USE_EC, #one_read
0495     .previous
0496 
0497 1:  TSB_LOAD_QUAD(%o0, %g2)     /* %g2/%g3 == TSB entry */
0498     andcc       %g2, %g7, %g0   /* LOCK or INVALID set? */
0499     bne,pn      %xcc, 80f   /* Skip it */
0500      sllx       %g2, 22, %o4    /* TAG --> VADDR */
0501 
0502     /* This can definitely be computed faster... */
0503     srlx        %o0, 4, %o5 /* Build index */
0504     and     %o5, 511, %o5   /* Mask index */
0505     sllx        %o5, %g1, %o5   /* Put into vaddr position */
0506     or      %o4, %o5, %o4   /* Full VADDR. */
0507     srlx        %o4, %g1, %o4   /* Shift down to create index */
0508     and     %o4, %o3, %o4   /* Mask with new_tsb_nents-1 */
0509     sllx        %o4, 4, %o4 /* Shift back up into tsb ent offset */
0510     TSB_STORE(%o2 + %o4, %g2)   /* Store TAG */
0511     add     %o4, 0x8, %o4   /* Advance to TTE */
0512     TSB_STORE(%o2 + %o4, %g3)   /* Store TTE */
0513 
0514 80: add     %o0, 16, %o0    /* next 16-byte entry */
0515     cmp     %o0, %o1
0516     bne,pt      %xcc, 90b
0517      nop
0518 
0519     retl
0520      nop
0521     .size       copy_tsb, .-copy_tsb
0522 
0523     /* Set the invalid bit in all TSB entries.  */
0524     .align      32
0525     .globl      tsb_init
0526     .type       tsb_init,#function
0527 tsb_init:       /* %o0 = TSB vaddr, %o1 = size in bytes
                 * Writes the invalid-tag pattern to every 16-byte
                 * entry, 0x100 bytes per iteration, prefetching
                 * ahead for writing.  Loop exits only on exactly
                 * zero, so %o1 is assumed to be a multiple of
                 * 0x100 -- TODO confirm with callers.
                 */
0528     prefetch    [%o0 + 0x000], #n_writes
0529     mov     1, %g1
0530     prefetch    [%o0 + 0x040], #n_writes
0531     sllx        %g1, TSB_TAG_INVALID_BIT, %g1   ! invalid-tag pattern
0532     prefetch    [%o0 + 0x080], #n_writes
0533 1:  prefetch    [%o0 + 0x0c0], #n_writes
0534     stx     %g1, [%o0 + 0x00]
0535     stx     %g1, [%o0 + 0x10]
0536     stx     %g1, [%o0 + 0x20]
0537     stx     %g1, [%o0 + 0x30]
0538     prefetch    [%o0 + 0x100], #n_writes
0539     stx     %g1, [%o0 + 0x40]
0540     stx     %g1, [%o0 + 0x50]
0541     stx     %g1, [%o0 + 0x60]
0542     stx     %g1, [%o0 + 0x70]
0543     prefetch    [%o0 + 0x140], #n_writes
0544     stx     %g1, [%o0 + 0x80]
0545     stx     %g1, [%o0 + 0x90]
0546     stx     %g1, [%o0 + 0xa0]
0547     stx     %g1, [%o0 + 0xb0]
0548     prefetch    [%o0 + 0x180], #n_writes
0549     stx     %g1, [%o0 + 0xc0]
0550     stx     %g1, [%o0 + 0xd0]
0551     stx     %g1, [%o0 + 0xe0]
0552     stx     %g1, [%o0 + 0xf0]
0553     subcc       %o1, 0x100, %o1
0554     bne,pt      %xcc, 1b
0555      add        %o0, 0x100, %o0    ! delay slot: advance base
0556     retl
0557      nop
0558     nop
0559     nop
0560     .size       tsb_init, .-tsb_init
0562     .globl      NGtsb_init
0563     .type       NGtsb_init,#function
     /* Niagara variant of tsb_init: same contract (%o0 = TSB vaddr,
      * %o1 = size in bytes, multiple of 0x100), but uses
      * block-init quad-store ASI stores, saving and restoring the
      * caller's %asi around the loop and issuing a final membar.
      */
0564 NGtsb_init:
0565     rd      %asi, %g2          ! save caller's %asi
0566     mov     1, %g1
0567     wr      %g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
0568     sllx        %g1, TSB_TAG_INVALID_BIT, %g1  ! invalid-tag pattern
0569 1:  stxa        %g1, [%o0 + 0x00] %asi
0570     stxa        %g1, [%o0 + 0x10] %asi
0571     stxa        %g1, [%o0 + 0x20] %asi
0572     stxa        %g1, [%o0 + 0x30] %asi
0573     stxa        %g1, [%o0 + 0x40] %asi
0574     stxa        %g1, [%o0 + 0x50] %asi
0575     stxa        %g1, [%o0 + 0x60] %asi
0576     stxa        %g1, [%o0 + 0x70] %asi
0577     stxa        %g1, [%o0 + 0x80] %asi
0578     stxa        %g1, [%o0 + 0x90] %asi
0579     stxa        %g1, [%o0 + 0xa0] %asi
0580     stxa        %g1, [%o0 + 0xb0] %asi
0581     stxa        %g1, [%o0 + 0xc0] %asi
0582     stxa        %g1, [%o0 + 0xd0] %asi
0583     stxa        %g1, [%o0 + 0xe0] %asi
0584     stxa        %g1, [%o0 + 0xf0] %asi
0585     subcc       %o1, 0x100, %o1
0586     bne,pt      %xcc, 1b
0587      add        %o0, 0x100, %o0   ! delay slot: advance base
0588     membar      #Sync              ! drain block-init stores
0589     retl
0590      wr     %g2, 0x0, %asi         ! delay slot: restore %asi
0591     .size       NGtsb_init, .-NGtsb_init