/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

    /*
     * Provide a wxN alias for each wN register so that we can paste an xN
     * reference after a 'w' to obtain the 32-bit version.
     */
    .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    wx\n    .req    w\n
    .endr
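
    /*
     * Illustrative use (not from the original file): a macro handed
     * \reg = x3 can write 'mov w\reg, wzr', which pastes to
     * 'mov wx3, wzr' and, via the alias above, assembles as
     * 'mov w3, wzr'.
     */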

    .macro save_and_disable_daif, flags
    mrs \flags, daif
    msr daifset, #0xf
    .endm

    .macro disable_daif
    msr daifset, #0xf
    .endm

    .macro enable_daif
    msr daifclr, #0xf
    .endm

    .macro  restore_daif, flags:req
    msr daif, \flags
    .endm

    /* IRQ/FIQ are the lowest priority flags; unconditionally unmask the rest. */
    .macro enable_da
    msr daifclr, #(8 | 4)
    .endm
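
    /*
     * Usage sketch (illustrative; x9 is an arbitrary scratch choice):
     * mask all DAIF exceptions around a critical section and restore
     * the previous state afterwards.
     *
     *  save_and_disable_daif x9
     *  ...                         // critical section
     *  restore_daif x9
     */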

/*
 * Save/restore interrupts.
 */
    .macro  save_and_disable_irq, flags
    mrs \flags, daif
    msr daifset, #3
    .endm

    .macro  restore_irq, flags
    msr daif, \flags
    .endm

    .macro  enable_dbg
    msr daifclr, #8
    .endm

    .macro  disable_step_tsk, flgs, tmp
    tbz \flgs, #TIF_SINGLESTEP, 9990f
    mrs \tmp, mdscr_el1
    bic \tmp, \tmp, #DBG_MDSCR_SS
    msr mdscr_el1, \tmp
    isb // Synchronise with enable_dbg
9990:
    .endm

    /* call with daif masked */
    .macro  enable_step_tsk, flgs, tmp
    tbz \flgs, #TIF_SINGLESTEP, 9990f
    mrs \tmp, mdscr_el1
    orr \tmp, \tmp, #DBG_MDSCR_SS
    msr mdscr_el1, \tmp
9990:
    .endm

/*
 * RAS Error Synchronization barrier
 */
    .macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
    hint    #16
#else
    nop
#endif
    .endm

/*
 * Value prediction barrier
 */
    .macro  csdb
    hint    #20
    .endm

/*
 * Clear Branch History instruction
 */
    .macro clearbhb
    hint    #22
    .endm

/*
 * Speculation barrier
 */
    .macro  sb
alternative_if_not ARM64_HAS_SB
    dsb nsh
    isb
alternative_else
    SB_BARRIER_INSN
    nop
alternative_endif
    .endm

/*
 * NOP sequence
 */
    .macro  nops, num
    .rept   \num
    nop
    .endr
    .endm
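
    /*
     * E.g. 'nops 3' emits three NOP instructions; useful for padding
     * alternative code sequences to equal length.
     */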

/*
 * Register aliases.
 */
lr  .req    x30     // link register

/*
 * Vector entry
 */
    .macro ventry  label
    .align  7
    b   \label
    .endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
    .macro  regs_to_64, rd, lbits, hbits
#else
    .macro  regs_to_64, rd, hbits, lbits
#endif
    orr \rd, \lbits, \hbits, lsl #32
    .endm
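
    /*
     * E.g. 'regs_to_64 x0, x1, x2' combines the 32-bit values held in
     * x1 and x2 into x0: on little endian x1 supplies the low word and
     * x2 the high word; on big endian the parameter roles are swapped.
     */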

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
    /*
     * @dst: destination register (64 bit wide)
     * @sym: name of the symbol
     */
    .macro  adr_l, dst, sym
    adrp    \dst, \sym
    add \dst, \dst, :lo12:\sym
    .endm

    /*
     * @dst: destination register (32 or 64 bit wide)
     * @sym: name of the symbol
     * @tmp: optional 64-bit scratch register to be used if <dst> is a
     *       32-bit wide register, in which case it cannot be used to hold
     *       the address
     */
    .macro  ldr_l, dst, sym, tmp=
    .ifb    \tmp
    adrp    \dst, \sym
    ldr \dst, [\dst, :lo12:\sym]
    .else
    adrp    \tmp, \sym
    ldr \dst, [\tmp, :lo12:\sym]
    .endif
    .endm

    /*
     * @src: source register (32 or 64 bit wide)
     * @sym: name of the symbol
     * @tmp: mandatory 64-bit scratch register to calculate the address
     *       while <src> needs to be preserved.
     */
    .macro  str_l, src, sym, tmp
    adrp    \tmp, \sym
    str \src, [\tmp, :lo12:\sym]
    .endm
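
    /*
     * Usage sketches (symbol names are hypothetical):
     *
     *  adr_l   x0, some_symbol     // x0 = address of some_symbol
     *  ldr_l   w1, some_u32, x2    // 32-bit dst, so x2 holds the address
     *  str_l   x3, some_u64, x4    // x4 is the mandatory scratch register
     */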

    /*
     * @dst: destination register
     */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
    .macro  get_this_cpu_offset, dst
    mrs \dst, tpidr_el2
    .endm
#else
    .macro  get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
    mrs \dst, tpidr_el1
alternative_else
    mrs \dst, tpidr_el2
alternative_endif
    .endm

    .macro  set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
    msr tpidr_el1, \src
alternative_else
    msr tpidr_el2, \src
alternative_endif
    .endm
#endif

    /*
     * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
     * @sym: The name of the per-cpu variable
     * @tmp: scratch register
     */
    .macro adr_this_cpu, dst, sym, tmp
    adrp    \tmp, \sym
    add \dst, \tmp, #:lo12:\sym
    get_this_cpu_offset \tmp
    add \dst, \dst, \tmp
    .endm

    /*
     * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
     * @sym: The name of the per-cpu variable
     * @tmp: scratch register
     */
    .macro ldr_this_cpu dst, sym, tmp
    adr_l   \dst, \sym
    get_this_cpu_offset \tmp
    ldr \dst, [\dst, \tmp]
    .endm
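
    /*
     * Usage sketch (hypothetical per-cpu symbol): load this CPU's copy
     * of a per-cpu variable into x0, using x1 as scratch.
     *
     *  ldr_this_cpu x0, my_percpu_var, x1
     */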

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
    .macro  vma_vm_mm, rd, rn
    ldr \rd, [\rn, #VMA_VM_MM]
    .endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
    .macro  read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
    mrs \reg, ctr_el0           // read CTR
    nop
alternative_else
    ldr_l   \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
    ASM_BUG()
alternative_else_nop_endif
alternative_cb kvm_compute_final_ctr_el0
    movz    \reg, #0
    movk    \reg, #0, lsl #16
    movk    \reg, #0, lsl #32
    movk    \reg, #0, lsl #48
alternative_cb_end
#endif
    .endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
    .macro  raw_dcache_line_size, reg, tmp
    mrs \tmp, ctr_el0           // read CTR
    ubfm    \tmp, \tmp, #16, #19        // cache line size encoding
    mov \reg, #4            // bytes per word
    lsl \reg, \reg, \tmp        // actual cache line size
    .endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
    .macro  dcache_line_size, reg, tmp
    read_ctr    \tmp
    ubfm        \tmp, \tmp, #16, #19    // cache line size encoding
    mov     \reg, #4        // bytes per word
    lsl     \reg, \reg, \tmp    // actual cache line size
    .endm
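
    /*
     * Worked example: CTR_EL0.DminLine (bits [19:16]) holds log2 of the
     * line size in 4-byte words, so a DminLine value of 4 yields
     * 4 << 4 = 64 bytes, the common case.
     */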

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
    .macro  raw_icache_line_size, reg, tmp
    mrs \tmp, ctr_el0           // read CTR
    and \tmp, \tmp, #0xf        // cache line size encoding
    mov \reg, #4            // bytes per word
    lsl \reg, \reg, \tmp        // actual cache line size
    .endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
    .macro  icache_line_size, reg, tmp
    read_ctr    \tmp
    and     \tmp, \tmp, #0xf    // cache line size encoding
    mov     \reg, #4        // bytes per word
    lsl     \reg, \reg, \tmp    // actual cache line size
    .endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
    .macro  tcr_set_t0sz, valreg, t0sz
    bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
    .endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
    .macro  tcr_set_t1sz, valreg, t1sz
    bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
    .endm

/*
 * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
 *
 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 * this number conveniently equals the number of leading zeroes in
 * the physical address of _end.
 */
    .macro  idmap_get_t0sz, reg
    adrp    \reg, _end
    orr \reg, \reg, #(1 << VA_BITS_MIN) - 1
    clz \reg, \reg
    .endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *  tcr:        register with the TCR_ELx value to be updated
 *  pos:        IPS or PS bitfield position
 *  tmp{0,1}:   temporary registers
 */
    .macro  tcr_compute_pa_size, tcr, pos, tmp0, tmp1
    mrs \tmp0, ID_AA64MMFR0_EL1
    // Narrow PARange to fit the PS field in TCR_ELx
    ubfx    \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
    mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
    cmp \tmp0, \tmp1
    csel    \tmp0, \tmp1, \tmp0, hi
    bfi \tcr, \tmp0, \pos, #3
    .endm
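
    /*
     * Usage sketch (registers illustrative):
     *
     *  tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
     *
     * clamps the CPU's PARange to the architectural maximum and writes
     * it into the IPS field of the TCR value held in x10.
     */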

    .macro __dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
    dc  \op, \addr
alternative_else
    dc  civac, \addr
alternative_endif
    .endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end) with the dcache line size explicitly provided.
 *
 *  op:     operation passed to dc instruction
 *  domain:     domain used in dsb instruction
 *  start:          starting virtual address of the region
 *  end:            end virtual address of the region
 *  linesz:     dcache line size
 *  fixup:      optional label to branch to on user fault
 *  Corrupts:       start, end, tmp
 */
    .macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
    sub \tmp, \linesz, #1
    bic \start, \start, \tmp
.Ldcache_op\@:
    .ifc    \op, cvau
    __dcache_op_workaround_clean_cache \op, \start
    .else
    .ifc    \op, cvac
    __dcache_op_workaround_clean_cache \op, \start
    .else
    .ifc    \op, cvap
    sys 3, c7, c12, 1, \start   // dc cvap
    .else
    .ifc    \op, cvadp
    sys 3, c7, c13, 1, \start   // dc cvadp
    .else
    dc  \op, \start
    .endif
    .endif
    .endif
    .endif
    add \start, \start, \linesz
    cmp \start, \end
    b.lo    .Ldcache_op\@
    dsb \domain

    _cond_uaccess_extable .Ldcache_op\@, \fixup
    .endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end)
 *
 *  op:     operation passed to dc instruction
 *  domain:     domain used in dsb instruction
 *  start:          starting virtual address of the region
 *  end:            end virtual address of the region
 *  fixup:      optional label to branch to on user fault
 *  Corrupts:       start, end, tmp1, tmp2
 */
    .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
    dcache_line_size \tmp1, \tmp2
    dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
    .endm
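
    /*
     * Usage sketch (registers illustrative): clean and invalidate the
     * range [x0, x1) to the PoC, clobbering x2/x3.
     *
     *  dcache_by_line_op civac, sy, x0, x1, x2, x3
     */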

/*
 * Macro to perform instruction cache maintenance for the interval
 * [start, end)
 *
 *  start, end: virtual addresses describing the region
 *  fixup:      optional label to branch to on user fault
 *  Corrupts:   tmp1, tmp2
 */
    .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
    icache_line_size \tmp1, \tmp2
    sub \tmp2, \tmp1, #1
    bic \tmp2, \start, \tmp2
.Licache_op\@:
    ic  ivau, \tmp2         // invalidate I line PoU
    add \tmp2, \tmp2, \tmp1
    cmp \tmp2, \end
    b.lo    .Licache_op\@
    dsb ish
    isb

    _cond_uaccess_extable .Licache_op\@, \fixup
    .endm

/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
    .macro      load_ttbr1, pgtbl, tmp1, tmp2
    phys_to_ttbr    \tmp1, \pgtbl
    offset_ttbr1    \tmp1, \tmp2
    msr     ttbr1_el1, \tmp1
    isb
    .endm

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the TLB, switch the TTBR to a zero page while we invalidate the old
 * records (see D4.7.1 'General TLB maintenance requirements' in ARM DDI
 * 0487A.i). Even switching to our copied tables will cause a changed output
 * address at each stage of the walk.
 */
    .macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
    phys_to_ttbr \tmp, \zero_page
    msr ttbr1_el1, \tmp
    isb
    tlbi    vmalle1
    dsb nsh
    load_ttbr1 \page_table, \tmp, \tmp2
    .endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
    .macro  reset_pmuserenr_el0, tmpreg
    mrs \tmpreg, id_aa64dfr0_el1
    sbfx    \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
    cmp \tmpreg, #1         // Skip if no PMU present
    b.lt    9000f
    msr pmuserenr_el0, xzr      // Disable PMU access from EL0
9000:
    .endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
    .macro  reset_amuserenr_el0, tmpreg
    mrs \tmpreg, id_aa64pfr0_el1    // Check ID_AA64PFR0_EL1
    ubfx    \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
    cbz \tmpreg, .Lskip_\@      // Skip if no AMU present
    msr_s   SYS_AMUSERENR_EL0, xzr      // Disable AMU access from EL0
.Lskip_\@:
    .endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
    .macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:   ldp \t1, \t2, [\src]
    ldp \t3, \t4, [\src, #16]
    ldp \t5, \t6, [\src, #32]
    ldp \t7, \t8, [\src, #48]
    add \src, \src, #64
    stnp    \t1, \t2, [\dest]
    stnp    \t3, \t4, [\dest, #16]
    stnp    \t5, \t6, [\dest, #32]
    stnp    \t7, \t8, [\dest, #48]
    add \dest, \dest, #64
    tst \src, #(PAGE_SIZE - 1)
    b.ne    9998b
    .endm
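
    /*
     * Usage sketch (registers illustrative): copy one page from x1 to
     * x0, both page aligned, with x2-x9 as scratch. Note that \src and
     * \dest are advanced by PAGE_SIZE as a side effect.
     *
     *  copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
     */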

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)             \
    .pushsection "_kprobe_blacklist", "aw"; \
    .quad   x;              \
    .popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif

    /*
     * Emit a 64-bit absolute little endian symbol reference in a way that
     * ensures that it will be resolved at build time, even when building a
     * PIE binary. This requires cooperation from the linker script, which
     * must emit the lo32/hi32 halves individually.
     */
    .macro  le64sym, sym
    .long   \sym\()_lo32
    .long   \sym\()_hi32
    .endm

    /*
     * mov_q - move an immediate constant into a 64-bit register using
     *         between 2 and 4 movz/movk instructions (depending on the
     *         magnitude and sign of the operand)
     */
    .macro  mov_q, reg, val
    .if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
    movz    \reg, :abs_g1_s:\val
    .else
    .if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
    movz    \reg, :abs_g2_s:\val
    .else
    movz    \reg, :abs_g3:\val
    movk    \reg, :abs_g2_nc:\val
    .endif
    movk    \reg, :abs_g1_nc:\val
    .endif
    movk    \reg, :abs_g0_nc:\val
    .endm
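
    /*
     * Examples (illustrative): 'mov_q x2, 0x7fffffff' needs only two
     * instructions (movz/movk), since bits [63:31] are all zero, while
     * 'mov_q x2, 0x123456789abcdef0' emits the full four-instruction
     * movz/movk/movk/movk sequence.
     */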

/*
 * Return the current task_struct.
 */
    .macro  get_current_task, rd
    mrs \rd, sp_el0
    .endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *  ttbr: Value of ttbr to set, modified.
 */
    .macro  offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
    mrs_s   \tmp, SYS_ID_AA64MMFR2_EL1
    and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
    cbnz    \tmp, .Lskipoffs_\@
    orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@:
#endif
    .endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
    .macro  restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
    bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
    .endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *  phys:   physical address, preserved
 *  ttbr:   returns the TTBR value
 */
    .macro  phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
    orr \ttbr, \phys, \phys, lsr #46
    and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
    mov \ttbr, \phys
#endif
    .endm

    .macro  phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
    /*
     * We assume \phys is 64K aligned and this is guaranteed by only
     * supporting this configuration with 64K pages.
     */
    orr \pte, \phys, \phys, lsr #36
    and \pte, \pte, #PTE_ADDR_MASK
#else
    mov \pte, \phys
#endif
    .endm

    .macro  pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
    ubfiz   \phys, \pte, #(48 - 16 - 12), #16
    bfxil   \phys, \pte, #16, #32
    lsl \phys, \phys, #16
#else
    and \phys, \pte, #PTE_ADDR_MASK
#endif
    .endm
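
    /*
     * Note (explanatory, not from the original file): with 52-bit PAs
     * and 64K pages, PA bits [51:48] are carried in PTE bits [15:12].
     * The ubfiz/bfxil pair above re-packs them next to PA[47:16], and
     * the final lsl restores the 16-bit alignment of the address.
     */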

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
    .macro  tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
    mrs \tmp1, midr_el1

    mov_q   \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
    and \tmp1, \tmp1, \tmp2
    mov_q   \tmp2, MIDR_FUJITSU_ERRATUM_010001
    cmp \tmp1, \tmp2
    b.ne    10f

    mov_q   \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
    bic \tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
    .endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * before executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
    .macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
    isb
#endif
    .endm

    /*
     * frame_push - Push @regcount callee saved registers to the stack,
     *              starting at x19, as well as x29/x30, and set x29 to
     *              the new value of sp. Add @extra bytes of stack space
     *              for locals.
     */
    .macro      frame_push, regcount:req, extra
    __frame     st, \regcount, \extra
    .endm

    /*
     * frame_pop  - Pop the callee saved registers from the stack that were
     *              pushed in the most recent call to frame_push, as well
     *              as x29/x30 and any extra stack space that may have been
     *              allocated.
     */
    .macro      frame_pop
    __frame     ld
    .endm

    .macro      __frame_regs, reg1, reg2, op, num
    .if     .Lframe_regcount == \num
    \op\()r     \reg1, [sp, #(\num + 1) * 8]
    .elseif     .Lframe_regcount > \num
    \op\()p     \reg1, \reg2, [sp, #(\num + 1) * 8]
    .endif
    .endm

    .macro      __frame, op, regcount, extra=0
    .ifc        \op, st
    .if     (\regcount) < 0 || (\regcount) > 10
    .error      "regcount should be in the range [0 ... 10]"
    .endif
    .if     ((\extra) % 16) != 0
    .error      "extra should be a multiple of 16 bytes"
    .endif
    .ifdef      .Lframe_regcount
    .if     .Lframe_regcount != -1
    .error      "frame_push/frame_pop may not be nested"
    .endif
    .endif
    .set        .Lframe_regcount, \regcount
    .set        .Lframe_extra, \extra
    .set        .Lframe_local_offset, ((\regcount + 3) / 2) * 16
    stp     x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
    mov     x29, sp
    .endif

    __frame_regs    x19, x20, \op, 1
    __frame_regs    x21, x22, \op, 3
    __frame_regs    x23, x24, \op, 5
    __frame_regs    x25, x26, \op, 7
    __frame_regs    x27, x28, \op, 9

    .ifc        \op, ld
    .if     .Lframe_regcount == -1
    .error      "frame_push/frame_pop may not be nested"
    .endif
    ldp     x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
    .set        .Lframe_regcount, -1
    .endif
    .endm
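
    /*
     * Usage sketch (illustrative): save x19-x22 plus x29/x30 and
     * reserve 16 bytes for locals, then unwind before returning.
     *
     *  frame_push 4, 16
     *  ...
     *  frame_pop
     *  ret
     */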

/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
.macro set_sctlr, sreg, reg
    msr \sreg, \reg
    isb
    /*
     * Invalidate the local I-cache so that any instructions fetched
     * speculatively from the PoC are discarded, since they may have
     * been dynamically patched at the PoU.
     */
    ic  iallu
    dsb nsh
    isb
.endm

.macro set_sctlr_el1, reg
    set_sctlr sctlr_el1, \reg
.endm

.macro set_sctlr_el2, reg
    set_sctlr sctlr_el2, \reg
.endm

    /*
     * Check whether preempt/bh-disabled asm code should yield as soon as
     * it is able. This is the case if we are currently running in task
     * context, and either a softirq is pending, or the TIF_NEED_RESCHED
     * flag is set and re-enabling preemption a single time would result in
     * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
     * stored negated in the top word of the thread_info::preempt_count
     * field)
     */
    .macro      cond_yield, lbl:req, tmp:req, tmp2:req
    get_current_task \tmp
    ldr     \tmp, [\tmp, #TSK_TI_PREEMPT]
    /*
     * If we are serving a softirq, there is no point in yielding: the
     * softirq will not be preempted no matter what we do, so we should
     * run to completion as quickly as we can.
     */
    tbnz        \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
#ifdef CONFIG_PREEMPTION
    sub     \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
    cbz     \tmp, \lbl
#endif
    adr_l       \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
    get_this_cpu_offset \tmp2
    ldr     w\tmp, [\tmp, \tmp2]
    cbnz        w\tmp, \lbl // yield on pending softirq in task context
.Lnoyield_\@:
    .endm
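
    /*
     * Usage sketch (illustrative; x8/x9 are arbitrary scratch choices).
     * Note that \tmp must name an xN register, as the macro pastes a
     * 'w' prefix onto it via the wxN aliases defined above.
     *
     *  cond_yield 3f, x8, x9
     *  // ... keep processing ...
     * 3:  // save state and yield
     */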

/*
 * Branch Target Identifier (BTI)
 */
    .macro  bti, targets
    .equ    .L__bti_targets_c, 34
    .equ    .L__bti_targets_j, 36
    .equ    .L__bti_targets_jc, 38
    hint    #.L__bti_targets_\targets
    .endm
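
    /*
     * E.g. 'bti c' assembles to 'hint #34' (BTI C, a valid landing pad
     * for indirect calls), which executes as a NOP on CPUs without BTI.
     */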

/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT      \
        ((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |  \
          GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
    .pushsection .note.gnu.property, "a"
    .align  3
    .long   2f - 1f
    .long   6f - 3f
    .long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
    .align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
    .long   5f - 4f
4:
    /*
     * This is described with an array of char in the Linux API spec,
     * but the text and all other usage (including binutils, clang and
     * GCC) treat it as a 32-bit value, so no swizzling is required for
     * big endian.
     */
    .long   \feat
5:
    .align  3
6:
    .popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */

    .macro __mitigate_spectre_bhb_loop      tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb  spectre_bhb_patch_loop_iter
    mov \tmp, #32       // Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
    b   . + 4
    subs    \tmp, \tmp, #1
    b.ne    .Lspectre_bhb_loop\@
    sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
    .endm

    .macro mitigate_spectre_bhb_loop    tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb  spectre_bhb_patch_loop_mitigation_enable
    b   .L_spectre_bhb_loop_done\@  // Patched to NOP
alternative_cb_end
    __mitigate_spectre_bhb_loop \tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
    .endm

    /* Saves/restores x0-x3 on the stack */
    .macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
    stp x0, x1, [sp, #-16]!
    stp x2, x3, [sp, #-16]!
    mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb  smccc_patch_fw_mitigation_conduit
    nop                 // Patched to SMC/HVC #0
alternative_cb_end
    ldp x2, x3, [sp], #16
    ldp x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
    .endm

    .macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb  spectre_bhb_patch_clearbhb
    /* Patched to NOP when not supported */
    clearbhb
    isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
    .endm
#endif  /* __ASM_ASSEMBLER_H */