/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains ARM architecture-specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)    (x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1      lsr #8
#define get_byte_2      lsr #16
#define get_byte_3      lsr #24
#define put_byte_0      lsl #0
#define put_byte_1      lsl #8
#define put_byte_2      lsl #16
#define put_byte_3      lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0      lsr #24
#define get_byte_1      lsr #16
#define get_byte_2      lsr #8
#define get_byte_3      lsl #0
#define put_byte_0      lsl #24
#define put_byte_1      lsl #16
#define put_byte_2      lsl #8
#define put_byte_3      lsl #0
#endif
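
/*
 * Illustrative sketch (not part of the original header): extracting the
 * byte at memory offset 1 of a loaded word works the same on either
 * endianness.  Register choices here are arbitrary:
 *
 *    ldr     r3, [r1], #4
 *    mov     r2, r3, get_byte_1      @ shift byte 1 down towards bits 7:0
 *    and     r2, r2, #0xff           @ r2 = *(u8 *)(r1 - 3)
 */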

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
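
/*
 * Illustrative sketch (not part of the original header): in BE8 the data
 * are big-endian but the instruction stream stays little-endian, so code
 * that reads opcodes from memory byte-swaps them only on BE8, e.g.
 *
 *    ldr     r1, [r2]                @ fetch an opcode word
 *    ARM_BE8(rev r1, r1)             @ no-op unless CONFIG_CPU_ENDIAN_BE8
 */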

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)    code
#else
#define PLD(code...)
#endif
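
/*
 * Illustrative sketch (not part of the original header): copy loops hide
 * the preload behind PLD() so pre-v5 builds simply drop it, e.g.
 *
 *    PLD(    pld     [r1, #32]   )   @ prefetch the next cache line
 *    ldmia   r1!, {r3-r6}
 *    stmia   r0!, {r3-r6}
 */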

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be worthwhile when the cache is not set to
 * write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
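
/*
 * Illustrative sketch (not part of the original header): a copy routine
 * can wrap its destination-alignment preamble in CALGN() so it only costs
 * cycles where it helps, e.g.
 *
 *    CALGN(  ands    ip, r0, #31 )   @ bytes to the next cache line
 *    CALGN(  bne     .Lalign_dest )  @ hypothetical local label
 */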

#define IMM12_MASK 0xfff

/* the frame pointer used for stack unwinding */
ARM(    fpreg   .req    r11 )
THUMB(  fpreg   .req    r7  )

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
    .macro  disable_irq_notrace
    cpsid   i
    .endm

    .macro  enable_irq_notrace
    cpsie   i
    .endm
#else
    .macro  disable_irq_notrace
    msr cpsr_c, #PSR_I_BIT | SVC_MODE
    .endm

    .macro  enable_irq_notrace
    msr cpsr_c, #SVC_MODE
    .endm
#endif

#if __LINUX_ARM_ARCH__ < 7
    .macro  dsb, args
    mcr p15, 0, r0, c7, c10, 4
    .endm

    .macro  isb, args
    mcr p15, 0, r0, c7, c5, 4
    .endm
#endif

    .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
    .if \save
    stmdb   sp!, {r0-r3, ip, lr}
    .endif
    bl  trace_hardirqs_off
    .if \save
    ldmia   sp!, {r0-r3, ip, lr}
    .endif
#endif
    .endm

    .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
    /*
     * Ideally the registers would be pushed and popped conditionally,
     * but after the bl the flags are certainly clobbered anyway.
     */
    .if \save
    stmdb   sp!, {r0-r3, ip, lr}
    .endif
    bl\cond trace_hardirqs_on
    .if \save
    ldmia   sp!, {r0-r3, ip, lr}
    .endif
#endif
    .endm

    .macro disable_irq, save=1
    disable_irq_notrace
    asm_trace_hardirqs_off \save
    .endm

    .macro enable_irq
    asm_trace_hardirqs_on
    enable_irq_notrace
    .endm
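
/*
 * Illustrative sketch (not part of the original header): bracketing a
 * short critical section, with lockdep IRQ tracing kept accurate:
 *
 *    disable_irq
 *    @ ... work that must not be interrupted ...
 *    enable_irq
 */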
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
    .macro  save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
    mrs \oldcpsr, primask
#else
    mrs \oldcpsr, cpsr
#endif
    disable_irq
    .endm

    .macro  save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
    mrs \oldcpsr, primask
#else
    mrs \oldcpsr, cpsr
#endif
    disable_irq_notrace
    .endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
    .macro  restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
    msr primask, \oldcpsr
#else
    msr cpsr_c, \oldcpsr
#endif
    .endm

    .macro restore_irqs, oldcpsr
    tst \oldcpsr, #PSR_I_BIT
    asm_trace_hardirqs_on cond=eq
    restore_irqs_notrace \oldcpsr
    .endm
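
/*
 * Illustrative sketch (not part of the original header): unlike the plain
 * disable/enable pair above, this variant nests safely because the old
 * mask state travels in a scratch register (r9 here is arbitrary):
 *
 *    save_and_disable_irqs r9        @ r9 := old CPSR (PRIMASK on v7-M)
 *    @ ... critical section ...
 *    restore_irqs r9                 @ unmask only if previously unmasked
 */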

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
    .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
    .macro  badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
    adr\c   \rd, \sym + 1
#else
    adr\c   \rd, \sym
#endif
    .endm
    .endr
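
/*
 * Illustrative sketch (not part of the original header): badr sets bit 0
 * of the address on Thumb-2 kernels, so a hand-built return address still
 * lands in the right instruction set, e.g.
 *
 *    badr    lr, 1f                  @ "call" by falling into a branch
 *    b       some_helper             @ hypothetical routine, returns via lr
 * 1:
 */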

/*
 * Get current thread_info.
 */
    .macro  get_thread_info, rd
    /* thread_info is the first member of struct task_struct */
    get_current \rd
    .endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
    .macro  inc_preempt_count, ti, tmp
    ldr \tmp, [\ti, #TI_PREEMPT]    @ get preempt count
    add \tmp, \tmp, #1          @ increment it
    str \tmp, [\ti, #TI_PREEMPT]
    .endm

    .macro  dec_preempt_count, ti, tmp
    ldr \tmp, [\ti, #TI_PREEMPT]    @ get preempt count
    sub \tmp, \tmp, #1          @ decrement it
    str \tmp, [\ti, #TI_PREEMPT]
    .endm

    .macro  dec_preempt_count_ti, ti, tmp
    get_thread_info \ti
    dec_preempt_count \ti, \tmp
    .endm
#else
    .macro  inc_preempt_count, ti, tmp
    .endm

    .macro  dec_preempt_count, ti, tmp
    .endm

    .macro  dec_preempt_count_ti, ti, tmp
    .endm
#endif
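
/*
 * Illustrative sketch (not part of the original header): marking a region
 * non-preemptible (register choices arbitrary; assembles to nothing when
 * CONFIG_PREEMPT_COUNT is off):
 *
 *    get_thread_info r9
 *    inc_preempt_count r9, r3
 *    @ ... must not be preempted here ...
 *    dec_preempt_count r9, r3
 */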

#define USERL(l, x...)              \
9999:   x;                  \
    .pushsection __ex_table,"a";        \
    .align  3;              \
    .long   9999b,l;            \
    .popsection

#define USER(x...)  USERL(9001f, x)
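
/*
 * Illustrative sketch (not part of the original header): USER() records
 * the instruction in the exception table so a faulting user access
 * branches to local label 9001, which the surrounding code must provide:
 *
 * USER(  ldrt    r2, [r0]    )       @ may fault on the user pointer
 *    ...
 * 9001:  mov     r0, #-EFAULT        @ hypothetical fixup path
 */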

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)                   \
9998:   instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)                    \
    .pushsection ".alt.smp.init", "a"           ;\
    .align  2                       ;\
    .long   9998b - .                   ;\
9997:   instr                           ;\
    .if . - 9997b == 2                  ;\
        nop                     ;\
    .endif                          ;\
    .if . - 9997b != 4                  ;\
        .error "ALT_UP() content must assemble to exactly 4 bytes";\
    .endif                          ;\
    .popsection
#define ALT_UP_B(label)                 \
    .pushsection ".alt.smp.init", "a"           ;\
    .align  2                       ;\
    .long   9998b - .                   ;\
    W(b)    . + (label - 9998b)                 ;\
    .popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
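
/*
 * Illustrative sketch (not part of the original header): ALT_SMP() and
 * ALT_UP() always appear as a pair.  An SMP kernel booting on a UP
 * machine patches the ALT_SMP() instruction with the 4-byte ALT_UP()
 * replacement, e.g.
 *
 *    ALT_SMP(dmb ish)                @ used when running on SMP
 *    ALT_UP(nop)                     @ patched in when running on UP
 */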

    /*
     * this_cpu_offset - load the per-CPU offset of this CPU into
     *           register 'rd'
     */
    .macro      this_cpu_offset, rd:req
#ifdef CONFIG_SMP
ALT_SMP(mrc     p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L1_\@)
.L0_\@:
    .subsection 1
.L1_\@: ldr_va      \rd, __per_cpu_offset
    b       .L0_\@
    .previous
#endif
#else
    mov     \rd, #0
#endif
    .endm

    /*
     * set_current - store the task pointer of this CPU's current task
     */
    .macro      set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:   mcr     p15, 0, \rn, c13, c0, 3     @ set TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
    .subsection 1
.L0_\@: str_va      \rn, __current, \tmp
    b       .L1_\@
    .previous
.L1_\@:
#endif
#else
    str_va      \rn, __current, \tmp
#endif
    .endm

    /*
     * get_current - load the task pointer of this CPU's current task
     */
    .macro      get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:   mrc     p15, 0, \rd, c13, c0, 3     @ get TPIDRURO register
#ifdef CONFIG_CPU_V6
ALT_UP_B(.L0_\@)
    .subsection 1
.L0_\@: ldr_va      \rd, __current
    b       .L1_\@
    .previous
.L1_\@:
#endif
#else
    ldr_va      \rd, __current
#endif
    .endm

    /*
     * reload_current - reload the task pointer of this CPU's current task
     *          into the TLS register
     */
    .macro      reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L0_\@)
#endif
    ldr_this_cpu    \t1, __entry_task, \t1, \t2
    mcr     p15, 0, \t1, c13, c0, 3     @ store in TPIDRURO
.L0_\@:
#endif
    .endm

/*
 * Instruction barrier
 */
    .macro  instr_sync
#if __LINUX_ARM_ARCH__ >= 7
    isb
#elif __LINUX_ARM_ARCH__ == 6
    mcr p15, 0, r0, c7, c5, 4
#endif
    .endm

/*
 * SMP data memory barrier
 */
    .macro  smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
    .ifeqs "\mode","arm"
    ALT_SMP(dmb ish)
    .else
    ALT_SMP(W(dmb)  ish)
    .endif
#elif __LINUX_ARM_ARCH__ == 6
    ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
    .ifeqs "\mode","arm"
    ALT_UP(nop)
    .else
    ALT_UP(W(nop))
    .endif
#endif
    .endm
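
/*
 * Illustrative sketch (not part of the original header): callers pass
 * "arm" or "thumb" so the barrier width matches the surrounding
 * instruction set, e.g. around a load/store-exclusive loop:
 *
 *    smp_dmb arm                     @ order prior accesses
 * 1: ldrex   r2, [r1]
 *    add     r2, r2, #1
 *    strex   r3, r2, [r1]
 *    teq     r3, #0
 *    bne     1b
 *    smp_dmb arm                     @ order subsequent accesses
 */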

#if defined(CONFIG_CPU_V7M)
    /*
     * setmode is used to assert that the CPU is in SVC mode during boot.
     * For v7-M this is done in __v7m_setup, so setmode can be empty here.
     */
    .macro  setmode, mode, reg
    .endm
#elif defined(CONFIG_THUMB2_KERNEL)
    .macro  setmode, mode, reg
    mov \reg, #\mode
    msr cpsr_c, \reg
    .endm
#else
    .macro  setmode, mode, reg
    msr cpsr_c, #\mode
    .endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
    mrs \reg , cpsr
    eor \reg, \reg, #HYP_MODE
    tst \reg, #MODE_MASK
    bic \reg , \reg , #MODE_MASK
    orr \reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(  orr \reg , \reg , #PSR_T_BIT    )
    bne 1f
    orr \reg, \reg, #PSR_A_BIT
    badr    lr, 2f
    msr spsr_cxsf, \reg
    __MSR_ELR_HYP(14)
    __ERET
1:  msr cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
    setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
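
/*
 * Illustrative sketch (not part of the original header): early boot code
 * typically invokes this once, before anything mode-dependent runs, with
 * any free register as scratch:
 *
 *    safe_svcmode_maskall r9         @ drops out of HYP if entered there
 */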

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

    .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
    .if \inc == 1
    \instr\()b\t\cond\().w \reg, [\ptr, #\off]
    .elseif \inc == 4
    \instr\t\cond\().w \reg, [\ptr, #\off]
    .else
    .error  "Unsupported inc macro argument"
    .endif

    .pushsection __ex_table,"a"
    .align  3
    .long   9999b, \abort
    .popsection
    .endm

    .macro  usracc, instr, reg, ptr, inc, cond, rept, abort
    @ explicit IT instruction needed because of the label
    @ introduced by the USER macro
    .ifnc   \cond,al
    .if \rept == 1
    itt \cond
    .elseif \rept == 2
    ittt    \cond
    .else
    .error  "Unsupported rept macro argument"
    .endif
    .endif

    @ Slightly optimised to avoid incrementing the pointer twice
    usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
    .if \rept == 2
    usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
    .endif

    add\cond \ptr, #\rept * \inc
    .endm

#else   /* !CONFIG_THUMB2_KERNEL */

    .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
    .rept   \rept
9999:
    .if \inc == 1
    \instr\()b\t\cond \reg, [\ptr], #\inc
    .elseif \inc == 4
    \instr\t\cond \reg, [\ptr], #\inc
    .else
    .error  "Unsupported inc macro argument"
    .endif

    .pushsection __ex_table,"a"
    .align  3
    .long   9999b, \abort
    .popsection
    .endr
    .endm

#endif  /* CONFIG_THUMB2_KERNEL */

    .macro  strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
    usracc  str, \reg, \ptr, \inc, \cond, \rept, \abort
    .endm

    .macro  ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
    usracc  ldr, \reg, \ptr, \inc, \cond, \rept, \abort
    .endm
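
/*
 * Illustrative sketch (not part of the original header): a user-space
 * copy loop built on these, with faults branching to the default 9001
 * label that the caller must define:
 *
 *    ldrusr  r3, r1, 4               @ word from user, post-increments r1
 *    str     r3, [r0], #4
 *    ...
 * 9001:                              @ fixup: return bytes not copied
 */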

/* Utility macro for declaring string literals */
    .macro  string name:req, string
    .type \name , #object
\name:
    .asciz "\string"
    .size \name , . - \name
    .endm

    .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
    .macro  ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
    mov\c   pc, \reg
#else
    .ifeqs  "\reg", "lr"
    bx\c    \reg
    .else
    mov\c   pc, \reg
    .endif
#endif
    .endm
    .endr

    .macro  ret.w, reg
    ret \reg
#ifdef CONFIG_THUMB2_KERNEL
    nop
#endif
    .endm

    .macro  bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:  .inst   0xde02
#else
1:  .inst   0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
    .pushsection .rodata.str, "aMS", %progbits, 1
2:  .asciz  "\msg"
    .popsection
    .pushsection __bug_table, "aw"
    .align  2
    .word   1b, 2b
    .hword  \line
    .popsection
#endif
    .endm
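
/*
 * Illustrative sketch (not part of the original header): the undefined
 * instruction emitted by "bug" traps into the kernel's BUG handler, which
 * looks the address up in __bug_table to report the message and line:
 *
 *    cmp     r0, #0
 *    bne     1f
 *    bug     "unexpected NULL", 42   @ hypothetical message and line
 * 1:
 */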

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)                \
    .pushsection "_kprobe_blacklist", "aw" ;    \
    .balign 4 ;                 \
    .long entry;                    \
    .popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

    .macro      __adldst_l, op, reg, sym, tmp, c
    .if     __LINUX_ARM_ARCH__ < 7
    ldr\c       \tmp, .La\@
    .subsection 1
    .align      2
.La\@:  .long       \sym - .Lpc\@
    .previous
    .else
    .ifnb       \c
 THUMB( ittt        \c          )
    .endif
    movw\c      \tmp, #:lower16:\sym - .Lpc\@
    movt\c      \tmp, #:upper16:\sym - .Lpc\@
    .endif

#ifndef CONFIG_THUMB2_KERNEL
    .set        .Lpc\@, . + 8           // PC bias
    .ifc        \op, add
    add\c       \reg, \tmp, pc
    .else
    \op\c       \reg, [pc, \tmp]
    .endif
#else
.Lb\@:  add\c       \tmp, \tmp, pc
    /*
     * In Thumb-2 builds, the PC bias depends on whether we are currently
     * emitting into a .arm or a .thumb section. The size of the add opcode
     * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
     * emitting in ARM mode, so let's use this to account for the bias.
     */
    .set        .Lpc\@, . + (. - .Lb\@)

    .ifnc       \op, add
    \op\c       \reg, [\tmp]
    .endif
#endif
    .endm

    /*
     * mov_l - move a constant value or [relocated] address into a register
     */
    .macro      mov_l, dst:req, imm:req, cond
    .if     __LINUX_ARM_ARCH__ < 7
    ldr\cond    \dst, =\imm
    .else
    movw\cond   \dst, #:lower16:\imm
    movt\cond   \dst, #:upper16:\imm
    .endif
    .endm
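
/*
 * Illustrative sketch (not part of the original header): mov_l picks a
 * literal-pool load or a movw/movt pair depending on the architecture,
 * so callers just write
 *
 *    mov_l   r0, 0x12345678          @ arbitrary 32-bit constant
 *    mov_l   r1, some_symbol         @ hypothetical symbol; relocated
 */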

    /*
     * adr_l - adr pseudo-op with unlimited range
     *
     * @dst: destination register
     * @sym: name of the symbol
     * @cond: conditional opcode suffix
     */
    .macro      adr_l, dst:req, sym:req, cond
    __adldst_l  add, \dst, \sym, \dst, \cond
    .endm

    /*
     * ldr_l - ldr <literal> pseudo-op with unlimited range
     *
     * @dst: destination register
     * @sym: name of the symbol
     * @cond: conditional opcode suffix
     */
    .macro      ldr_l, dst:req, sym:req, cond
    __adldst_l  ldr, \dst, \sym, \dst, \cond
    .endm

    /*
     * str_l - str <literal> pseudo-op with unlimited range
     *
     * @src: source register
     * @sym: name of the symbol
     * @tmp: mandatory scratch register
     * @cond: conditional opcode suffix
     */
    .macro      str_l, src:req, sym:req, tmp:req, cond
    __adldst_l  str, \src, \sym, \tmp, \cond
    .endm
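
/*
 * Illustrative sketch (not part of the original header): unlike plain
 * adr/ldr, these work at any distance from the symbol, e.g.
 *
 *    adr_l   r0, some_table          @ hypothetical symbols throughout
 *    ldr_l   r1, some_count
 *    add     r1, r1, #1
 *    str_l   r1, some_count, r2      @ r2 is the required scratch
 */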

    .macro      __ldst_va, op, reg, tmp, sym, cond, offset
#if __LINUX_ARM_ARCH__ >= 7 || \
    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
    mov_l       \tmp, \sym, \cond
#else
    /*
     * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
     * with the appropriate relocations. The combined sequence has a range
     * of -/+ 256 MiB, which should be sufficient for the core kernel and
     * for modules loaded into the module region.
     */
    .globl      \sym
    .reloc      .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
    .reloc      .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
    .reloc      .L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@: sub\cond    \tmp, pc, #8 - \offset
.L1_\@: sub\cond    \tmp, \tmp, #4 - \offset
.L2_\@:
#endif
    \op\cond    \reg, [\tmp, #\offset]
    .endm

    /*
     * ldr_va - load a 32-bit word from the virtual address of \sym
     */
    .macro      ldr_va, rd:req, sym:req, cond, tmp, offset=0
    .ifnb       \tmp
    __ldst_va   ldr, \rd, \tmp, \sym, \cond, \offset
    .else
    __ldst_va   ldr, \rd, \rd, \sym, \cond, \offset
    .endif
    .endm

    /*
     * str_va - store a 32-bit word to the virtual address of \sym
     */
    .macro      str_va, rn:req, sym:req, tmp:req, cond
    __ldst_va   str, \rn, \tmp, \sym, \cond, 0
    .endm

    /*
     * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
     *          without using a temp register. Supported in ARM mode
     *          only.
     */
    .macro      ldr_this_cpu_armv6, rd:req, sym:req
    this_cpu_offset \rd
    .globl      \sym
    .reloc      .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
    .reloc      .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
    .reloc      .L2_\@, R_ARM_LDR_PC_G2, \sym
    add     \rd, \rd, pc
.L0_\@: sub     \rd, \rd, #4
.L1_\@: sub     \rd, \rd, #0
.L2_\@: ldr     \rd, [\rd, #4]
    .endm

    /*
     * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
     *        into register 'rd', which may be the stack pointer,
     *        using 't1' and 't2' as general temp registers. These
     *        are permitted to overlap with 'rd' if != sp
     */
    .macro      ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#ifndef CONFIG_SMP
    ldr_va      \rd, \sym, tmp=\t1
#elif __LINUX_ARM_ARCH__ >= 7 || \
      !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
      (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
    this_cpu_offset \t1
    mov_l       \t2, \sym
    ldr     \rd, [\t1, \t2]
#else
    ldr_this_cpu_armv6 \rd, \sym
#endif
    .endm

    /*
     * rev_l - byte-swap a 32-bit value
     *
     * @val: source/destination register
     * @tmp: scratch register
     */
    .macro      rev_l, val:req, tmp:req
    .if     __LINUX_ARM_ARCH__ < 6
    eor     \tmp, \val, \val, ror #16
    bic     \tmp, \tmp, #0x00ff0000
    mov     \val, \val, ror #8
    eor     \val, \val, \tmp, lsr #8
    .else
    rev     \val, \val
    .endif
    .endm
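
/*
 * Illustrative note (not part of the original header): the pre-v6 path is
 * the classic 4-instruction swap.  With \val = [A:B:C:D] (A in bits
 * 31:24), \tmp = \val ^ ror(\val, 16) holds A^C and C^A in bytes 3 and 1;
 * clearing byte 2 and folding (\tmp >> 8) into ror(\val, 8) = [D:A:B:C]
 * yields [D:C:B:A].  Usage is simply:
 *
 *    rev_l   r0, ip                  @ r0 byte-swapped, ip clobbered
 */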

    /*
     * bl_r - branch and link to register
     *
     * @dst: target to branch to
     * @c: conditional opcode suffix
     */
    .macro      bl_r, dst:req, c
    .if     __LINUX_ARM_ARCH__ < 6
    mov\c       lr, pc
    mov\c       pc, \dst
    .else
    blx\c       \dst
    .endif
    .endm
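
/*
 * Illustrative sketch (not part of the original header): calling through
 * a function pointer held in a register, e.g. after loading a handler:
 *
 *    ldr     ip, [r4, #4]            @ hypothetical: fetch a callback
 *    bl_r    ip                      @ lr is set up on either arch level
 */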

#endif /* __ASM_ASSEMBLER_H__ */