/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>
#include <linux/objtool.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument).
 *
 *   4. Pop the original stack pointer from the top of the irq stack,
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee saved register as this is a
 *	restriction coming from objtool.
 *
 *	Note that @tos appears in both the input and the output constraints
 *	to ensure that the compiler does not assume that R11 is left
 *	untouched in case this macro is used in some place where the per-CPU
 *	interrupt stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of the @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be chosen very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *     As documented above, the @tos variable is required to be in the
 *     output constraints to make the compiler aware that R11 cannot be
 *     reused after the asm() statement.
 *
 *     For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
 *     required as well, as this prevents certain creative GCC variants from
 *     misplacing the ASM code.
 *
 *   input:
 *     - func:
 *	 Immediate, which tells the compiler that the function is referenced.
 *
 *     - tos:
 *	 Register. The actual register is defined by the variable declaration.
 *
 *     - function arguments:
 *	 The constraints are handed in via the 'argconstr' argument list. They
 *	 describe the register arguments which are used in @asm_call.
 *
 *   clobbers:
 *     Function calls can clobber anything except the callee-saved
 *     registers. Tell the compiler.
 */
#define call_on_stack(stack, func, asm_call, argconstr...)		\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)(stack));					\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
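
/*
 * Illustrative sketch (not part of the original header): combined with the
 * ASM_CALL_ARG* snippets defined below, a zero-argument function can be run
 * on the hard irq stack roughly like this, which is what call_on_irqstack()
 * and do_softirq_own_stack() further down actually do:
 *
 *	call_on_stack(__this_cpu_read(hardirq_stack_ptr),
 *		      __do_softirq, ASM_CALL_ARG0);
 */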

#define ASM_CALL_ARG0							\
	"call %P[__func]				\n"		\
	ASM_REACHABLE

#define ASM_CALL_ARG1							\
	"movq	%[arg1], %%rdi				\n"		\
	ASM_CALL_ARG0

#define ASM_CALL_ARG2							\
	"movq	%[arg2], %%rsi				\n"		\
	ASM_CALL_ARG1

#define ASM_CALL_ARG3							\
	"movq	%[arg3], %%rdx				\n"		\
	ASM_CALL_ARG2
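
/*
 * For illustration (not in the original header): ASM_CALL_ARG2 expands to
 * the following instruction sequence, which loads the annotated operands
 * into the first two argument registers of the x86-64 calling convention
 * before the call:
 *
 *	"movq	%[arg2], %%rsi				\n"
 *	"movq	%[arg1], %%rdi				\n"
 *	"call %P[__func]				\n"
 *	ASM_REACHABLE
 */
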
#define call_on_irqstack(func, asm_call, argconstr...)			\
	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
		      func, asm_call, argconstr)

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))
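
/*
 * Illustrative sketch (hypothetical handler name): a mismatch between the
 * handler prototype and the expected signature is caught at compile time
 * rather than silently producing a broken asm call sequence:
 *
 *	assert_function_type(my_handler, void (*)(struct pt_regs *));
 *	assert_arg_type(regs, struct pt_regs *);
 */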

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If the entry came from user mode, the task	\
	 * stack is empty.						\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));	\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}
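
/*
 * Usage sketch (simplified and hypothetical, loosely following the
 * DEFINE_IDTENTRY_SYSVEC() expansion in <asm/idtentry.h>; details differ
 * between kernel versions): the generated entry stub for a hypothetical
 * sysvec_foo vector hands the actual handler to
 * run_sysvec_on_irqstack_cond() roughly like this:
 *
 *	__visible noinstr void sysvec_foo(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_foo, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */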

/*
 * As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
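
/*
 * Usage sketch (simplified, after arch/x86/kernel/irq.c; exact code may
 * differ between kernel versions): the common device interrupt entry point
 * invokes its C handler through this macro:
 *
 *	run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 */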

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}
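
/*
 * Usage note (not part of the original header): the generic softirq code in
 * kernel/softirq.c calls do_softirq_own_stack() when pending softirqs have
 * to be processed from task context, roughly:
 *
 *	if (local_softirq_pending() && !in_interrupt())
 *		do_softirq_own_stack();
 */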

#endif

#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif