/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SYNC_CORE_H
#define _ASM_X86_SYNC_CORE_H

#include <linux/preempt.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
{
    asm volatile (
        "pushfl\n\t"                    /* build an IRET frame: EFLAGS */
        "pushl %%cs\n\t"                /* CS */
        "pushl $1f\n\t"                 /* EIP = the label below */
        "iret\n\t"
        "1:"
        : ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
{
    unsigned int tmp;

    asm volatile (
        "mov %%ss, %0\n\t"              /* build an IRETQ frame: SS */
        "pushq %q0\n\t"
        "pushq %%rsp\n\t"               /* RSP, fixed up below to its */
        "addq $8, (%%rsp)\n\t"          /* value before the frame was built */
        "pushfq\n\t"                    /* RFLAGS */
        "mov %%cs, %0\n\t"              /* CS */
        "pushq %q0\n\t"
        "pushq $1f\n\t"                 /* RIP = the label below */
        "iretq\n\t"
        "1:"
        : "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
}
#endif /* CONFIG_X86_32 */

/*
 * This function forces the icache and prefetched instruction stream to
 * catch up with reality in two very specific cases:
 *
 *  a) Text was modified using one virtual address and is about to be executed
 *     from the same physical page at a different virtual address.
 *
 *  b) Text was modified on a different CPU, may subsequently be
 *     executed on this CPU, and you want to make sure the new version
 *     gets executed.  This generally means you're calling this in an IPI.
 *
 * If you're calling this for a different reason, you're probably doing
 * it wrong.
 *
 * Like all of Linux's memory ordering operations, this is a
 * compiler barrier as well.
 */
static inline void sync_core(void)
{
    /*
     * The SERIALIZE instruction is the most straightforward way to
     * do this, but it is not universally available.
     */
    if (static_cpu_has(X86_FEATURE_SERIALIZE)) {
        serialize();
        return;
    }

    /*
     * For all other processors, there are quite a few ways to do this.
     * IRET-to-self is nice because it works on every CPU, at any CPL
     * (so it's compatible with paravirtualization), and it never exits
     * to a hypervisor.  The only downsides are that it's a bit slow
     * (it seems to be a bit more than 2x slower than the fastest
     * options) and that it unmasks NMIs.  The "push %cs" is needed,
     * because in paravirtual environments __KERNEL_CS may not be a
     * valid CS value when we do IRET directly.
     *
     * In case NMI unmasking or performance ever becomes a problem,
     * the next best option appears to be MOV-to-CR2 and an
     * unconditional jump.  That sequence also works on all CPUs,
     * but it will fault at CPL3 (i.e. Xen PV).
     *
     * CPUID is the conventional way, but it's nasty: it doesn't
     * exist on some 486-like CPUs, and it usually exits to a
     * hypervisor.
     */
    iret_to_self();
}
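
/*
 * Illustrative sketch (guarded with #if 0, not part of the header itself):
 * case (b) above in practice.  One CPU patches kernel text, then forces
 * every CPU to serialize via an IPI so no stale prefetched copy of the old
 * instructions keeps running.  text_poke() and on_each_cpu() are existing
 * kernel APIs; the wrapper names and the simplified flow are assumptions
 * made for illustration only.
 */
#if 0
#include <linux/smp.h>
#include <asm/text-patching.h>

static void example_sync_core_ipi(void *info)
{
    sync_core();                            /* runs in IPI context, case (b) */
}

static void example_patch_text_and_resync(void *addr, const void *opcode,
                                           size_t len)
{
    text_poke(addr, opcode, len);           /* modify kernel text */
    on_each_cpu(example_sync_core_ipi, NULL, 1); /* serialize all CPUs, wait */
}
#endif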

/*
 * Ensure that a core serializing instruction is issued before returning
 * to user-mode. x86 implements return to user-space through sysexit,
 * sysretl, and sysretq, which are not core serializing.
 */
static inline void sync_core_before_usermode(void)
{
    /* With PTI, we unconditionally serialize before running user code. */
    if (static_cpu_has(X86_FEATURE_PTI))
        return;

    /*
     * Even if we're in an interrupt, we might reschedule before returning,
     * in which case we could switch to a different thread in the same mm
     * and return using SYSRET or SYSEXIT.  Instead of trying to keep
     * track of our need to sync the core, just sync right away.
     */
    sync_core();
}
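
/*
 * Illustrative sketch (guarded with #if 0, not part of the header itself):
 * how a return-to-user path might use the helper above.  The predicate
 * example_mm_needs_core_sync() is a made-up placeholder; in mainline the
 * real consumer is the membarrier() MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
 * machinery, which ends up calling sync_core_before_usermode() when the
 * current mm has requested core serialization.
 */
#if 0
static inline void example_prepare_exit_to_usermode(void)
{
    /* Hypothetical check: was text this task may run modified elsewhere? */
    if (example_mm_needs_core_sync(current->mm))
        sync_core_before_usermode();
}
#endif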

#endif /* _ASM_X86_SYNC_CORE_H */