/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg        gs
#else
#define __percpu_seg        fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)    %__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var)    var
#endif  /* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix     "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset     this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)               \
({                          \
    unsigned long tcp_ptr__;            \
    asm ("add " __percpu_arg(1) ", %0"      \
         : "=r" (tcp_ptr__)             \
         : "m" (this_cpu_off), "0" (ptr));      \
    (typeof(*(ptr)) __kernel __force *)tcp_ptr__;   \
})
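
/*
 * Illustration (hypothetical per-cpu variable, not part of this header):
 * for
 *     DEFINE_PER_CPU(int, demo_val);
 *     int *p = raw_cpu_ptr(&demo_val);
 * the generic layer ends up in arch_raw_cpu_ptr() above, which emits
 * roughly
 *     leaq demo_val(%rip), %rax          # ptr, tied in via "0"
 *     addq %gs:this_cpu_off, %rax        # add this CPU's offset
 * i.e. the segment-based load of this_cpu_off is folded into the add,
 * saving the separate read into a temporary register that the generic
 * __my_cpu_offset path would need.
 */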
#else
#define __percpu_prefix     ""
#endif

#define __percpu_arg(x)     __percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  var
#endif
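
/*
 * Usage sketch (hypothetical variable name): a per-cpu variable that the
 * boot CPU must touch before the per-cpu areas are set up is declared as
 *     DECLARE_INIT_PER_CPU(boot_var);
 * and early code takes &init_per_cpu_var(boot_var), which on x86-64 SMP
 * resolves to the init_per_cpu__boot_var symbol that the linker script
 * places at the proper offset from __per_cpu_load; on other
 * configurations it degenerates to the variable itself.
 */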

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)
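
/*
 * How the helpers above compose, taking size 4 as an example:
 *     __pcpu_op2_4("mov", "%[val]", __percpu_arg([var]))
 * produces the assembler template
 *     "movl %[val], %%gs:%[var]"         (%%fs: on 32-bit)
 * while __pcpu_reg_4("=", x) supplies the matching "=r" (x) constraint,
 * __pcpu_reg_imm_4(x) additionally allows an immediate ("ri"), and
 * __pcpu_type_4/__pcpu_cast_4 pin the operand down to a plain u32.
 */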

#define percpu_to_op(size, qual, op, _var, _val)            \
do {                                    \
    __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);    \
    if (0) {                                                \
        typeof(_var) pto_tmp__;                 \
        pto_tmp__ = (_val);                 \
        (void)pto_tmp__;                    \
    }                               \
    asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))   \
        : [var] "+m" (_var)                     \
        : [val] __pcpu_reg_imm_##size(pto_val__));          \
} while (0)
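
/*
 * Note that the "if (0)" block in percpu_to_op() never generates code; it
 * only makes the compiler check that _val is assignable to typeof(_var),
 * since the operand actually used has been cast to a fixed-width integer.
 * Illustration (hypothetical 4-byte variable): this_cpu_write(demo_val, 3)
 * lands here and expands to roughly
 *     asm volatile("movl %[val], %%gs:%[var]"
 *                  : [var] "+m" (demo_val) : [val] "ri" (3));
 */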

#define percpu_unary_op(size, qual, op, _var)               \
({                                  \
    asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))        \
        : [var] "+m" (_var));                   \
})

/*
 * Generate a per-cpu add-to-memory instruction, and optimize the code
 * when the value added is a compile-time constant 1 or -1, using
 * inc/dec instead of add.
 */
#define percpu_add_op(size, qual, var, val)             \
do {                                    \
    const int pao_ID__ = (__builtin_constant_p(val) &&      \
                  ((val) == 1 || (val) == -1)) ?        \
                (int)(val) : 0;             \
    if (0) {                            \
        typeof(var) pao_tmp__;                  \
        pao_tmp__ = (val);                  \
        (void)pao_tmp__;                    \
    }                               \
    if (pao_ID__ == 1)                      \
        percpu_unary_op(size, qual, "inc", var);        \
    else if (pao_ID__ == -1)                    \
        percpu_unary_op(size, qual, "dec", var);        \
    else                                \
        percpu_to_op(size, qual, "add", var, val);      \
} while (0)
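
/*
 * Illustration (hypothetical 4-byte per-cpu counter):
 *     this_cpu_add(demo_cnt, 1);     ->  incl %gs:demo_cnt
 *     this_cpu_add(demo_cnt, -1);    ->  decl %gs:demo_cnt
 *     this_cpu_add(demo_cnt, 16);    ->  addl $16, %gs:demo_cnt
 * pao_ID__ is non-zero only for the compile-time constants 1 and -1, so
 * the inc/dec selection is resolved entirely at build time.
 */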

#define percpu_from_op(size, qual, op, _var)                \
({                                  \
    __pcpu_type_##size pfo_val__;                   \
    asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")  \
        : [val] __pcpu_reg_##size("=", pfo_val__)           \
        : [var] "m" (_var));                    \
    (typeof(_var))(unsigned long) pfo_val__;            \
})

#define percpu_stable_op(size, op, _var)                \
({                                  \
    __pcpu_type_##size pfo_val__;                   \
    asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]")   \
        : [val] __pcpu_reg_##size("=", pfo_val__)           \
        : [var] "p" (&(_var)));                 \
    (typeof(_var))(unsigned long) pfo_val__;            \
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)            \
({                                  \
    __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);   \
    asm qual (__pcpu_op2_##size("xadd", "%[tmp]",           \
                     __percpu_arg([var]))       \
          : [tmp] __pcpu_reg_##size("+", paro_tmp__),       \
            [var] "+m" (_var)                   \
          : : "memory");                    \
    (typeof(_var))(unsigned long) (paro_tmp__ + _val);      \
})
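
/*
 * Illustration (hypothetical variable): this_cpu_add_return(demo_cnt, 5)
 * becomes roughly
 *     movl  $5, %eax
 *     xaddl %eax, %gs:demo_cnt       # %eax now holds the old value
 * and the macro returns the old value plus 5, i.e. the value the per-cpu
 * slot holds after the update, matching the generic add_return semantics.
 */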

/*
 * xchg is implemented here using cmpxchg, which carries no implied lock
 * prefix.  The xchg instruction itself is expensive because it always
 * implies a lock prefix, and the processor cannot prefetch cachelines
 * when xchg is used.
 */
#define percpu_xchg_op(size, qual, _var, _nval)             \
({                                  \
    __pcpu_type_##size pxo_old__;                   \
    __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval);   \
    asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]),     \
                    "%[oval]")              \
          "\n1:\t"                      \
          __pcpu_op2_##size("cmpxchg", "%[nval]",       \
                    __percpu_arg([var]))        \
          "\n\tjnz 1b"                      \
          : [oval] "=&a" (pxo_old__),               \
            [var] "+m" (_var)                   \
          : [nval] __pcpu_reg_##size(, pxo_new__)       \
          : "memory");                      \
    (typeof(_var))(unsigned long) pxo_old__;            \
})
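
/*
 * For size 4 and a hypothetical variable, the template above expands to
 * roughly
 *     movl     %gs:demo_val, %eax    # speculative "old" value
 * 1:  cmpxchgl %edx, %gs:demo_val    # store new iff %eax still matches
 *     jnz      1b                    # an interrupt changed it; retry
 * A failing cmpxchg reloads %eax with the current value, so the retry can
 * jump straight back to the cmpxchg.  The variable is CPU-local, so the
 * loop essentially never iterates.
 */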

/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)       \
({                                  \
    __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);   \
    __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);   \
    asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",       \
                    __percpu_arg([var]))        \
          : [oval] "+a" (pco_old__),                \
            [var] "+m" (_var)                   \
          : [nval] __pcpu_reg_##size(, pco_new__)       \
          : "memory");                      \
    (typeof(_var))(unsigned long) pco_old__;            \
})
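
/*
 * Usage sketch (hypothetical variables): claim a per-cpu slot only if it
 * is still free, without disabling interrupts around a read-modify-write:
 *     if (this_cpu_cmpxchg(demo_owner, 0, my_id) == 0)
 *             ... this CPU's slot is now ours ...
 * Without a lock prefix the operation is not atomic against other CPUs,
 * only against interrupts on the local CPU, which is all per-cpu data
 * requires.
 */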

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp)   __pcpu_size_call_return(this_cpu_read_stable_, pcp)
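
/*
 * Example of the "stable" read described above: get_current() in
 * asm/current.h returns this_cpu_read_stable(current_task), so the
 * compiler may cache and hoist the load.  That is safe because the value
 * is really per-thread: a task always sees its own pointer, no matter
 * which CPU it is currently running on.
 */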

#define raw_cpu_read_1(pcp)     percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp)     percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp)     percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val)   percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)   percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)   percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)     percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)     percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)     percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)     percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)     percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)     percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)      percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)      percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)      percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval)                   \
({                                  \
    typeof(var) pxo_ret__ = raw_cpu_read(var);          \
    raw_cpu_write(var, (nval));                 \
    pxo_ret__;                          \
})
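
/*
 * Illustration (hypothetical variable): raw_cpu_xchg(demo_val, 0) is just
 * the two plain moves of a read followed by a write.  That is only safe
 * when the caller already guarantees that no interrupt or preemption can
 * touch the variable in between; the this_cpu_xchg_*() variants below use
 * the cmpxchg-based percpu_xchg_op() instead.
 */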

#define raw_cpu_xchg_1(pcp, val)    raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)    raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)    raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)        percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp)        percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp)        percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val)  percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val)  percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val)  percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val)    percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)    percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)    percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)    percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)    percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)    percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)     percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)     percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)     percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)  percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval)  percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval)  percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val)      percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)      percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)      percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)  percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)  percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)  percpu_cmpxchg_op(4, , pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)     percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)     percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)     percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)     \
({                                  \
    bool __ret;                         \
    typeof(pcp1) __o1 = (o1), __n1 = (n1);              \
    typeof(pcp2) __o2 = (o2), __n2 = (n2);              \
    asm volatile("cmpxchg8b "__percpu_arg(1)            \
             CC_SET(z)                      \
             : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
             : "b" (__n1), "c" (__n2));             \
    __ret;                              \
})

#define raw_cpu_cmpxchg_double_4    percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4   percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
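
/*
 * Usage sketch (hypothetical variables): the _double variants update two
 * adjacent, suitably aligned per-cpu words as one unit, e.g.
 *     this_cpu_cmpxchg_double(demo_lo, demo_hi,
 *                             old_lo, old_hi, new_lo, new_hi);
 * which the generic layer routes to percpu_cmpxchg8b_double() above on
 * 32-bit, i.e. a single non-locked cmpxchg8b on the %fs-based per-cpu
 * data, returning true when both old values matched and the store was
 * performed.
 */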

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)         percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val)       percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)         percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)         percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)          percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)      percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)       raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)  percpu_cmpxchg_op(8, , pcp, oval, nval)

#define this_cpu_read_8(pcp)            percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val)      percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val)        percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)        percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)         percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)     percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)      percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors, so we must be
 * able to emulate it in software.  The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)        \
({                                  \
    bool __ret;                         \
    typeof(pcp1) __o1 = (o1), __n1 = (n1);              \
    typeof(pcp2) __o2 = (o2), __n2 = (n2);              \
    alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
               "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
               X86_FEATURE_CX16,                \
               ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),       \
                   "+m" (pcp2), "+d" (__o2)),       \
               "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
    __ret;                              \
})

#define raw_cpu_cmpxchg_double_8    percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8   percpu_cmpxchg16b_double
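
/*
 * percpu_cmpxchg16b_double() relies on alternative_io(): the default code
 * calls the this_cpu_cmpxchg16b_emu software fallback, and on processors
 * that advertise X86_FEATURE_CX16 the alternatives patching replaces it at
 * boot with the real (non-locked) cmpxchg16b plus setz.  As the comment
 * above notes, the two 8-byte per-cpu words must be adjacent and 16-byte
 * aligned.
 */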

#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long __percpu *addr)
{
    unsigned long __percpu *a =
        (unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
    return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
    return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long __percpu *addr)
{
    bool oldbit;

    asm volatile("btl "__percpu_arg(2)",%1"
            CC_SET(c)
            : CC_OUT(c) (oldbit)
            : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

    return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)         \
    (__builtin_constant_p((nr))         \
     ? x86_this_cpu_constant_test_bit((nr), (addr)) \
     : x86_this_cpu_variable_test_bit((nr), (addr)))
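
/*
 * x86_this_cpu_test_bit() takes the constant path (a plain per-cpu read
 * and a compile-time mask) when nr is known at build time, and a btl on
 * the per-cpu segment otherwise.  One user is this_cpu_has() in
 * asm/cpufeature.h, which tests feature bits in the per-cpu cpu_info
 * capability words.
 */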


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
    DEFINE_PER_CPU(_type, _name) = _initvalue;          \
    __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =   \
                { [0 ... NR_CPUS-1] = _initvalue }; \
    __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)  \
    DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;      \
    __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =   \
                { [0 ... NR_CPUS-1] = _initvalue }; \
    __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)          \
    EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)         \
    DECLARE_PER_CPU(_type, _name);              \
    extern __typeof__(_type) *_name##_early_ptr;        \
    extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)     \
    DECLARE_PER_CPU_READ_MOSTLY(_type, _name);      \
    extern __typeof__(_type) *_name##_early_ptr;        \
    extern __typeof__(_type)  _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)              \
    *(early_per_cpu_ptr(_name) ?                \
        &early_per_cpu_ptr(_name)[_cpu] :       \
        &per_cpu(_name, _cpu))
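
/*
 * Usage sketch: the APIC code defines, roughly,
 *     DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
 * and reads it through early_per_cpu(x86_cpu_to_apicid, cpu).  Before the
 * per-cpu areas exist the accessor goes through the __initdata
 * _early_map[] array via _early_ptr; once setup_per_cpu_areas() has copied
 * the values into the real per-cpu area and cleared the early pointer, the
 * same expression falls through to per_cpu().
 */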

#else   /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)      \
    DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)  \
    DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)          \
    EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)         \
    DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)     \
    DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif  /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */