Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_STATIC_CALL_H
0003 #define _LINUX_STATIC_CALL_H
0004 
0005 /*
0006  * Static call support
0007  *
0008  * Static calls use code patching to hard-code function pointers into direct
0009  * branch instructions. They give the flexibility of function pointers, but
0010  * with improved performance. This is especially important for cases where
0011  * retpolines would otherwise be used, as retpolines can significantly impact
0012  * performance.
0013  *
0014  *
0015  * API overview:
0016  *
0017  *   DECLARE_STATIC_CALL(name, func);
0018  *   DEFINE_STATIC_CALL(name, func);
0019  *   DEFINE_STATIC_CALL_NULL(name, typename);
0020  *   DEFINE_STATIC_CALL_RET0(name, typename);
0021  *
0022  *   __static_call_return0;
0023  *
0024  *   static_call(name)(args...);
0025  *   static_call_cond(name)(args...);
0026  *   static_call_update(name, func);
0027  *   static_call_query(name);
0028  *
0029  *   EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
0030  *
0031  * Usage example:
0032  *
0033  *   # Start with the following functions (with identical prototypes):
0034  *   int func_a(int arg1, int arg2);
0035  *   int func_b(int arg1, int arg2);
0036  *
0037  *   # Define a 'my_name' reference, associated with func_a() by default
0038  *   DEFINE_STATIC_CALL(my_name, func_a);
0039  *
0040  *   # Call func_a()
0041  *   static_call(my_name)(arg1, arg2);
0042  *
0043  *   # Update 'my_name' to point to func_b()
0044  *   static_call_update(my_name, &func_b);
0045  *
0046  *   # Call func_b()
0047  *   static_call(my_name)(arg1, arg2);
0048  *
0049  *
0050  * Implementation details:
0051  *
0052  *   This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
0053  *   Otherwise basic indirect calls are used (with function pointers).
0054  *
0055  *   Each static_call() site calls into a trampoline associated with the name.
0056  *   The trampoline has a direct branch to the default function.  Updates to a
0057  *   name will modify the trampoline's branch destination.
0058  *
0059  *   If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
0060  *   themselves will be patched at runtime to call the functions directly,
0061  *   rather than calling through the trampoline.  This requires objtool or a
0062  *   compiler plugin to detect all the static_call() sites and annotate them
0063  *   in the .static_call_sites section.
0064  *
0065  *
0066  * Notes on NULL function pointers:
0067  *
0068  *   Static_call()s support NULL functions, with many of the caveats that
0069  *   regular function pointers have.
0070  *
0071  *   Clearly calling a NULL function pointer is 'BAD', so too for
0072  *   static_call()s (although when HAVE_STATIC_CALL it might not be immediately
0073  *   fatal). A NULL static_call can be the result of:
0074  *
0075  *     DECLARE_STATIC_CALL_NULL(my_static_call, void (*)(int));
0076  *
0077  *   which is equivalent to declaring a NULL function pointer with just a
0078  *   typename:
0079  *
0080  *     void (*my_func_ptr)(int arg1) = NULL;
0081  *
0082  *   or using static_call_update() with a NULL function. In both cases the
0083  *   HAVE_STATIC_CALL implementation will patch the trampoline with a RET
0084  *   instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
0085  *   architectures can patch the trampoline call to a NOP.
0086  *
0087  *   In all cases, any argument evaluation is unconditional. Unlike a regular
0088  *   conditional function pointer call:
0089  *
0090  *     if (my_func_ptr)
0091  *         my_func_ptr(arg1)
0092  *
0093  *   where the argument evaluation also depends on the pointer value.
0094  *
0095  *   When calling a static_call that can be NULL, use:
0096  *
0097  *     static_call_cond(name)(arg1);
0098  *
0099  *   which will include the required value tests to avoid NULL-pointer
0100  *   dereferences.
0101  *
0102  *   To query which function is currently set to be called, use:
0103  *
0104  *   func = static_call_query(name);
0105  *
0106  *
0107  * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
0108  *
0109  *   Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
0110  *   conditional void function call, DEFINE_STATIC_CALL_RET0 /
0111  *   __static_call_return0 optimize the do nothing return 0 function.
0112  *
0113  *   This feature is strictly UB per the C standard (since it casts a function
0114  *   pointer to a different signature) and relies on the architecture ABI to
0115  *   make things work. In particular it relies on Caller Stack-cleanup and the
0116  *   whole return register being clobbered for short return values. All normal
0117  *   CDECL style ABIs conform.
0118  *
0119  *   In particular the x86_64 implementation replaces the 5 byte CALL
0120  *   instruction at the callsite with a 5 byte clear of the RAX register,
0121  *   completely eliding any function call overhead.
0122  *
0123  *   Notably argument setup is unconditional.
0124  *
0125  *
0126  * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
0127  *
0128  *   The difference is that the _TRAMP variant tries to only export the
0129  *   trampoline with the result that a module can use static_call{,_cond}() but
0130  *   not static_call_update().
0131  *
0132  */
0133 
0134 #include <linux/types.h>
0135 #include <linux/cpu.h>
0136 #include <linux/static_call_types.h>
0137 
#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>

/*
 * Either @site or @tramp can be NULL.
 */
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);

/* Address of the arch-provided trampoline for @name; this is what gets patched. */
#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)

#else
/* No arch support: there is no trampoline, only the function pointer in the key. */
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif
0151 
/*
 * Retarget the static call @name to @func. The assignment to __F exists
 * purely so the compiler type-checks @func against the declared prototype
 * (typeof the trampoline) before the untyped __static_call_update() call.
 */
#define static_call_update(name, func)                  \
({                                  \
    typeof(&STATIC_CALL_TRAMP(name)) __F = (func);          \
    __static_call_update(&STATIC_CALL_KEY(name),            \
                 STATIC_CALL_TRAMP_ADDR(name), __F);    \
})

/* Read the currently installed target; a racing update is tolerated (READ_ONCE). */
#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
0160 
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE

extern int __init static_call_init(void);

/* Per-module chain of static_call sites, so module text can be patched too. */
struct static_call_mod {
    struct static_call_mod *next;
    struct module *mod; /* for vmlinux, mod == NULL */
    struct static_call_site *sites;
};

/* For finding the key associated with a trampoline */
struct static_call_tramp_key {
    s32 tramp;  /* NOTE(review): presumably a relative offset to the trampoline — confirm against ARCH_ADD_TRAMP_KEY */
    s32 key;    /* NOTE(review): presumably a relative offset to the static_call_key */
};

extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);

/* Shared "do nothing, return 0" target used by the RET0 variants. */
extern long __static_call_return0(void);
0182 
/*
 * Define static call @name with initial target @_func, plus the arch
 * trampoline that call sites branch through until they are inline-patched.
 * NOTE(review): .type = 1 presumably tags the key's site-list union —
 * confirm against struct static_call_key in <linux/static_call_types.h>.
 */
#define DEFINE_STATIC_CALL(name, _func)                 \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = _func,                      \
        .type = 1,                      \
    };                              \
    ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
0190 
/*
 * Define static call @name that starts out NULL; @_func provides only the
 * prototype for type checking. Must be invoked via static_call_cond(), and
 * the arch trampoline starts as a RET (see "Notes on NULL function pointers"
 * in the header comment).
 */
#define DEFINE_STATIC_CALL_NULL(name, _func)                \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = NULL,                       \
        .type = 1,                      \
    };                              \
    ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
0198 
/*
 * Define static call @name that starts out returning 0 via the shared
 * __static_call_return0() target; @_func provides only the prototype.
 * See "DEFINE_STATIC_CALL_RET0 / __static_call_return0" in the header
 * comment for the (deliberate, ABI-reliant) signature-cast caveats.
 */
#define DEFINE_STATIC_CALL_RET0(name, _func)                \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = __static_call_return0,              \
        .type = 1,                      \
    };                              \
    ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
0206 
0207 #define static_call_cond(name)  (void)__static_call(name)
0208 
/* Export key + trampoline: modules may both call AND retarget this static call. */
#define EXPORT_STATIC_CALL(name)                    \
    EXPORT_SYMBOL(STATIC_CALL_KEY(name));               \
    EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)                    \
    EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));           \
    EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
/* NOTE(review): ARCH_ADD_TRAMP_KEY presumably records a tramp->key mapping
 * so core code can still locate the key for module call sites — confirm
 * against the arch header. */
#define EXPORT_STATIC_CALL_TRAMP(name)                  \
    EXPORT_SYMBOL(STATIC_CALL_TRAMP(name));             \
    ARCH_ADD_TRAMP_KEY(name)
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)              \
    EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name));         \
    ARCH_ADD_TRAMP_KEY(name)
0223 
0224 #elif defined(CONFIG_HAVE_STATIC_CALL)
0225 
0226 static inline int static_call_init(void) { return 0; }
0227 
/*
 * Out-of-line variant: define the key with initial target @_func plus the
 * arch trampoline; all call sites permanently branch via the trampoline.
 */
#define DEFINE_STATIC_CALL(name, _func)                 \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = _func,                      \
    };                              \
    ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
0234 
/*
 * Out-of-line variant starting out NULL; @_func provides only the prototype.
 * Use static_call_cond() to call it safely (the NULL trampoline is a RET).
 */
#define DEFINE_STATIC_CALL_NULL(name, _func)                \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = NULL,                       \
    };                              \
    ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
0241 
/*
 * Out-of-line variant starting out as "return 0" via __static_call_return0();
 * @_func provides only the prototype for type checking.
 */
#define DEFINE_STATIC_CALL_RET0(name, _func)                \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = __static_call_return0,              \
    };                              \
    ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
0248 
0249 #define static_call_cond(name)  (void)__static_call(name)
0250 
/*
 * Install @func as the new target: publish it in the key (what
 * static_call_query() reads), then patch the trampoline's branch.
 * NOTE(review): cpus_read_lock() is held across the text patch — presumably
 * required by the arch patching machinery; confirm before reordering
 * anything in here.
 */
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
    cpus_read_lock();
    WRITE_ONCE(key->func, func);
    arch_static_call_transform(NULL, tramp, func, false);
    cpus_read_unlock();
}
0259 
/*
 * Only HAVE_STATIC_CALL_INLINE patches call sites; with out-of-line
 * trampolines no kernel text range is ever reserved by static calls.
 */
static inline int static_call_text_reserved(void *start, void *end)
{
    (void)start;
    (void)end;

    return 0;
}
0264 
0265 extern long __static_call_return0(void);
0266 
/* Export key + trampoline: modules may both call AND retarget this static call. */
#define EXPORT_STATIC_CALL(name)                    \
    EXPORT_SYMBOL(STATIC_CALL_KEY(name));               \
    EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name)                    \
    EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name));           \
    EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))

/* Leave the key unexported, so modules can't change static call targets: */
#define EXPORT_STATIC_CALL_TRAMP(name)                  \
    EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_TRAMP_GPL(name)              \
    EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
0279 
0280 #else /* Generic implementation */
0281 
0282 static inline int static_call_init(void) { return 0; }
0283 
/*
 * Generic "do nothing, return 0" target used by DEFINE_STATIC_CALL_RET0()
 * when static calls are plain function pointers.
 */
static inline long __static_call_return0(void)
{
    long ret = 0;

    return ret;
}
0288 
/*
 * Generic implementation: a static call is nothing but a function pointer
 * stored in the key; there is no trampoline. @_func_init is the actual
 * initial value (@_func, NULL, or __static_call_return0) while @_func only
 * feeds DECLARE_STATIC_CALL()'s type checking.
 */
#define __DEFINE_STATIC_CALL(name, _func, _func_init)           \
    DECLARE_STATIC_CALL(name, _func);               \
    struct static_call_key STATIC_CALL_KEY(name) = {        \
        .func = _func_init,                 \
    }

#define DEFINE_STATIC_CALL(name, _func)                 \
    __DEFINE_STATIC_CALL(name, _func, _func)

/* Starts out NULL; must be invoked via static_call_cond(). */
#define DEFINE_STATIC_CALL_NULL(name, _func)                \
    __DEFINE_STATIC_CALL(name, _func, NULL)

/* Starts out returning 0 via __static_call_return0(). */
#define DEFINE_STATIC_CALL_RET0(name, _func)                \
    __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
0303 
0304 static inline void __static_call_nop(void) { }
0305 
/*
 * This horrific hack takes care of two things:
 *
 *  - it ensures the compiler will only load the function pointer ONCE,
 *    which avoids a reload race.
 *
 *  - it ensures the argument evaluation is unconditional, similar
 *    to the HAVE_STATIC_CALL variant.
 *
 * Sadly current GCC/Clang (10 for both) do not optimize this properly
 * and will emit an indirect call for the NULL case :-(
 *
 * The statement expression yields a function pointer of the declared type
 * (it never calls it); the caller supplies the argument list.
 */
#define __static_call_cond(name)                    \
({                                  \
    void *func = READ_ONCE(STATIC_CALL_KEY(name).func);     \
    if (!func)                          \
        func = &__static_call_nop;              \
    /* cast back to the declared type so the call type-checks */    \
    (typeof(STATIC_CALL_TRAMP(name))*)func;             \
})
0325 
0326 #define static_call_cond(name)  (void)__static_call_cond(name)
0327 
/*
 * Generic update: just publish the new target pointer (@tramp is unused —
 * there is no trampoline). Concurrent callers observe either the old or
 * the new function via the READ_ONCE in static_call_query()/__static_call_cond().
 */
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
    WRITE_ONCE(key->func, func);
}
0333 
/*
 * The generic implementation never patches kernel text, so no range can
 * ever be reserved by a static call.
 */
static inline int static_call_text_reserved(void *start, void *end)
{
    (void)start;
    (void)end;

    return 0;
}
0338 
/* No trampolines in the generic case: exporting the key exports everything,
 * so modules can always retarget (no _TRAMP-only variant is possible). */
#define EXPORT_STATIC_CALL(name)    EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name)    EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
0341 
0342 #endif /* CONFIG_HAVE_STATIC_CALL */
0343 
0344 #endif /* _LINUX_STATIC_CALL_H */