Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: LGPL-2.1 OR MIT */
0002 /*
0003  * rseq.h
0004  *
0005  * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
0006  */
0007 
0008 #ifndef RSEQ_H
0009 #define RSEQ_H
0010 
0011 #include <stdint.h>
0012 #include <stdbool.h>
0013 #include <pthread.h>
0014 #include <signal.h>
0015 #include <sched.h>
0016 #include <errno.h>
0017 #include <stdio.h>
0018 #include <stdlib.h>
0019 #include <stddef.h>
0020 #include "rseq-abi.h"
0021 #include "compiler.h"
0022 
0023 /*
0024  * Empty code injection macros, override when testing.
0025  * It is important to consider that the ASM injection macros need to be
0026  * fully reentrant (e.g. do not modify the stack).
0027  */
0028 #ifndef RSEQ_INJECT_ASM
0029 #define RSEQ_INJECT_ASM(n)
0030 #endif
0031 
0032 #ifndef RSEQ_INJECT_C
0033 #define RSEQ_INJECT_C(n)
0034 #endif
0035 
0036 #ifndef RSEQ_INJECT_INPUT
0037 #define RSEQ_INJECT_INPUT
0038 #endif
0039 
0040 #ifndef RSEQ_INJECT_CLOBBER
0041 #define RSEQ_INJECT_CLOBBER
0042 #endif
0043 
0044 #ifndef RSEQ_INJECT_FAILED
0045 #define RSEQ_INJECT_FAILED
0046 #endif
0047 
0048 #include "rseq-thread-pointer.h"
0049 
0050 /* Offset from the thread pointer to the rseq area.  */
0051 extern ptrdiff_t rseq_offset;
0052 /* Size of the registered rseq area.  0 if the registration was
0053    unsuccessful.  */
0054 extern unsigned int rseq_size;
0055 /* Flags used during rseq registration.  */
0056 extern unsigned int rseq_flags;
0057 
0058 static inline struct rseq_abi *rseq_get_abi(void)
0059 {
0060     return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
0061 }
0062 
/* Branch-prediction hints, mirroring the kernel's likely()/unlikely(). */
#define rseq_likely(x)      __builtin_expect(!!(x), 1)
#define rseq_unlikely(x)    __builtin_expect(!!(x), 0)
/* Compiler-only barrier: prevents the compiler from reordering or caching
 * memory accesses across it (no hardware fence is emitted). */
#define rseq_barrier()      __asm__ __volatile__("" : : : "memory")

/*
 * Access x through a volatile-qualified lvalue so the compiler performs
 * the load/store exactly where written, like the kernel's
 * READ_ONCE()/WRITE_ONCE().
 */
#define RSEQ_ACCESS_ONCE(x) (*(__volatile__  __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)   __extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)   RSEQ_ACCESS_ONCE(x)
0070 
/* Two-level stringification: the extra indirection lets macro arguments
 * (e.g. __LINE__) expand before being turned into a string literal. */
#define __rseq_str_1(x) #x
#define __rseq_str(x)       __rseq_str_1(x)
0073 
/*
 * Print a diagnostic message to stderr, suffixed with the calling
 * function, file and line: "msg (in func() at file:line)".
 *
 * Fix: the suffix literal previously read "(in %s() at file:line" —
 * the parenthesis was never closed and no separator followed the
 * caller-supplied message.
 */
#define rseq_log(fmt, args...)                             \
	fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
		## args, __func__)
0077 
/* Report an unrecoverable error and terminate the process via abort().
 * Never returns. */
#define rseq_bug(fmt, args...)      \
    do {                \
        rseq_log(fmt, ##args);  \
        abort();        \
    } while (0)
0083 
0084 #if defined(__x86_64__) || defined(__i386__)
0085 #include <rseq-x86.h>
0086 #elif defined(__ARMEL__)
0087 #include <rseq-arm.h>
0088 #elif defined (__AARCH64EL__)
0089 #include <rseq-arm64.h>
0090 #elif defined(__PPC__)
0091 #include <rseq-ppc.h>
0092 #elif defined(__mips__)
0093 #include <rseq-mips.h>
0094 #elif defined(__s390__)
0095 #include <rseq-s390.h>
0096 #elif defined(__riscv)
0097 #include <rseq-riscv.h>
0098 #else
0099 #error unsupported target
0100 #endif
0101 
0102 /*
0103  * Register rseq for the current thread. This needs to be called once
0104  * by any thread which uses restartable sequences, before they start
0105  * using restartable sequences, to ensure restartable sequences
0106  * succeed. A restartable sequence executed from a non-registered
0107  * thread will always fail.
0108  */
0109 int rseq_register_current_thread(void);
0110 
0111 /*
0112  * Unregister rseq for current thread.
0113  */
0114 int rseq_unregister_current_thread(void);
0115 
0116 /*
0117  * Restartable sequence fallback for reading the current CPU number.
0118  */
0119 int32_t rseq_fallback_current_cpu(void);
0120 
0121 /*
0122  * Values returned can be either the current CPU number, -1 (rseq is
0123  * uninitialized), or -2 (rseq initialization has failed).
0124  */
0125 static inline int32_t rseq_current_cpu_raw(void)
0126 {
0127     return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id);
0128 }
0129 
0130 /*
0131  * Returns a possible CPU number, which is typically the current CPU.
0132  * The returned CPU number can be used to prepare for an rseq critical
0133  * section, which will confirm whether the cpu number is indeed the
0134  * current one, and whether rseq is initialized.
0135  *
0136  * The CPU number returned by rseq_cpu_start should always be validated
0137  * by passing it to a rseq asm sequence, or by comparing it to the
0138  * return value of rseq_current_cpu_raw() if the rseq asm sequence
0139  * does not need to be invoked.
0140  */
0141 static inline uint32_t rseq_cpu_start(void)
0142 {
0143     return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start);
0144 }
0145 
/*
 * Return the current CPU number: fast path through the rseq area when
 * it holds a valid CPU id, otherwise fall back to the slow lookup.
 */
static inline uint32_t rseq_current_cpu(void)
{
	int32_t raw = rseq_current_cpu_raw();

	if (rseq_likely(raw >= 0))
		return raw;
	return rseq_fallback_current_cpu();
}
0155 
0156 static inline void rseq_clear_rseq_cs(void)
0157 {
0158     RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
0159 }
0160 
0161 /*
0162  * rseq_prepare_unload() should be invoked by each thread executing a rseq
0163  * critical section at least once between their last critical section and
0164  * library unload of the library defining the rseq critical section (struct
0165  * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
0166  * post_commit_offset fields. This also applies to use of rseq in code
0167  * generated by JIT: rseq_prepare_unload() should be invoked at least once by
0168  * each thread executing a rseq critical section before reclaim of the memory
0169  * holding the struct rseq_cs or reclaim of the code pointed to by struct
0170  * rseq_cs start_ip and post_commit_offset fields.
0171  */
0172 static inline void rseq_prepare_unload(void)
0173 {
0174     rseq_clear_rseq_cs();
0175 }
0176 
#endif  /* RSEQ_H */