/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H

#include <assert.h>  /* assert() in the fallback cpu_relax() */
#include <stdbool.h>
#include <stdlib.h>  /* _Exit() in the fallback wait_cycles() */

extern int param;

extern bool do_exit;

#if defined(__x86_64__) || defined(__i386__)
#include <x86intrin.h>

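/* Busy-wait for roughly @cycles TSC ticks, as read via __rdtsc(). */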
static inline void wait_cycles(unsigned long long cycles)
{
    unsigned long long t;

    t = __rdtsc();
    while (__rdtsc() - t < cycles) {}
}

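/* Rough cost of a VM exit/entry in TSC cycles; used by vmexit()/vmentry() below. */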
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500

#elif defined(__s390x__)
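/*
 * brctg decrements the count and branches back until it hits zero, so the
 * delay is a loop-iteration count rather than an exact cycle count.
 */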
static inline void wait_cycles(unsigned long long cycles)
{
    asm volatile("0: brctg %0,0b" : "+d" (cycles));
}

/* tweak me */
#define VMEXIT_CYCLES 200
#define VMENTRY_CYCLES 200

#else
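/* No calibrated delay loop for this architecture: fail loudly instead of producing bogus timings. */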
static inline void wait_cycles(unsigned long long cycles)
{
    _Exit(5);
}
#define VMEXIT_CYCLES 0
#define VMENTRY_CYCLES 0
#endif

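/* Model the cost of a guest/host switch whenever exits are enabled. */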
static inline void vmexit(void)
{
    if (!do_exit)
        return;

    wait_cycles(VMEXIT_CYCLES);
}
static inline void vmentry(void)
{
    if (!do_exit)
        return;

    wait_cycles(VMENTRY_CYCLES);
}

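/*
 * Ring API: each ring implementation provides the hooks below. The guest
 * side queues buffers and kicks the host; the host side consumes buffers
 * and signals completions ("calls") back to the guest.
 */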
/* implemented by ring */
void alloc_ring(void);
/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);
/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);

/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;

/* Compiler barrier - similar to what Linux uses */
#define barrier() asm volatile("" ::: "memory")

/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif

extern bool do_relax;

static inline void busy_wait(void)
{
    if (do_relax)
        cpu_relax();
    else
        /* prevent compiler from removing busy loops */
        barrier();
}

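/*
 * Full barrier: a locked add to the stack (kept below the x86-64 red zone)
 * orders all loads and stores and is typically cheaper than mfence.
 */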
#if defined(__x86_64__) || defined(__i386__)
#define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif

/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
    barrier(); \
    __atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
    barrier(); \
} while (0)

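/*
 * x86 and s390 do not reorder stores against stores, so a compiler barrier
 * is enough for smp_wmb(); other architectures fall back to a release fence.
 */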
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define smp_wmb() barrier()
#else
#define smp_wmb() smp_release()
#endif

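/* Only Alpha reorders dependent loads; everywhere else this is a no-op. */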
#ifdef __alpha__
#define smp_read_barrier_depends() smp_acquire()
#else
#define smp_read_barrier_depends() do {} while (0)
#endif

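/*
 * Sized volatile copies backing READ_ONCE()/WRITE_ONCE(): power-of-two sizes
 * up to 8 bytes become a single access of the right width, anything else
 * degrades to memcpy() bracketed by compiler barriers.
 */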
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
    switch (size) {
    case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
    case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
    case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
    case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
    default:
        barrier();
        __builtin_memcpy((void *)res, (const void *)p, size);
        barrier();
    }
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
    switch (size) {
    case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
    case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
    case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
    case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
    default:
        barrier();
        __builtin_memcpy((void *)p, (const void *)res, size);
        barrier();
    }
}

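/*
 * READ_ONCE()/WRITE_ONCE() keep the compiler from tearing, fusing or caching
 * accesses to fields that the other side of the ring may touch concurrently.
 */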
#define READ_ONCE(x) \
({                                  \
    union { typeof(x) __val; char __c[1]; } __u;            \
    __read_once_size(&(x), __u.__c, sizeof(x));     \
    smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
    __u.__val;                          \
})

#define WRITE_ONCE(x, val) \
({                          \
    union { typeof(x) __val; char __c[1]; } __u =   \
        { .__val = (typeof(x)) (val) }; \
    __write_once_size(&(x), __u.__c, sizeof(x));    \
    __u.__val;                  \
})

#endif