Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #define _GNU_SOURCE
0003 #include "main.h"
0004 #include <stdlib.h>
0005 #include <stdio.h>
0006 #include <string.h>
0007 #include <pthread.h>
0008 #include <malloc.h>
0009 #include <assert.h>
0010 #include <errno.h>
0011 #include <limits.h>
0012 
0013 #define SMP_CACHE_BYTES 64
0014 #define cache_line_size() SMP_CACHE_BYTES
0015 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
0016 #define unlikely(x)    (__builtin_expect(!!(x), 0))
0017 #define likely(x)    (__builtin_expect(!!(x), 1))
0018 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
0019 #define SIZE_MAX        (~(size_t)0)
0020 #define KMALLOC_MAX_SIZE SIZE_MAX
0021 
0022 typedef pthread_spinlock_t  spinlock_t;
0023 
0024 typedef int gfp_t;
0025 #define __GFP_ZERO 0x1
0026 
0027 static void *kmalloc(unsigned size, gfp_t gfp)
0028 {
0029     void *p = memalign(64, size);
0030     if (!p)
0031         return p;
0032 
0033     if (gfp & __GFP_ZERO)
0034         memset(p, 0, size);
0035     return p;
0036 }
0037 
0038 static inline void *kzalloc(unsigned size, gfp_t flags)
0039 {
0040     return kmalloc(size, flags | __GFP_ZERO);
0041 }
0042 
0043 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
0044 {
0045     if (size != 0 && n > SIZE_MAX / size)
0046         return NULL;
0047     return kmalloc(n * size, flags);
0048 }
0049 
0050 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
0051 {
0052     return kmalloc_array(n, size, flags | __GFP_ZERO);
0053 }
0054 
/*
 * Free memory obtained from kmalloc()/kzalloc().
 * free(NULL) is a guaranteed no-op per the C standard, so the
 * NULL guard the original carried was redundant.
 */
static void kfree(void *p)
{
    free(p);
}
0060 
0061 #define kvmalloc_array kmalloc_array
0062 #define kvfree kfree
0063 
0064 static void spin_lock_init(spinlock_t *lock)
0065 {
0066     int r = pthread_spin_init(lock, 0);
0067     assert(!r);
0068 }
0069 
0070 static void spin_lock(spinlock_t *lock)
0071 {
0072     int ret = pthread_spin_lock(lock);
0073     assert(!ret);
0074 }
0075 
0076 static void spin_unlock(spinlock_t *lock)
0077 {
0078     int ret = pthread_spin_unlock(lock);
0079     assert(!ret);
0080 }
0081 
0082 static void spin_lock_bh(spinlock_t *lock)
0083 {
0084     spin_lock(lock);
0085 }
0086 
0087 static void spin_unlock_bh(spinlock_t *lock)
0088 {
0089     spin_unlock(lock);
0090 }
0091 
0092 static void spin_lock_irq(spinlock_t *lock)
0093 {
0094     spin_lock(lock);
0095 }
0096 
0097 static void spin_unlock_irq(spinlock_t *lock)
0098 {
0099     spin_unlock(lock);
0100 }
0101 
0102 static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
0103 {
0104     spin_lock(lock);
0105 }
0106 
0107 static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
0108 {
0109     spin_unlock(lock);
0110 }
0111 
0112 #include "../../../include/linux/ptr_ring.h"
0113 
/* Progress counters: headcnt counts successful add_inbuf() calls, tailcnt
 * counts buffers reported back by get_buf(); their difference is the number
 * of buffers outstanding. */
static unsigned long long headcnt, tailcnt;
/* The ring under test, cache-line aligned as an in-kernel user would be. */
static struct ptr_ring array ____cacheline_aligned_in_smp;
0116 
0117 /* implemented by ring */
0118 void alloc_ring(void)
0119 {
0120     int ret = ptr_ring_init(&array, ring_size, 0);
0121     assert(!ret);
0122     /* Hacky way to poke at ring internals. Useful for testing though. */
0123     if (param)
0124         array.batch = param;
0125 }
0126 
0127 /* guest side */
0128 int add_inbuf(unsigned len, void *buf, void *datap)
0129 {
0130     int ret;
0131 
0132     ret = __ptr_ring_produce(&array, buf);
0133     if (ret >= 0) {
0134         ret = 0;
0135         headcnt++;
0136     }
0137 
0138     return ret;
0139 }
0140 
0141 /*
0142  * ptr_ring API provides no way for producer to find out whether a given
0143  * buffer was consumed.  Our tests merely require that a successful get_buf
0144  * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
0145  * fake it accordingly.
0146  */
0147 void *get_buf(unsigned *lenp, void **bufp)
0148 {
0149     void *datap;
0150 
0151     if (tailcnt == headcnt || __ptr_ring_full(&array))
0152         datap = NULL;
0153     else {
0154         datap = "Buffer\n";
0155         ++tailcnt;
0156     }
0157 
0158     return datap;
0159 }
0160 
0161 bool used_empty()
0162 {
0163     return (tailcnt == headcnt || __ptr_ring_full(&array));
0164 }
0165 
/* Call (notification) suppression is not modeled by this ptr_ring backend;
 * reaching here indicates a harness misconfiguration, so abort. */
void disable_call()
{
    assert(0);
}
0170 
/* Call (notification) enabling is not modeled by this backend; must never
 * be invoked.  The unreachable return avoids undefined behavior (missing
 * return in a non-void function) should asserts be compiled out (NDEBUG). */
bool enable_call()
{
    assert(0);
    return false;
}
0175 
/* Kick notification is not modeled by this ptr_ring backend; reaching here
 * indicates a harness misconfiguration, so abort. */
void kick_available(void)
{
    assert(0);
}
0180 
0181 /* host side */
/* Host side: kick suppression is not modeled by this backend; must not be
 * called, so abort. */
void disable_kick()
{
    assert(0);
}
0186 
/* Host side: kick enabling is not modeled by this backend; must never be
 * invoked.  The unreachable return avoids undefined behavior (missing
 * return in a non-void function) should asserts be compiled out (NDEBUG). */
bool enable_kick()
{
    assert(0);
    return false;
}
0191 
0192 bool avail_empty()
0193 {
0194     return __ptr_ring_empty(&array);
0195 }
0196 
0197 bool use_buf(unsigned *lenp, void **bufp)
0198 {
0199     void *ptr;
0200 
0201     ptr = __ptr_ring_consume(&array);
0202 
0203     return ptr;
0204 }
0205 
/* Used-buffer notification is not modeled by this ptr_ring backend;
 * reaching here indicates a harness misconfiguration, so abort. */
void call_used(void)
{
    assert(0);
}