#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

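/* Implements a simple circular buffer queue whose producer and consumer
 * indices live in a struct rxe_queue_buf that can be mapped into user
 * space (see do_mmap_info() and struct rxe_mmap_info).
 *
 * The element size and the number of slots are both powers of two:
 * elements are addressed by shifting an index left by log2_elem_size
 * and indices wrap by masking with index_mask. The queue is empty when
 * the producer and consumer indices are equal and full when advancing
 * the producer would make them equal, so the usable capacity is one
 * less than the number of slots.
 *
 * The queue_type tells the inline helpers below which end of the queue
 * the caller owns. An index owned by the far end of a shared queue is
 * read with smp_load_acquire(); an index the driver owns on a queue
 * shared with user space is kept in the private copy q->index and
 * published to the shared buffer with smp_store_release(), so user
 * space cannot corrupt the driver's copy of the index it owns. Passing
 * the type as a separate, usually compile-time constant, argument lets
 * the compiler drop the unused switch cases.
 *
 * The two ends of a queue do not lock against each other, but callers
 * must serialize concurrent producers or consumers on the same end
 * (see the producer_lock and consumer_lock arguments to
 * rxe_queue_resize()).
 */
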
enum queue_type {
        QUEUE_TYPE_TO_CLIENT,
        QUEUE_TYPE_FROM_CLIENT,
        QUEUE_TYPE_TO_DRIVER,
        QUEUE_TYPE_FROM_DRIVER,
};

struct rxe_queue_buf;

struct rxe_queue {
        struct rxe_dev *rxe;
        struct rxe_queue_buf *buf;
        struct rxe_mmap_info *ip;
        size_t buf_size;
        size_t elem_size;
        unsigned int log2_elem_size;
        u32 index_mask;
        enum queue_type type;
        /* private copy of this end's index for queues shared with user
         * space; the kernel updates this copy and then publishes it to
         * the shared rxe_queue_buf for the other end to read
         */
        u32 index;
};

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
                 struct ib_udata *udata, struct rxe_queue_buf *buf,
                 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
                                 unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
                     unsigned int elem_size, struct ib_udata *udata,
                     struct mminfo __user *outbuf,
                     spinlock_t *producer_lock, spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
        return (index + 1) & q->index_mask;
}

static inline u32 queue_get_producer(const struct rxe_queue *q,
                                     enum queue_type type)
{
        u32 prod;

        switch (type) {
        case QUEUE_TYPE_FROM_CLIENT:
                /* producer index is owned by user space */
                prod = smp_load_acquire(&q->buf->producer_index);
                break;
        case QUEUE_TYPE_TO_CLIENT:
                /* this end produces; use the driver's private copy */
                prod = q->index;
                break;
        case QUEUE_TYPE_FROM_DRIVER:
                /* producer index is owned by the peer */
                prod = smp_load_acquire(&q->buf->producer_index);
                break;
        case QUEUE_TYPE_TO_DRIVER:
                /* this end produces and owns the index in the buffer */
                prod = q->buf->producer_index;
                break;
        }

        return prod;
}

static inline u32 queue_get_consumer(const struct rxe_queue *q,
                                     enum queue_type type)
{
        u32 cons;

        switch (type) {
        case QUEUE_TYPE_FROM_CLIENT:
                /* this end consumes; use the driver's private copy */
                cons = q->index;
                break;
        case QUEUE_TYPE_TO_CLIENT:
                /* consumer index is owned by user space */
                cons = smp_load_acquire(&q->buf->consumer_index);
                break;
        case QUEUE_TYPE_FROM_DRIVER:
                /* this end consumes and owns the index in the buffer */
                cons = q->buf->consumer_index;
                break;
        case QUEUE_TYPE_TO_DRIVER:
                /* consumer index is owned by the peer */
                cons = smp_load_acquire(&q->buf->consumer_index);
                break;
        }

        return cons;
}

static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
        u32 prod = queue_get_producer(q, type);
        u32 cons = queue_get_consumer(q, type);

        return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
        u32 prod = queue_get_producer(q, type);
        u32 cons = queue_get_consumer(q, type);

        return ((prod + 1 - cons) & q->index_mask) == 0;
}

static inline u32 queue_count(const struct rxe_queue *q,
                              enum queue_type type)
{
        u32 prod = queue_get_producer(q, type);
        u32 cons = queue_get_consumer(q, type);

        return (prod - cons) & q->index_mask;
}
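
/*
 * Example of the index arithmetic above: with q->index_mask == 7 (eight
 * slots), prod == 2 and cons == 6, queue_count() returns (2 - 6) & 7 = 4
 * (the elements in slots 6, 7, 0 and 1), and both queue_empty() and
 * queue_full() return 0. The queue reports full once queue_count()
 * reaches 7, so one slot is always left unused.
 */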

static inline void queue_advance_producer(struct rxe_queue *q,
                                          enum queue_type type)
{
        u32 prod;

        switch (type) {
        case QUEUE_TYPE_FROM_CLIENT:
                pr_warn("%s: attempt to advance client index\n",
                        __func__);
                break;
        case QUEUE_TYPE_TO_CLIENT:
                prod = q->index;
                prod = (prod + 1) & q->index_mask;
                q->index = prod;
                /* publish the new producer index to user space */
                smp_store_release(&q->buf->producer_index, prod);
                break;
        case QUEUE_TYPE_FROM_DRIVER:
                pr_warn("%s: attempt to advance driver index\n",
                        __func__);
                break;
        case QUEUE_TYPE_TO_DRIVER:
                prod = q->buf->producer_index;
                prod = (prod + 1) & q->index_mask;
                q->buf->producer_index = prod;
                break;
        }
}

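/*
 * Illustrative sketch only (not part of this header): a kernel-side
 * producer posting one element to a queue that user space consumes
 * (QUEUE_TYPE_TO_CLIENT), with the producer lock held and "src"
 * standing in for an element-sized source buffer:
 *
 *      if (queue_full(q, QUEUE_TYPE_TO_CLIENT))
 *              return -EBUSY;
 *      memcpy(queue_producer_addr(q, QUEUE_TYPE_TO_CLIENT), src,
 *             q->elem_size);
 *      queue_advance_producer(q, QUEUE_TYPE_TO_CLIENT);
 */
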
static inline void queue_advance_consumer(struct rxe_queue *q,
                                          enum queue_type type)
{
        u32 cons;

        switch (type) {
        case QUEUE_TYPE_FROM_CLIENT:
                cons = q->index;
                cons = (cons + 1) & q->index_mask;
                q->index = cons;
                /* publish the new consumer index to user space */
                smp_store_release(&q->buf->consumer_index, cons);
                break;
        case QUEUE_TYPE_TO_CLIENT:
                pr_warn("%s: attempt to advance client index\n",
                        __func__);
                break;
        case QUEUE_TYPE_FROM_DRIVER:
                cons = q->buf->consumer_index;
                cons = (cons + 1) & q->index_mask;
                q->buf->consumer_index = cons;
                break;
        case QUEUE_TYPE_TO_DRIVER:
                pr_warn("%s: attempt to advance driver index\n",
                        __func__);
                break;
        }
}

static inline void *queue_producer_addr(struct rxe_queue *q,
                                        enum queue_type type)
{
        u32 prod = queue_get_producer(q, type);

        return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *queue_consumer_addr(struct rxe_queue *q,
                                        enum queue_type type)
{
        u32 cons = queue_get_consumer(q, type);

        return q->buf->data + (cons << q->log2_elem_size);
}

static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
        return q->buf->data + ((index & q->index_mask)
                                << q->log2_elem_size);
}

static inline u32 queue_index_from_addr(const struct rxe_queue *q,
                                        const void *addr)
{
        return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
                        & q->index_mask;
}

static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
        return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
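
/*
 * Illustrative sketch only (not part of this header): draining a queue
 * that user space produces into (QUEUE_TYPE_FROM_CLIENT), with the
 * consumer lock held and handle_elem() standing in for the caller's
 * per-element work:
 *
 *      while ((elem = queue_head(q, QUEUE_TYPE_FROM_CLIENT))) {
 *              handle_elem(elem);
 *              queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
 *      }
 */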

#endif /* RXE_QUEUE_H */