0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #ifndef __XEN_PUBLIC_IO_RING_H__
0011 #define __XEN_PUBLIC_IO_RING_H__
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include <xen/interface/grant_table.h>
0029
/*
 * Ring indices are free-running 32-bit counters: they are never reduced
 * when advanced, and unsigned arithmetic gives correct differences across
 * 2^32 wrap-around.  An index is masked down to a slot number only at the
 * point of access (see RING_GET_REQUEST/RING_GET_RESPONSE).
 */
typedef unsigned int RING_IDX;
0031
0032
/*
 * Round a 32-bit value down to its largest power-of-two divisor that is
 * also a power-of-two floor: __RD32(x) yields the highest power of two
 * <= x (and 0 for 0).  Implemented as a binary cascade: each level keeps
 * the upper half of the examined bits if any of them is set, otherwise
 * recurses into the lower half.  Must stay a constant expression so that
 * __CONST_RING_SIZE() remains usable in static initialisers.
 */
#define __RD2(_x)  (((_x) & 0x2) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0xc) ? (__RD2((_x) >> 2) << 2) : __RD2(_x))
#define __RD8(_x)  (((_x) & 0xf0) ? (__RD4((_x) >> 4) << 4) : __RD4(_x))
#define __RD16(_x) (((_x) & 0xff00) ? (__RD8((_x) >> 8) << 8) : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? (__RD16((_x) >> 16) << 16) : __RD16(_x))
0038
0039
0040
0041
0042
0043
0044
/*
 * Compile-time ring size: the number of struct <_s>_sring slots that fit
 * in _sz bytes after the shared-ring header, rounded down to a power of
 * two (a power-of-two count is what allows RING_GET_* to mask indices).
 * The (struct ... *)0 dereference sits inside sizeof and is never
 * evaluated; the whole expression is a constant expression.
 */
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
sizeof(((struct _s##_sring *)0)->ring[0])))

/*
 * Run-time equivalent: (_s) is a pointer to the shared ring, so
 * ((_sz) - (long)(_s)->ring + (long)(_s)) is _sz minus the byte offset of
 * the ring[] array, i.e. the space available for slots.
 */
#define __RING_SIZE(_s, _sz) \
(__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
/*
 * DEFINE_RING_TYPES(name, request_t, response_t) instantiates the three
 * structures that make up a split request/response ring:
 *   struct <name>_sring      - the ring itself, shared by both ends;
 *   struct <name>_front_ring - frontend-private view (produces requests,
 *                              consumes responses);
 *   struct <name>_back_ring  - backend-private view (produces responses,
 *                              consumes requests).
 * All indices are free-running RING_IDX counters; the *_event fields are
 * the notification thresholds used by the *_CHECK_NOTIFY macros below.
 */
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                      \
                                                                         \
/* Each ring slot carries either a request or a response. */             \
union __name##_sring_entry {                                             \
    __req_t req;                                                         \
    __rsp_t rsp;                                                         \
};                                                                       \
                                                                         \
/* Shared ring layout: indices and event thresholds, then the slots. */  \
struct __name##_sring {                                                  \
    RING_IDX req_prod, req_event;                                        \
    RING_IDX rsp_prod, rsp_event;                                        \
    uint8_t __pad[48];                  /* pads the header to 64 bytes */\
    union __name##_sring_entry ring[1]; /* variable-sized in practice;  */\
                                        /* count comes from __RING_SIZE */\
};                                                                       \
                                                                         \
/* Frontend-private state. */                                            \
struct __name##_front_ring {                                             \
    RING_IDX req_prod_pvt;   /* requests produced, not yet published */  \
    RING_IDX rsp_cons;       /* responses consumed so far */             \
    unsigned int nr_ents;    /* ring size; a power of two */             \
    struct __name##_sring *sring;                                        \
};                                                                       \
                                                                         \
/* Backend-private state. */                                             \
struct __name##_back_ring {                                              \
    RING_IDX rsp_prod_pvt;   /* responses produced, not yet published */ \
    RING_IDX req_cons;       /* requests consumed so far */              \
    unsigned int nr_ents;    /* ring size; a power of two */             \
    struct __name##_sring *sring;                                        \
};                                                                       \

0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
/*
 * Initialise the shared ring page.  Both producer indices start at 0 and
 * both event thresholds at 1, so the very first entry produced on either
 * side crosses its threshold and triggers a notification.  The padding is
 * zeroed explicitly so no stale memory is exposed through the shared page.
 */
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
0138
/*
 * Attach a frontend-private view to shared ring _s occupying __size bytes,
 * resuming at index _i: both the private request producer and the response
 * consumer start there (used to reconnect to a ring already in use).
 */
#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->req_prod_pvt = (_i); \
(_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)

/* Fresh frontend setup: attach at index 0. */
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
0147
/* Convenience: initialise the shared page and the frontend view in one go. */
#define XEN_FRONT_RING_INIT(r, s, size) do { \
SHARED_RING_INIT(s); \
FRONT_RING_INIT(r, s, size); \
} while (0)
0152
/*
 * Attach a backend-private view to shared ring _s occupying __size bytes,
 * resuming at index _i: both the private response producer and the request
 * consumer start there.
 */
#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->rsp_prod_pvt = (_i); \
(_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)

/* Fresh backend setup: attach at index 0. */
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
0161
0162
/* Number of slots in the ring (a power of two, set by *_RING_ATTACH). */
#define RING_SIZE(_r) \
((_r)->nr_ents)

/*
 * Slots the frontend may still fill with requests: ring size minus the
 * requests produced but not yet answered (req_prod_pvt - rsp_cons).
 * Unsigned RING_IDX arithmetic keeps this correct across wrap-around.
 */
#define RING_FREE_REQUESTS(_r) \
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Frontend-side test for "no room to queue another request". */
#define RING_FULL(_r) \
(RING_FREE_REQUESTS(_r) == 0)
0175
0176
/* Responses published by the backend that the frontend has not consumed. */
#define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Requests published by the frontend that the backend has not consumed.
 * The raw count (req_prod - req_cons) is clamped by the number of slots
 * actually available on the backend side, so a buggy or malicious
 * frontend that pushes req_prod too far cannot make the backend walk off
 * the ring.  Statement-expression form evaluates (_r) only once per use.
 */
#define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \
unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
unsigned int rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
req < rsp ? req : rsp; \
})

/* Boolean forms of the counters above. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
(!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r))
#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
(!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r))
0191
0192
/*
 * Direct slot access: the free-running index is reduced modulo the
 * power-of-two ring size.  These yield pointers into SHARED memory —
 * the other end can rewrite the slot at any time — so entries from an
 * untrusted peer should be taken via RING_COPY_REQUEST/RESPONSE instead.
 */
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
/*
 * Copy a shared ring slot into a caller-private structure.  The volatile
 * cast forces the compiler to perform one real load of the shared entry
 * and forbids it from re-reading the slot later, so the peer cannot
 * change the data between the caller's validation and its use
 * (double-fetch / TOCTOU defence).
 */
#define RING_COPY_(type, r, idx, dest) do { \
/* Use volatile to force a genuine, single copy from shared memory. */ \
*(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)

#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
0215
0216
/*
 * True when the given consumer index has run ahead of everything the
 * backend could legitimately have to consume (a full ring past its own
 * response producer) — i.e. the loop counter has overflowed the ring.
 */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/*
 * Backend sanity check on the frontend-controlled req_prod: more than a
 * full ring of outstanding requests means the shared index is corrupt
 * or hostile and must not be trusted.
 */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Frontend sanity check on the backend-controlled rsp_prod, likewise. */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
0227
/*
 * Publish privately produced entries to the peer.  The write barrier
 * orders the writes of the entry bodies before the write of the shared
 * producer index, so the peer never observes an index covering entries
 * whose contents are not yet visible.
 */
#define RING_PUSH_REQUESTS(_r) do { \
virt_wmb(); \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)

#define RING_PUSH_RESPONSES(_r) do { \
virt_wmb(); \
(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/*
 * Publish new requests and decide whether the peer needs a notification.
 *
 * Notify iff the peer's advertised threshold (sring->req_event) lies in
 * the half-open window of newly published indices, (__old, __new].  The
 * test is written with unsigned RING_IDX subtraction so it stays correct
 * across 2^32 index wrap-around.
 *
 * Barrier pairing: virt_wmb() orders the entry bodies before the index
 * publish; virt_mb() orders the index publish before the read of
 * req_event, pairing with the peer's update of req_event in
 * RING_FINAL_CHECK_FOR_REQUESTS().
 */
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->req_prod; \
RING_IDX __new = (_r)->req_prod_pvt; \
virt_wmb(); /* entry bodies visible before the index update */ \
(_r)->sring->req_prod = __new; \
virt_mb(); /* index visible before we sample req_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
(RING_IDX)(__new - __old)); \
} while (0)

/* Response-side twin of the macro above; same window test and barriers. */
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->rsp_prod; \
RING_IDX __new = (_r)->rsp_prod_pvt; \
virt_wmb(); /* entry bodies visible before the index update */ \
(_r)->sring->rsp_prod = __new; \
virt_mb(); /* index visible before we sample rsp_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
(RING_IDX)(__new - __old)); \
} while (0)
0287
/*
 * Consumer-side "final check before sleeping".  If nothing is pending,
 * advertise via the shared *_event threshold that we want a notification
 * for the very next entry (consumer index + 1), then re-check after a
 * full barrier.  The re-check closes the race with a producer that
 * published between our first check and the threshold update; the
 * barrier pairs with the virt_mb() in *_PUSH_*_AND_CHECK_NOTIFY.
 */
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
if (_work_to_do) break; \
(_r)->sring->req_event = (_r)->req_cons + 1; \
virt_mb(); /* threshold visible before the re-check */ \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
if (_work_to_do) break; \
(_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
virt_mb(); /* threshold visible before the re-check */ \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
#ifndef XEN_PAGE_SHIFT
/*
 * Default Xen ABI page size is 4 KiB; an including environment may
 * define XEN_PAGE_SHIFT before this header to override it.
 */
#define XEN_PAGE_SHIFT 12
#endif

/*
 * Byte size of one direction of a flex ring of the given order: half of
 * (1 << order) Xen pages (so order 1 gives one full page per direction).
 */
#define XEN_FLEX_RING_SIZE(order) \
	(1UL << (XEN_PAGE_SHIFT + (order) - 1))
0355
/*
 * DEFINE_XEN_FLEX_RING(name) generates helpers for a byte-granular ring
 * of power-of-two size held in a flat buffer:
 *   name##_mask         - reduce a free-running index modulo ring_size;
 *   name##_get_ring_ptr - pointer to the byte at idx;
 *   name##_read_packet  - copy size bytes out of the ring at *masked_cons
 *                         (split into two memcpy()s when the span wraps)
 *                         and advance the masked consumer index;
 *   name##_write_packet - producer-side mirror of read_packet;
 *   name##_queued       - bytes currently held in the ring, given the
 *                         free-running producer/consumer indices;
 *   struct name##_data  - pointers to the "in" and "out" data areas.
 * read/write_packet take indices that are already masked; callers are
 * expected to bound size by what _queued()/free space allows.
 */
#define DEFINE_XEN_FLEX_RING(name)                                       \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size)     \
{                                                                        \
    return idx & (ring_size - 1); /* ring_size must be a power of two */ \
}                                                                        \
                                                                         \
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf,     \
                                                 RING_IDX idx,           \
                                                 RING_IDX ring_size)     \
{                                                                        \
    return buf + name##_mask(idx, ring_size);                            \
}                                                                        \
                                                                         \
/* Copy size bytes from the ring at *masked_cons into opaque. */         \
static inline void name##_read_packet(void *opaque,                      \
                                      const unsigned char *buf,          \
                                      size_t size,                       \
                                      RING_IDX masked_prod,              \
                                      RING_IDX *masked_cons,             \
                                      RING_IDX ring_size)                \
{                                                                        \
    if (*masked_cons < masked_prod ||                                    \
        size <= ring_size - *masked_cons) {                              \
        /* source region is contiguous: a single copy suffices */        \
        memcpy(opaque, buf + *masked_cons, size);                        \
    } else {                                                             \
        /* region wraps: copy the tail of the buffer, then the head */   \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons);    \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf,  \
               size - (ring_size - *masked_cons));                       \
    }                                                                    \
    *masked_cons = name##_mask(*masked_cons + size, ring_size);          \
}                                                                        \
                                                                         \
/* Copy size bytes from opaque into the ring at *masked_prod. */         \
static inline void name##_write_packet(unsigned char *buf,               \
                                       const void *opaque,               \
                                       size_t size,                      \
                                       RING_IDX *masked_prod,            \
                                       RING_IDX masked_cons,             \
                                       RING_IDX ring_size)               \
{                                                                        \
    if (*masked_prod < masked_cons ||                                    \
        size <= ring_size - *masked_prod) {                              \
        /* destination region is contiguous */                           \
        memcpy(buf + *masked_prod, opaque, size);                        \
    } else {                                                             \
        /* region wraps: fill to the end of the buffer, then the start */\
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod);    \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod),\
               size - (ring_size - *masked_prod));                       \
    }                                                                    \
    *masked_prod = name##_mask(*masked_prod + size, ring_size);          \
}                                                                        \
                                                                         \
/* Bytes queued in the ring between free-running cons and prod. */       \
static inline RING_IDX name##_queued(RING_IDX prod,                      \
                                     RING_IDX cons,                      \
                                     RING_IDX ring_size)                 \
{                                                                        \
    RING_IDX size;                                                       \
                                                                         \
    if (prod == cons)                                                    \
        return 0;                                                        \
                                                                         \
    prod = name##_mask(prod, ring_size);                                 \
    cons = name##_mask(cons, ring_size);                                 \
                                                                         \
    if (prod == cons)                                                    \
        return ring_size; /* equal only after masking: ring is full */   \
                                                                         \
    if (prod > cons)                                                     \
        size = prod - cons;                                              \
    else                                                                 \
        size = ring_size - (cons - prod); /* data wraps past the end */  \
    return size;                                                         \
}                                                                        \
                                                                         \
/* Local pointers to the two shared data areas of the flex ring. */      \
struct name##_data {                                                     \
    unsigned char *in;                                                   \
    unsigned char *out;                                                  \
}
0431
/*
 * Shared control page for a flex ring, plus the helper functions from
 * DEFINE_XEN_FLEX_RING.  Each direction's consumer/producer index pair
 * is padded out to its own 64-byte region (2 * 4 bytes + 56 bytes of
 * padding), followed by the ring order (see XEN_FLEX_RING_SIZE()) and a
 * flexible array of grant references for the shared data pages.
 */
#define DEFINE_XEN_FLEX_RING_AND_INTF(name)                              \
struct name##_data_intf {                                                \
    RING_IDX in_cons, in_prod;                                           \
                                                                         \
    uint8_t pad1[56]; /* keep the two index pairs in separate regions */ \
                                                                         \
    RING_IDX out_cons, out_prod;                                         \
                                                                         \
    uint8_t pad2[56];                                                    \
                                                                         \
    RING_IDX ring_order; /* log2 sizing input to XEN_FLEX_RING_SIZE() */ \
    grant_ref_t ref[];   /* grant references of the shared data pages */ \
};                                                                       \
DEFINE_XEN_FLEX_RING(name)
0446
0447 #endif