/*
 * Xen frontend/backend page directory based shared buffer
 * handling.
 *
 * Author: Oleksandr Andrushchenko
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>

#include <xen/xen-front-pgdir-shbuf.h>

/*
 * This structure represents the layout of a page directory page: it
 * holds grant references to the pages of the shared buffer plus a
 * reference to the next page of the directory, so arbitrarily large
 * buffers can be described as a chain of such pages.
 */
struct xen_page_directory {
	grant_ref_t gref_dir_next_page;
#define XEN_GREF_LIST_END 0
	grant_ref_t gref[1];
};
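/*
 * Illustrative sketch (not part of any protocol definition): with two
 * directory pages the shared representation looks like
 *
 *   dir page 0: gref_dir_next_page = gref of dir page 1
 *               gref[0..N-1]       = grefs of buffer pages 0..N-1
 *   dir page 1: gref_dir_next_page = XEN_GREF_LIST_END
 *               gref[0..]          = grefs of the remaining buffer pages
 *
 * where N is XEN_NUM_GREFS_PER_PAGE defined below.
 */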
/*
 * Shared buffer ops which are implemented differently depending on the
 * allocation mode, e.g. if the buffer is allocated by the corresponding
 * backend or by the frontend itself.
 */
struct xen_front_pgdir_shbuf_ops {
	/*
	 * Calculate the number of grefs required to handle this buffer,
	 * e.g. if grefs are required for the page directory only or for
	 * the buffer pages as well.
	 */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

	/* Fill the page directory according to the para-virtual protocol. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);

	/* Map grant references of the buffer. */
	int (*map)(struct xen_front_pgdir_shbuf *buf);

	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
/*
 * Get the granted reference to the very first page of the page
 * directory. Usually this is passed to the backend, so it can find and
 * fill in (or read) the grant references of the buffer's pages.
 *
 * \param buf shared buffer which page directory is of interest.
 *
 * \return granted reference to the very first page of the page
 * directory or INVALID_GRANT_REF if the buffer was not allocated.
 */
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
	if (!buf->grefs)
		return INVALID_GRANT_REF;

	return buf->grefs[0];
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
/*
 * Map granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation (be_alloc flag)
 * this can either do nothing (for buffers shared by the frontend
 * itself) or map the provided granted references onto the backing
 * storage (buf->pages).
 *
 * \param buf shared buffer which grants to be mapped.
 *
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->map)
		return buf->ops->map(buf);

	/* No need to map own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
/*
 * Unmap granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation (be_alloc flag)
 * this can either do nothing (for buffers shared by the frontend
 * itself) or unmap the provided granted references.
 *
 * \param buf shared buffer which grants to be unmapped.
 *
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* No need to unmap own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
/*
 * Free all the resources of the shared buffer.
 *
 * \param buf shared buffer which resources are to be freed.
 */
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != INVALID_GRANT_REF)
				gnttab_end_foreign_access(buf->grefs[i], NULL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
/*
 * Number of grefs a page can hold with respect to the
 * struct xen_page_directory header.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
				 offsetof(struct xen_page_directory, \
					  gref)) / sizeof(grant_ref_t))
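/*
 * Worked example (for illustration only, assuming 4 KiB pages and
 * 4-byte grant references): the directory header consumes one
 * grant_ref_t, so XEN_NUM_GREFS_PER_PAGE = (4096 - 4) / 4 = 1023.
 */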
/*
 * Get the number of pages the page directory consumes itself.
 *
 * \param buf shared buffer.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
/*
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the buffer is allocated by the backend.
 *
 * \param buf shared buffer.
 */
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/* Only for pages the page directory consumes itself. */
	buf->num_grefs = get_num_pages_dir(buf);
}
/*
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the buffer is allocated by the frontend.
 *
 * \param buf shared buffer.
 */
static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/*
	 * Number of pages the page directory consumes itself
	 * plus grefs for the buffer pages.
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
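/*
 * Illustrative numbers (assuming 4 KiB pages, i.e. 1023 grefs per
 * directory page): a 2048-page buffer needs DIV_ROUND_UP(2048, 1023) = 3
 * directory pages, hence num_grefs is 3 for a backend-allocated buffer
 * and 3 + 2048 = 2051 for a frontend-allocated one.
 */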
#define xen_page_to_vaddr(page) \
	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
/*
 * Unmap the buffer previously mapped with grant references
 * provided by the backend.
 *
 * \param buf shared buffer.
 *
 * \return zero on success or a negative number on failure.
 */
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops)
		return -ENOMEM;

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to unmap page %d: %d\n",
				i, unmap_ops[i].status);
	}

	if (ret)
		dev_err(&buf->xb_dev->dev,
			"Failed to unmap grant references, ret %d", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}
/*
 * Map the buffer with grant references provided by the backend.
 *
 * \param buf shared buffer.
 *
 * \return zero on success or a negative number on failure.
 */
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * Read the page directory to get grefs from the backend: for
	 * external buffers we only allocate buf->grefs for the page
	 * directory, so buf->num_grefs holds the number of pages of the
	 * page directory itself.
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;
		int to_copy = XEN_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* Save handles even on error, so we can unmap. */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		if (likely(map_ops[cur_page].status == GNTST_okay)) {
			buf->backend_map_handles[cur_page] =
				map_ops[cur_page].handle;
		} else {
			buf->backend_map_handles[cur_page] =
				INVALID_GRANT_HANDLE;
			if (!ret)
				ret = -ENXIO;
			dev_err(&buf->xb_dev->dev,
				"Failed to map page %d: %d\n",
				cur_page, map_ops[cur_page].status);
		}
	}

	if (ret) {
		dev_err(&buf->xb_dev->dev,
			"Failed to map grant references, ret %d", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}
/*
 * Fill the page directory with grant references to the pages of the
 * page directory itself.
 *
 * The grant references to the buffer pages are provided by the
 * backend in this case.
 *
 * \param buf shared buffer.
 */
static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	struct xen_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* Fill only grefs for the page directory itself. */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xen_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* The last page must say there are no more pages. */
	page_dir = (struct xen_page_directory *)ptr;
	page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}
/*
 * Fill the page directory with grant references to the pages of the
 * page directory and the buffer we share with the backend.
 *
 * \param buf shared buffer.
 */
static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * While copying, skip grefs at the start: they are for pages
	 * granted for the page directory itself.
	 */
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;

		if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
		} else {
			to_copy = XEN_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
		       to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}
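/*
 * For a frontend-allocated buffer the buf->grefs array is thus laid out
 * as follows (a sketch of the convention used above, assuming D
 * directory pages and P buffer pages):
 *
 *   grefs[0 .. D-1]   - grants to the page directory pages
 *   grefs[D .. D+P-1] - grants to the buffer pages, copied into the
 *                       directory pages by guest_fill_page_dir()
 */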
/*
 * Grant the backend access to each page of the buffer shared by the
 * frontend, filling buf->grefs starting at gref_idx.
 *
 * \param buf shared buffer.
 * \param priv_gref_head private grant references head.
 * \param gref_idx index in buf->grefs to start filling from.
 *
 * \return zero on success or a negative number on failure.
 */
static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}
/*
 * Grant references to the pages of the page directory and, if the
 * buffer is allocated by the frontend, also to the buffer pages.
 *
 * \param buf shared buffer.
 *
 * \return zero on success or a negative number on failure.
 */
static int grant_references(struct xen_front_pgdir_shbuf *buf)
{
	grant_ref_t priv_gref_head;
	int ret, i, j, cur_ref;
	int otherend_id, num_pages_dir;

	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
	if (ret < 0) {
		dev_err(&buf->xb_dev->dev,
			"Cannot allocate grant references\n");
		return ret;
	}

	otherend_id = buf->xb_dev->otherend_id;
	j = 0;
	num_pages_dir = get_num_pages_dir(buf);
	for (i = 0; i < num_pages_dir; i++) {
		unsigned long frame;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		frame = xen_page_to_gfn(virt_to_page(buf->directory +
						     PAGE_SIZE * i));
		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
		buf->grefs[j++] = cur_ref;
	}

	if (buf->ops->grant_refs_for_buffer) {
		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
		if (ret)
			return ret;
	}

	gnttab_free_grant_references(priv_gref_head);
	return 0;
}
/*
 * Allocate all the storage needed for the shared buffer: the array of
 * grant references and the contiguous buffer for the page directory.
 *
 * \param buf shared buffer.
 *
 * \return zero on success or a negative number on failure.
 */
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}
/*
 * For backend-allocated buffers we only grant and fill the page
 * directory and map/unmap the grant references the backend provides.
 */
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};
/*
 * For frontend-allocated buffers the frontend grants its own pages to
 * the backend, so no map/unmap of foreign grants is needed.
 */
static const struct xen_front_pgdir_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
/*
 * Allocate a new instance of a shared buffer.
 *
 * \param cfg configuration to be used while allocating a new shared buffer.
 *
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
{
	struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
	int ret;

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;
	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = cfg->num_pages;
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return 0;

fail:
	xen_front_pgdir_shbuf_free(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
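/*
 * Minimal usage sketch for a frontend driver (illustrative only; the
 * page count, variable names and the way the gref is handed to the
 * backend are hypothetical and protocol specific):
 *
 *	struct xen_front_pgdir_shbuf shbuf = {};
 *	struct xen_front_pgdir_shbuf_cfg cfg = {
 *		.xb_dev = xb_dev,
 *		.num_pages = num_pages,
 *		.pages = pages,		// pre-allocated struct page *[]
 *		.pgdir = &shbuf,
 *		.be_alloc = 0,		// pages are granted by the frontend
 *	};
 *	int ret = xen_front_pgdir_shbuf_alloc(&cfg);
 *
 *	if (!ret) {
 *		// Pass the first directory gref to the backend, e.g. via
 *		// a XenStore entry defined by the particular protocol.
 *		grant_ref_t gref =
 *			xen_front_pgdir_shbuf_get_dir_start(&shbuf);
 *		...
 *	}
 *
 * When the buffer is no longer shared, xen_front_pgdir_shbuf_unmap()
 * (a no-op for frontend-granted buffers) and xen_front_pgdir_shbuf_free()
 * release the grants and the directory storage.
 */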
MODULE_DESCRIPTION("Xen frontend/backend page directory based "
		   "shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");