// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Guest-backed memory object (MOB) and object table (OTable) management
 * for the vmwgfx driver.
 *
 * MOBs describe guest memory to the device through multilevel page
 * tables. OTables are device-defined tables, backed by MOB-style page
 * tables, that track guest-backed resources such as surfaces, contexts,
 * shaders and screen targets.
 */
#include <linux/highmem.h>

#include "vmwgfx_drv.h"

#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT64_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT64_2
#else
#define VMW_PPN_SIZE 4
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT_0
#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PT_1
#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PT_2
#endif
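
/*
 * The macros above pair the page table entry (PPN) width with the
 * matching SVGA mob format: 8-byte entries and the PT64 formats when
 * CONFIG_64BIT is set, 4-byte entries and the PT formats otherwise.
 */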

/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo:          Buffer object holding the page table pages.
 * @num_pages:      Number of pages that make up the page table.
 * @pt_level:       The indirection level of the page table. 0-2.
 * @pt_root_page:   DMA address of the level-0 page of the page table.
 * @id:             Device id of the mob.
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo;
	unsigned long num_pages;
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size:           Size of the table (page-aligned).
 * @page_table:     Pointer to a struct vmw_mob holding the page table.
 * @enabled:        Whether the table is set up at device start.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true},
	{VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
	 NULL, true}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * sizeof(SVGAOTableMobEntry), NULL, true},
	{VMWGFX_NUM_GB_SURFACE * sizeof(SVGAOTableSurfaceEntry), NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * sizeof(SVGAOTableContextEntry), NULL, true},
	{VMWGFX_NUM_GB_SHADER * sizeof(SVGAOTableShaderEntry), NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * sizeof(SVGAOTableScreenTargetEntry),
	 NULL, true},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_bo_unpin_unlocked - Reserve a buffer object just long enough to
 * unpin it, then release the reservation again.
 */
static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, false, true, NULL);

	BUG_ON(ret != 0);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable_bo:      Buffer object backing all object tables
 * @offset:         Start of table offset into @otable_bo
 * @otable:         Pointer to otable metadata
 *
 * Returns -ENOMEM if it fails to set up the page table or to reserve
 * command buffer space, and may block waiting for command buffer space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

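	/*
	 * A table that fits in a single page needs no page table: the
	 * device is pointed directly at the data page. Larger tables get
	 * a multilevel page table, and the generic depth is translated
	 * to the PPN-size-specific mob format.
	 */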
	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, but the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv:       Pointer to a device private structure
 * @type:           Type of object table base
 * @otable:         Pointer to otable metadata
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}

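/*
 * vmw_otable_batch_setup - Allocate a single buffer object backing all
 * enabled tables in @batch and bind each table to its sub-range of it.
 */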
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	SVGAOTableType i;
	int ret;

	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size = PFN_ALIGN(otables[i].size);
		bo_size += otables[i].size;
	}

	ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
	if (unlikely(ret != 0))
		return ret;

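	/*
	 * Hand each enabled table its page-aligned slice of the shared
	 * buffer object. On failure, take down whatever was already set
	 * up before releasing the backing buffer.
	 */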
	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_no_setup:
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	vmw_bo_unpin_unlocked(batch->otable_bo);
	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes care of the device guest backed surface initialization, by
 * setting up the guest backed memory object tables. Returns 0 on success
 * and various error codes on failure. A successful return means the
 * object tables can be taken down using vmw_otables_takedown().
 */
int vmw_otables_setup(struct vmw_private *dev_priv)
{
	struct vmw_otable **otables = &dev_priv->otable_batch.otables;
	int ret;

	if (has_sm4_context(dev_priv)) {
		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
	} else {
		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
				   GFP_KERNEL);
		if (!(*otables))
			return -ENOMEM;

		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
	}

	ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch);
	if (unlikely(ret != 0))
		goto out_setup;

	return 0;

out_setup:
	kfree(*otables);
	return ret;
}

static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
				      struct vmw_otable_batch *batch)
{
	SVGAOTableType i;
	struct ttm_buffer_object *bo = batch->otable_bo;
	int ret;

	for (i = 0; i < batch->num_otables; ++i)
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
}

/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv:       Pointer to a device private structure
 *
 * Takes down the Guest Memory Object tables.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}

/*
 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages
 * needed for a guest backed memory object.
 *
 * @data_pages:     Number of data pages in the memory object buffer.
 */
static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
{
	unsigned long data_size = data_pages * PAGE_SIZE;
	unsigned long tot_size = 0;

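	/*
	 * Each loop iteration accounts for one page table level: the
	 * number of PPN entries needed to map the previous level is
	 * rounded up to whole pages and added to the total, until a
	 * level fits in a single page. For example, with 4 KiB pages and
	 * 8-byte PPNs, a 1 GiB buffer (262144 data pages) needs 512
	 * level-1 pages plus 1 level-2 page: 513 page table pages total.
	 */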
	while (likely(data_size > PAGE_SIZE)) {
		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
		data_size *= VMW_PPN_SIZE;
		tot_size += PFN_ALIGN(data_size);
	}

	return tot_size >> PAGE_SHIFT;
}

/*
 * vmw_mob_create - Create a mob, but don't populate it.
 *
 * @data_pages:     Number of data pages of the underlying buffer object.
 */
struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);

	if (unlikely(!mob))
		return NULL;

	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);

	return mob;
}

/*
 * vmw_mob_pt_populate - Populate the mob pagetable
 *
 * @dev_priv:       Pointer to a device private structure
 * @mob:            Pointer to the mob whose pagetable we want to populate.
 *
 * This function allocates memory to be used for the pagetable.
 * Returns -ENOMEM if memory resources aren't sufficient and may
 * cause TTM buffer objects to be swapped out.
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	BUG_ON(mob->pt_bo != NULL);

	return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
					  &mob->pt_bo);
}

/*
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr:           Pointer to pointer to page table entry.
 * @val:            The page table entry.
 *
 * Assigns a value to a page table entry pointed to by *@addr and
 * increments *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_iter:      Page iterator over the underlying buffer object's
 *                  data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_iter:        Page iterator over the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages when necessary.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

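	/*
	 * Map each page table page in turn and fill it with the PPNs of
	 * the data pages, stopping early once all data pages have been
	 * entered.
	 */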
	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob:            Pointer to a mob whose page table needs setting up.
 * @data_iter:      Page iterator over the underlying buffer object's
 *                  data pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Sets up a multilevel mob page table, building one level per iteration.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter = {0};
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	BUG_ON(num_data_pages == 0);

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
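	/*
	 * Build one page table level per iteration: the pages just
	 * written become the data pages of the next, higher level. The
	 * loop terminates once a level fits in a single page, whose DMA
	 * address then becomes the page table root.
	 */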
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob:            Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to a mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * No one else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 *                populating it if necessary.
 *
 * @dev_priv:       Pointer to a device private.
 * @mob:            Pointer to the mob we're making visible.
 * @vsgt:           Pointer to a struct vmw_sg_table describing the data
 *                  pages of the underlying buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer object.
 * @mob_id:         Device id of the mob to bind.
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

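	/*
	 * A single-page mob needs no page table: the data page itself
	 * serves as the root. Otherwise, build the page table now unless
	 * one survives from a previous binding.
	 */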
	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PT_1;
	}

	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	if (pt_set_up) {
		vmw_bo_unpin_unlocked(mob->pt_bo);
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}