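/*
 * Command buffer manager for the vmwgfx driver: allocation, submission and
 * error handling of SVGA device command buffers.
 */
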
#include <linux/dmapool.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"
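
/*
 * Inline command buffers keep their command data directly after the
 * SVGACBHeader in a struct vmw_cmdbuf_dheader, padded to 1024 bytes total.
 */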
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
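
/**
 * struct vmw_cmdbuf_context - Per device-context command buffer queues
 *
 * @submitted: List of command buffers queued for submission to hardware.
 * @hw_submitted: List of command buffers currently owned by the hardware.
 * @preempted: List of preempted command buffers awaiting resubmission.
 * @num_hw_submitted: Number of buffers currently submitted to hardware.
 * @block_submission: Whether new submissions to this context are blocked.
 */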
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};
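
/*
 * struct vmw_cmdbuf_man - Command buffer manager. Owns the per-context
 * queues, the command buffer pool and its range manager, the DMA pools for
 * headers and inline buffers, the error handling worker, and the current
 * (pending) command buffer, which is protected by @cur_mutex.
 */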
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};
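
/**
 * struct vmw_cmdbuf_header - Driver-side command buffer handle
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: Device context the buffer is or will be submitted to.
 * @list: List head for the manager's queues.
 * @node: Range manager node for pool-backed buffers.
 * @handle: DMA address of @cb_header.
 * @cmd: Pointer to the command data.
 * @size: Size of the command data space.
 * @reserved: Currently reserved number of bytes.
 * @inline_space: Whether the command data is inline with the header.
 */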
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};
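
/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command data.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command data, aligned for the device.
 */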
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};
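
/**
 * struct vmw_cmdbuf_alloc_info - Command buffer pool space allocation state
 *
 * @page_size: Requested size in pages.
 * @node: The range manager node to fill in.
 * @done: Whether the allocation has completed.
 */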
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};
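
/* Loop over each context in the command buffer manager. */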
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
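
/*
 * vmw_cmdbuf_cur_lock - Take the cur_mutex, optionally interruptible.
 * Returns -ERESTARTSYS if interrupted, 0 otherwise.
 */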
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}
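
/* vmw_cmdbuf_cur_unlock - Release the cur_mutex. */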
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}
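
/*
 * vmw_cmdbuf_header_inline_free - Free a command buffer header with inline
 * command data, returning the whole dheader to its DMA pool.
 */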
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}
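
/*
 * __vmw_cmdbuf_header_free - Free a command buffer header and its pool
 * space. Caller must hold the manager's spinlock.
 */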
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}
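
/*
 * vmw_cmdbuf_header_free - Free a command buffer header, taking the
 * manager's spinlock when pool space needs to be released.
 */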
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Inline headers hold no pool space, so no locking is needed. */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}
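
/*
 * vmw_cmdbuf_header_submit - Submit a command buffer to the device by
 * writing its DMA address and context to the command registers, and return
 * the device-reported status.
 */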
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}
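
/* vmw_cmdbuf_ctx_init - Initialize a command buffer context's queues. */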
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}
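
/*
 * vmw_cmdbuf_ctx_submit - Submit queued command buffers from a context to
 * hardware until the hardware queue limit is reached, the submitted list is
 * empty, or submission is blocked. Caller must hold the manager's spinlock.
 */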
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* Submission hit a full queue; leave the buffer queued. */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_move_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}
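
/*
 * vmw_cmdbuf_ctx_process - Process a command buffer context: submit queued
 * buffers, then handle buffers the hardware has finished with according to
 * their status. Increments *notempty if buffers remain queued on exit.
 * Caller must hold the manager's spinlock.
 */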
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}
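
/*
 * vmw_cmdbuf_man_process - Process all command buffer contexts and enable or
 * disable the command buffer completion interrupt depending on whether work
 * remains queued. Caller must hold the manager's spinlock.
 */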
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}
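
/*
 * vmw_cmdbuf_ctx_add - Queue a command buffer for submission on a device
 * context and kick off processing. Caller must hold the manager's spinlock.
 */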
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}
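
/*
 * vmw_cmdbuf_irqthread - Command buffer interrupt bottom half. Processes all
 * contexts under the manager's spinlock.
 */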
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}
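
/*
 * vmw_cmdbuf_work_func - Error handling worker. For each command buffer that
 * caused a device error, skip past the offending command and queue the
 * remainder for restart, preempting and restarting the device contexts
 * around the resubmission.
 */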
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy = 0;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Collect preempted buffers onto the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Splice the preempted buffers after the ones queued for
		 * restart.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Re-enable submission and put the buffers to be rerun
		 * first in the submitted queue.
		 */
		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a fence to wake up waiters for buffers that were dropped. */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}
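
/*
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle,
 * optionally also requiring the preempted queues to be empty.
 */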
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}
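
/*
 * __vmw_cmdbuf_cur_flush - Flush the current (pending) command buffer: queue
 * it for submission, or free it if it is empty. Caller must hold cur_mutex.
 */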
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}
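
/*
 * vmw_cmdbuf_cur_flush - Take cur_mutex and flush the current command
 * buffer. Returns -ERESTARTSYS if interrupted while locking, 0 otherwise.
 */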
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}
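
/*
 * vmw_cmdbuf_idle - Flush the current command buffer and wait for the
 * manager to become idle. Returns 0 on success, -EBUSY on timeout, or a
 * negative error code if the wait was interrupted.
 */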
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
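
/*
 * vmw_cmdbuf_try_alloc - Try a single pool space allocation, processing the
 * contexts once on failure to reclaim completed buffers. Returns whether the
 * allocation succeeded.
 */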
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}
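
/*
 * vmw_cmdbuf_alloc_space - Allocate pool space for a command buffer, waiting
 * for space to become available if necessary.
 */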
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PFN_UP(size);
	info.node = node;
	info.done = false;

	/*
	 * Only one allocating call at a time is allowed to wait for space,
	 * to prevent starvation of large requests.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* First try without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}
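
/*
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command data
 * allocated from the main command buffer pool.
 */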
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}
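
/*
 * vmw_cmdbuf_space_inline - Set up a command buffer header with inline
 * command data from the dheader DMA pool.
 */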
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}
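
/*
 * vmw_cmdbuf_alloc - Allocate a command buffer. Small requests use inline
 * space; larger ones come from the command buffer pool. Returns a pointer
 * to the command data on success, an ERR_PTR() value on failure.
 */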
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
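
/*
 * vmw_cmdbuf_reserve_cur - Reserve space in the current (pending) command
 * buffer, flushing it first if it is too small or targets a different DX
 * context, and allocating a new one if needed. Returns a pointer to the
 * reserved space or an ERR_PTR() value; cur_mutex stays held on success.
 */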
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}
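
/*
 * vmw_cmdbuf_commit_cur - Commit commands written to the current command
 * buffer, optionally flushing it, and release cur_mutex.
 */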
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
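
/*
 * vmw_cmdbuf_reserve - Reserve space for commands, either in a caller-
 * supplied command buffer header or in the manager's current buffer.
 */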
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}
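
/*
 * vmw_cmdbuf_commit - Commit reserved commands. With a caller-supplied
 * header, any pending buffer is flushed first and the header becomes the
 * new current buffer; otherwise the current buffer is committed directly.
 */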
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
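
/*
 * vmw_cmdbuf_send_device_command - Synchronously send a command to the
 * device context, checking its completion status.
 */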
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}
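
/*
 * vmw_cmdbuf_preempt - Send a device command to preempt a command buffer
 * context.
 */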
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
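
/*
 * vmw_cmdbuf_startstop - Send a device command to start or stop a command
 * buffer context.
 */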
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
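
/*
 * vmw_cmdbuf_set_pool_size - Set up the command buffer pool, preferring
 * coherent DMA memory and falling back to a MOB-backed buffer object.
 */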
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First try a coherent DMA allocation for the pool. */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. Fall back to a MOB-backed pool if the
		 * device supports it.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create_kernel(dev_priv, size,
					   &vmw_mob_placement,
					   &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/* Keep the default (pending) command buffer size small. */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	drm_info(&dev_priv->drm,
		 "Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}
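
/*
 * vmw_cmdbuf_man_create - Create and start a command buffer manager. The
 * command buffer pool is set up separately with vmw_cmdbuf_set_pool_size().
 */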
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
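
/*
 * vmw_cmdbuf_remove_pool - Wait for the manager to go idle, then tear down
 * the command buffer pool. Inline command buffers keep working after this
 * call; pool-backed allocations will fail with -ENOMEM.
 */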
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
	}
}
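
/*
 * vmw_cmdbuf_man_destroy - Stop the command buffer contexts and free all
 * manager resources. The pool must have been removed first.
 */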
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}