#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

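/*
 * struct vmw_fence_manager - per-device fence bookkeeping.
 *
 * @num_fence_objects: Number of fence objects currently on @fence_list.
 * @dev_priv: Back-pointer to the device private structure.
 * @lock: Protects @fence_list, @cleanup_list and fence object members.
 * @fence_list: List of fence objects that have not yet signaled.
 * @work: Worker running action cleanup outside atomic context.
 * @fifo_down: True while the command FIFO is down; fence creation then fails.
 * @cleanup_list: Actions whose seq_passed callback has run and that now
 * await cleanup from @work.
 * @pending_actions: Per-type count of actions attached to unsignaled fences.
 * @goal_irq_mutex: Protects @goal_irq_on and ordering of goal irq updates.
 * @goal_irq_on: Whether the fence goal irq is currently requested.
 * @seqno_valid: Whether the fence goal register holds the seqno of an
 * unsignaled fence with attached actions.
 * @ctx: dma_fence context allocated for this manager.
 */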
struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
        bool fifo_down;
        struct list_head cleanup_list;
        uint32_t pending_actions[VMW_ACTION_MAX];
        struct mutex goal_irq_mutex;
        bool goal_irq_on;
        bool seqno_valid;

        u64 ctx;
};

struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};
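
/*
 * struct vmw_event_fence_action - fence action that delivers a drm event
 * when the fence seqno has passed.
 *
 * @action: The struct vmw_fence_action hooked up to the fence.
 * @event: The pending drm event to deliver.
 * @fence: Referenced pointer to the fence, keeping it alive while the
 * action hangs on it.
 * @dev: The drm device used to deliver the event.
 * @tv_sec: If non-NULL, receives the seconds part of the signal time.
 * @tv_usec: If non-NULL, receives the microseconds part of the signal time.
 */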
struct vmw_event_fence_action {
        struct vmw_fence_action action;

        struct drm_pending_event *event;
        struct vmw_fence_obj *fence;
        struct drm_device *dev;

        uint32_t *tv_sec;
        uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
        return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

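/*
 * The device fence goal lives in the SVGA_REG_FENCE_GOAL register when the
 * device advertises SVGA_CAP2_EXTRA_REGS, and in FIFO memory at
 * SVGA_FIFO_FENCE_GOAL otherwise. These helpers hide that distinction.
 */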
static u32 vmw_fence_goal_read(struct vmw_private *vmw)
{
        if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
                return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
        else
                return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
}

static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
        if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
                vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
        else
                vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}
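
/*
 * Note on irq usage: the seqno irq is requested on demand by sleeping
 * waiters (see vmw_fence_wait) and released again when they wake up. The
 * fence goal irq is requested when an action is attached to an unsignaled
 * fence (see vmw_fence_obj_add_action) and released from the work function
 * once no unsignaled fence with attached actions remains.
 */
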
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);

        spin_lock(&fman->lock);
        list_del_init(&fence->head);
        --fman->num_fence_objects;
        spin_unlock(&fman->lock);
        fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
        return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
        return "svga";
}

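/*
 * vmwgfx signals its fences from vmw_fences_update(), so this callback only
 * needs to report whether the fence seqno has already passed on the device:
 * returning false lets the dma-fence core mark the fence signaled right away.
 */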
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;

        u32 seqno = vmw_fence_read(dev_priv);
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;

        return true;
}

struct vmwgfx_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct vmwgfx_wait_cb *wait =
                container_of(cb, struct vmwgfx_wait_cb, base);

        wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
        struct vmwgfx_wait_cb cb;
        long ret = timeout;

        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;

        vmw_seqno_waiter_add(dev_priv);

        spin_lock(f->lock);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                goto out;

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        cb.base.func = vmwgfx_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &f->cb_list);

        for (;;) {
                __vmw_fences_update(fman);

                /*
                 * The barrier-free __set_current_state() is sufficient here:
                 * DMA_FENCE_FLAG_SIGNALED_BIT and the wakeup are both
                 * serialized by the fence spinlock held around this loop.
                 */
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
                        if (ret == 0 && timeout > 0)
                                ret = 1;
                        break;
                }

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (ret == 0)
                        break;

                spin_unlock(f->lock);

                ret = schedule_timeout(ret);

                spin_lock(f->lock);
        }
        __set_current_state(TASK_RUNNING);
        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);

out:
        spin_unlock(f->lock);

        vmw_seqno_waiter_remove(dev_priv);

        return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
        .get_driver_name = vmw_fence_get_driver_name,
        .get_timeline_name = vmw_fence_get_timeline_name,
        .enable_signaling = vmw_fence_enable_signaling,
        .wait = vmw_fence_wait,
        .release = vmw_fence_obj_destroy,
};
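
/*
 * Execute signal actions on fences recently signaled. This is done from a
 * workqueue so the action cleanup does not have to run in atomic context.
 */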
static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;
        bool seqno_valid;

        do {
                INIT_LIST_HEAD(&list);
                mutex_lock(&fman->goal_irq_mutex);

                spin_lock(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                seqno_valid = fman->seqno_valid;
                spin_unlock(&fman->lock);

                if (!seqno_valid && fman->goal_irq_on) {
                        fman->goal_irq_on = false;
                        vmw_goal_waiter_remove(fman->dev_priv);
                }
                mutex_unlock(&fman->goal_irq_mutex);

                if (list_empty(&list))
                        return;

                /*
                 * At this point only this worker can touch the list heads of
                 * the actions moved to the private list, so fman->lock does
                 * not need to be held while cleaning them up.
                 */
                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        if (action->cleanup)
                                action->cleanup(action);
                }
        } while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(!fman))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        mutex_init(&fman->goal_irq_mutex);
        fman->ctx = dma_fence_context_alloc(1);

        return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock(&fman->lock);
        lists_empty = list_empty(&fman->fence_list) &&
                      list_empty(&fman->cleanup_list);
        spin_unlock(&fman->lock);

        BUG_ON(!lists_empty);
        kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence, u32 seqno,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        int ret = 0;

        dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
                       fman->ctx, seqno);
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->destroy = destroy;

        spin_lock(&fman->lock);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        ++fman->num_fence_objects;

out_unlock:
        spin_unlock(&fman->lock);
        return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                       struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                fman->pending_actions[action->type]--;
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the action to the cleanup list so that it will be
                 * performed by the worker task outside atomic context.
                 */
                list_add_tail(&action->head, &fman->cleanup_list);
        }
}
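
/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal seqno if
 * needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * Called with the fence manager lock held. If the current fence goal has
 * already passed, scan the list of unsignaled fences for the next one with
 * actions attached and write its seqno as the new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */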
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                      u32 passed_seqno)
{
        u32 goal_seqno;
        struct vmw_fence_obj *fence;

        if (likely(!fman->seqno_valid))
                return false;

        goal_seqno = vmw_fence_goal_read(fman->dev_priv);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;

        fman->seqno_valid = false;
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
                        vmw_fence_goal_write(fman->dev_priv,
                                             fence->base.seqno);
                        break;
                }
        }

        return true;
}
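
/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj whose seqno should be
 * considered as a device fence goal.
 *
 * Called with the fence manager lock held, typically when an action has
 * been attached to a fence, to check whether the seqno of that fence should
 * be written to the device fence goal register.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */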
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        u32 goal_seqno;

        if (dma_fence_is_signaled_locked(&fence->base))
                return false;

        goal_seqno = vmw_fence_goal_read(fman->dev_priv);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;

        vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
        fman->seqno_valid = true;

        return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;
        bool needs_rerun;
        uint32_t seqno, new_seqno;

        seqno = vmw_fence_read(fman->dev_priv);
rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        dma_fence_signal_locked(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                } else
                        break;
        }

        /*
         * Rerun if the fence goal seqno was updated, and the device might
         * have raced with that update, so that we missed a fence goal irq.
         */
        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
                new_seqno = vmw_fence_read(fman->dev_priv);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
                }
        }

        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        __vmw_fences_update(fman);
        spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                return true;

        vmw_fences_update(fman);

        return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

        if (likely(ret > 0))
                return 0;
        else if (ret == 0)
                return -EBUSY;
        else
                return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
{
        struct vmw_fence_obj *fence;
        int ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(!fence))
                return -ENOMEM;

        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
        return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);

        ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        int ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(!ufence)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is released in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);

        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release);

        if (unlikely(ret != 0)) {
                /*
                 * Drop the base object's reference on init failure.
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.handle;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        return ret;
}
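
/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects when the
 * command FIFO goes down, and block creation of new ones.
 */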
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released fman->lock.
         */
        spin_lock(&fman->lock);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                dma_fence_get(&fence->base);
                spin_unlock(&fman->lock);

                ret = vmw_fence_obj_wait(fence, false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        dma_fence_signal(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                }

                BUG_ON(!list_empty(&fence->head));
                dma_fence_put(&fence->base);
                spin_lock(&fman->lock);
        }
        spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        fman->fifo_down = false;
        spin_unlock(&fman->lock);
}
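
/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs to have
 * opened the fence object to this ttm_object_file.
 *
 * Return: The base ttm object of the fence, or an ERR_PTR on failure.
 */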
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (!base) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        if (base->refcount_release != vmw_user_fence_base_release) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                ttm_base_object_unref(&base);
                return ERR_PTR(-EINVAL);
        }

        return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division is not available on all 32-bit systems, so
         * approximate a division by 1000000 with shifts.
         */
        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                       (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */
        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle);
        return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);

        arg->signaled = vmw_fence_obj_signaled(fence);

        arg->signaled_flags = arg->flags;
        spin_lock(&fman->lock);
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle);
}
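
/**
 * vmw_event_fence_action_seq_passed - deliver the pending drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * Called when the seqno of the fence the action is attached to has passed.
 * Queues the pending event on the submitter's drm event list, optionally
 * filling in the signal timestamp. Runs in atomic context.
 */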
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);
        struct drm_device *dev = eaction->dev;
        struct drm_pending_event *event = eaction->event;

        if (unlikely(event == NULL))
                return;

        spin_lock_irq(&dev->event_lock);

        if (likely(eaction->tv_sec != NULL)) {
                struct timespec64 ts;

                ktime_get_ts64(&ts);

                *eaction->tv_sec = ts.tv_sec;
                *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
        }

        drm_send_event_locked(dev, eaction->event);
        eaction->event = NULL;
        spin_unlock_irq(&dev->event_lock);
}
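
/**
 * vmw_event_fence_action_cleanup - destructor for an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * Drops the action's fence reference and frees the action. Typically run
 * from the fence manager workqueue.
 */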
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);

        vmw_fence_obj_unreference(&eaction->fence);
        kfree(eaction);
}
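
/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */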
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                                     struct vmw_fence_action *action)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        bool run_update = false;

        mutex_lock(&fman->goal_irq_mutex);
        spin_lock(&fman->lock);

        fman->pending_actions[action->type]++;
        if (dma_fence_is_signaled_locked(&fence->base)) {
                struct list_head action_list;

                INIT_LIST_HEAD(&action_list);
                list_add_tail(&action->head, &action_list);
                vmw_fences_perform_actions(fman, &action_list);
        } else {
                list_add_tail(&action->head, &fence->seq_passed_actions);

                /*
                 * This function may set fman->seqno_valid, so it must
                 * be run with the goal_irq_mutex held.
                 */
                run_update = vmw_fence_goal_check_locked(fence);
        }

        spin_unlock(&fman->lock);

        if (run_update) {
                if (!fman->goal_irq_on) {
                        fman->goal_irq_on = true;
                        vmw_goal_waiter_add(fman->dev_priv);
                }
                vmw_fences_update(fman);
        }
        mutex_unlock(&fman->goal_irq_mutex);
}
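
/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated with
 * k[mz]alloc and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will be
 * assigned the current time tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been freed
 * when this function returns. If this function returns with an error code,
 * the caller needs to free that object.
 */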
int vmw_event_fence_action_queue(struct drm_file *file_priv,
                                 struct vmw_fence_obj *fence,
                                 struct drm_pending_event *event,
                                 uint32_t *tv_sec,
                                 uint32_t *tv_usec,
                                 bool interruptible)
{
        struct vmw_event_fence_action *eaction;
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
        if (unlikely(!eaction))
                return -ENOMEM;

        eaction->event = event;

        eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
        eaction->action.cleanup = vmw_event_fence_action_cleanup;
        eaction->action.type = VMW_ACTION_EVENT;

        eaction->fence = vmw_fence_obj_reference(fence);
        eaction->dev = &fman->dev_priv->drm;
        eaction->tv_sec = tv_sec;
        eaction->tv_usec = tv_usec;

        vmw_fence_obj_add_action(fence, &eaction->action);

        return 0;
}

struct vmw_event_fence_pending {
        struct drm_pending_event base;
        struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
                                         struct vmw_fence_obj *fence,
                                         uint32_t flags,
                                         uint64_t user_data,
                                         bool interruptible)
{
        struct vmw_event_fence_pending *event;
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct drm_device *dev = &fman->dev_priv->drm;
        int ret;

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (unlikely(!event)) {
                DRM_ERROR("Failed to allocate an event.\n");
                ret = -ENOMEM;
                goto out_no_space;
        }

        event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
        event->event.base.length = sizeof(*event);
        event->event.user_data = user_data;

        ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate event space for this file.\n");
                kfree(event);
                goto out_no_space;
        }

        if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   interruptible);
        else
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   NULL,
                                                   NULL,
                                                   interruptible);
        if (ret != 0)
                goto out_no_queue;

        return 0;

out_no_queue:
        drm_event_cancel_free(dev, &event->base);
out_no_space:
        return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_fence_event_arg *arg =
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
        uint32_t handle;
        int ret;

        /*
         * Look up an existing fence object, and if user-space wants a
         * fence handle back, add a reference for it.
         */
        if (arg->handle) {
                struct ttm_base_object *base =
                        vmw_fence_obj_lookup(tfile, arg->handle);

                if (IS_ERR(base))
                        return PTR_ERR(base);

                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);

                if (user_fence_rep != NULL) {
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
                                                 NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
                                goto out_no_ref_obj;
                        }
                        handle = base->handle;
                }
                ttm_base_object_unref(&base);
        }

        /*
         * Create a new fence object if none was looked up above.
         */
        if (!fence) {
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
                                                 &fence,
                                                 (user_fence_rep) ?
                                                 &handle : NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Fence event failed to create fence.\n");
                        return ret;
                }
        }

        BUG_ON(fence == NULL);

        ret = vmw_event_fence_action_create(file_priv, fence,
                                            arg->flags,
                                            arg->user_data,
                                            true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
                goto out_no_create;
        }

        vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
                                    handle, -1);
        vmw_fence_obj_unreference(&fence);
        return 0;
out_no_create:
        if (user_fence_rep != NULL)
                ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
}