Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (C) 2015 Red Hat, Inc.
0003  * All Rights Reserved.
0004  *
0005  * Permission is hereby granted, free of charge, to any person obtaining
0006  * a copy of this software and associated documentation files (the
0007  * "Software"), to deal in the Software without restriction, including
0008  * without limitation the rights to use, copy, modify, merge, publish,
0009  * distribute, sublicense, and/or sell copies of the Software, and to
0010  * permit persons to whom the Software is furnished to do so, subject to
0011  * the following conditions:
0012  *
0013  * The above copyright notice and this permission notice (including the
0014  * next paragraph) shall be included in all copies or substantial
0015  * portions of the Software.
0016  *
0017  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
0018  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
0019  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
0020  * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
0021  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
0022  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
0023  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
0024  */
0025 
0026 #include <trace/events/dma_fence.h>
0027 
0028 #include "virtgpu_drv.h"
0029 
/* Upcast from the embedded dma_fence @f to its containing virtio_gpu_fence. */
#define to_virtio_gpu_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)
0032 
/* dma_fence_ops::get_driver_name — name of the driver owning this fence. */
static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
	return "virtio_gpu";
}
0037 
/* dma_fence_ops::get_timeline_name — all fences ride the control queue. */
static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
	return "controlq";
}
0042 
0043 static bool virtio_gpu_fence_signaled(struct dma_fence *f)
0044 {
0045     /* leaked fence outside driver before completing
0046      * initialization with virtio_gpu_fence_emit.
0047      */
0048     WARN_ON_ONCE(f->seqno == 0);
0049     return false;
0050 }
0051 
/* Format "[context, seqno]" into @str for debugfs/trace consumers. */
static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
{
	snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
}
0056 
0057 static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
0058                       int size)
0059 {
0060     struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
0061 
0062     snprintf(str, size, "%llu",
0063          (u64)atomic64_read(&fence->drv->last_fence_id));
0064 }
0065 
/* No .signaled shortcut exists for real completion: fences are signaled
 * exclusively from virtio_gpu_fence_event_process() when the host responds.
 */
static const struct dma_fence_ops virtio_gpu_fence_ops = {
	.get_driver_name     = virtio_gpu_get_driver_name,
	.get_timeline_name   = virtio_gpu_get_timeline_name,
	.signaled            = virtio_gpu_fence_signaled,
	.fence_value_str     = virtio_gpu_fence_value_str,
	.timeline_value_str  = virtio_gpu_timeline_value_str,
};
0073 
0074 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
0075                         uint64_t base_fence_ctx,
0076                         uint32_t ring_idx)
0077 {
0078     uint64_t fence_context = base_fence_ctx + ring_idx;
0079     struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
0080     struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
0081                             GFP_KERNEL);
0082 
0083     if (!fence)
0084         return fence;
0085 
0086     fence->drv = drv;
0087     fence->ring_idx = ring_idx;
0088     fence->emit_fence_info = !(base_fence_ctx == drv->context);
0089 
0090     /* This only partially initializes the fence because the seqno is
0091      * unknown yet.  The fence must not be used outside of the driver
0092      * until virtio_gpu_fence_emit is called.
0093      */
0094 
0095     dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
0096                fence_context, 0);
0097 
0098     return fence;
0099 }
0100 
0101 void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
0102               struct virtio_gpu_ctrl_hdr *cmd_hdr,
0103               struct virtio_gpu_fence *fence)
0104 {
0105     struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
0106     unsigned long irq_flags;
0107 
0108     spin_lock_irqsave(&drv->lock, irq_flags);
0109     fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
0110     dma_fence_get(&fence->f);
0111     list_add_tail(&fence->node, &drv->fences);
0112     spin_unlock_irqrestore(&drv->lock, irq_flags);
0113 
0114     trace_dma_fence_emit(&fence->f);
0115 
0116     cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
0117     cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
0118 
0119     /* Only currently defined fence param. */
0120     if (fence->emit_fence_info) {
0121         cmd_hdr->flags |=
0122             cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
0123         cmd_hdr->ring_idx = (u8)fence->ring_idx;
0124     }
0125 }
0126 
/*
 * Handle a fence completion reported by the host for @fence_id: record it
 * as the timeline's latest value, then signal the matching fence and every
 * earlier fence on the same dma-fence context, dropping the list reference
 * taken in virtio_gpu_fence_emit() for each.
 */
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 fence_id)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *signaled, *curr, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	/* Publish the newest completed id for timeline_value_str readers. */
	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
	list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
		if (fence_id != curr->fence_id)
			continue;

		signaled = curr;

		/*
		 * Signal any fences with a strictly smaller sequence number
		 * than the current signaled fence.
		 */
		/* Note: the inner loop deliberately reuses curr/tmp; the
		 * outer loop breaks immediately after, so that is safe.
		 */
		list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
			/* dma-fence contexts must match */
			if (signaled->f.context != curr->f.context)
				continue;

			if (!dma_fence_is_later(&signaled->f, &curr->f))
				continue;

			dma_fence_signal_locked(&curr->f);
			if (curr->e) {
				/* Wake any userspace waiter for this fence. */
				drm_send_event(vgdev->ddev, &curr->e->base);
				curr->e = NULL;
			}

			/* Drop the reference taken in virtio_gpu_fence_emit. */
			list_del(&curr->node);
			dma_fence_put(&curr->f);
		}

		/* Finally signal the fence that matched fence_id itself. */
		dma_fence_signal_locked(&signaled->f);
		if (signaled->e) {
			drm_send_event(vgdev->ddev, &signaled->e->base);
			signaled->e = NULL;
		}

		list_del(&signaled->node);
		dma_fence_put(&signaled->f);
		break;
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}