0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #include <trace/events/dma_fence.h>
0027
0028 #include "virtgpu_drv.h"
0029
/* Resolve the embedded struct dma_fence back to its containing fence. */
#define to_virtio_gpu_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)
0032
static const char *virtio_gpu_get_driver_name(struct dma_fence *f)
{
	/* Fixed driver name reported through the dma-fence debug interface. */
	static const char driver_name[] = "virtio_gpu";

	return driver_name;
}
0037
static const char *virtio_gpu_get_timeline_name(struct dma_fence *f)
{
	/* Every fence in this driver is reported on the control queue timeline. */
	static const char timeline_name[] = "controlq";

	return timeline_name;
}
0042
static bool virtio_gpu_fence_signaled(struct dma_fence *f)
{
	/*
	 * Fences are signaled explicitly via dma_fence_signal_locked() in
	 * virtio_gpu_fence_event_process(); there is no out-of-band state
	 * to poll here, so this callback never reports "signaled" itself.
	 *
	 * A seqno of 0 means the fence was allocated but never emitted
	 * (the real seqno is assigned in virtio_gpu_fence_emit()), so
	 * reaching this callback in that state indicates a driver bug.
	 */
	WARN_ON_ONCE(f->seqno == 0);
	return false;
}
0051
0052 static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
0053 {
0054 snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
0055 }
0056
0057 static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
0058 int size)
0059 {
0060 struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
0061
0062 snprintf(str, size, "%llu",
0063 (u64)atomic64_read(&fence->drv->last_fence_id));
0064 }
0065
/*
 * dma-fence backend; no .enable_signaling is needed because fences are
 * signaled directly from virtio_gpu_fence_event_process().
 */
static const struct dma_fence_ops virtio_gpu_fence_ops = {
	.get_driver_name = virtio_gpu_get_driver_name,
	.get_timeline_name = virtio_gpu_get_timeline_name,
	.signaled = virtio_gpu_fence_signaled,
	.fence_value_str = virtio_gpu_fence_value_str,
	.timeline_value_str = virtio_gpu_timeline_value_str,
};
0073
0074 struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
0075 uint64_t base_fence_ctx,
0076 uint32_t ring_idx)
0077 {
0078 uint64_t fence_context = base_fence_ctx + ring_idx;
0079 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
0080 struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
0081 GFP_KERNEL);
0082
0083 if (!fence)
0084 return fence;
0085
0086 fence->drv = drv;
0087 fence->ring_idx = ring_idx;
0088 fence->emit_fence_info = !(base_fence_ctx == drv->context);
0089
0090
0091
0092
0093
0094
0095 dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
0096 fence_context, 0);
0097
0098 return fence;
0099 }
0100
/*
 * Assign the fence its sequence number, queue it on the driver's pending
 * list, and stamp the fence metadata into the command header that will be
 * sent to the host.
 */
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	/*
	 * Seqno assignment and list insertion must happen atomically under
	 * the fence lock.  The extra reference taken here is owned by the
	 * pending list and dropped in virtio_gpu_fence_event_process()
	 * once the fence is signaled.
	 */
	spin_lock_irqsave(&drv->lock, irq_flags);
	fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	/* Tell the host this command carries a fence to complete. */
	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);

	/* Pass the ring index when the fence is not on the default context. */
	if (fence->emit_fence_info) {
		cmd_hdr->flags |=
			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
		cmd_hdr->ring_idx = (u8)fence->ring_idx;
	}
}
0126
/*
 * Host completion handler: record @fence_id as the last completed id,
 * then signal the matching pending fence along with every older fence
 * on the same dma-fence context.
 */
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
				    u64 fence_id)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *signaled, *curr, *tmp;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
	list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
		if (fence_id != curr->fence_id)
			continue;

		signaled = curr;

		/*
		 * Fences on the same context complete in seqno order, so
		 * any fence on @signaled's context with a lower seqno is
		 * implicitly done as well — signal and release those too.
		 * Note this inner walk rescans the whole list and reuses
		 * @curr/@tmp; that is safe only because the outer loop
		 * breaks immediately afterwards.
		 */
		list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
			/* Fences on other contexts are left pending. */
			if (signaled->f.context != curr->f.context)
				continue;

			if (!dma_fence_is_later(&signaled->f, &curr->f))
				continue;

			dma_fence_signal_locked(&curr->f);
			/* Deliver the DRM event, if one was attached. */
			if (curr->e) {
				drm_send_event(vgdev->ddev, &curr->e->base);
				curr->e = NULL;
			}

			/* Drop the list's reference from virtio_gpu_fence_emit(). */
			list_del(&curr->node);
			dma_fence_put(&curr->f);
		}

		dma_fence_signal_locked(&signaled->f);
		if (signaled->e) {
			drm_send_event(vgdev->ddev, &signaled->e->base);
			signaled->e = NULL;
		}

		/* Drop the list's reference from virtio_gpu_fence_emit(). */
		list_del(&signaled->node);
		dma_fence_put(&signaled->f);
		break;
	}
	spin_unlock_irqrestore(&drv->lock, irq_flags);
}