#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE 96
#define MAX_INLINE_RESP_SIZE 24
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
		      + MAX_INLINE_CMD_SIZE		\
		      + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

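/*
 * Virtqueue interrupt callbacks: these run in atomic context, so they only
 * schedule the per-queue dequeue work and leave the actual reclaim of
 * completed buffers to process context.
 */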
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

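/*
 * Allocate a vbuffer from the slab cache.  The command is always stored
 * inline right behind the vbuffer header; the response is stored inline as
 * well when it fits into MAX_INLINE_RESP_SIZE, otherwise the caller must
 * provide resp_buf.
 */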
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

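/*
 * Work handler for the control queue: pulls completed buffers off the
 * virtqueue, reports host errors, signals fences, runs the response
 * callbacks and finally frees the vbuffers.
 */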
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

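/*
 * Build a scatter/gather table for a vmalloc'ed data buffer so it can be
 * handed to the virtqueue page by page.  Returns NULL on allocation failure
 * or if the buffer is not page aligned.
 */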
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

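/*
 * Add a prepared command to the control virtqueue.  If the ring is full the
 * host is kicked and we sleep until descriptors are reclaimed.  When a fence
 * is attached it is emitted under the queue lock so fence ids are handed out
 * in the same order as the commands on the ring.
 */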
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -1;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/*
	 * Emit the fence while holding the queue lock so that fence ids are
	 * assigned in the same order as commands are placed on the ring.
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -1;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

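/*
 * Notifications are batched: queueing a command only bumps pending_commands,
 * and virtio_gpu_notify() kicks the host once for everything that has been
 * queued since the last kick.
 */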
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf));

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

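/*
 * Command helpers.  Each helper below builds one virtio-gpu command in a
 * freshly allocated vbuffer and queues it; when a fence is passed in, the
 * command is flagged so the host signals that fence on completion.
 */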
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

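/*
 * Response callbacks: these run from the control queue dequeue work and copy
 * the host's answer into driver state before waking up any waiters.
 */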
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Check whether this capset request is already cached. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    shmem->pages, DMA_TO_DEVICE);
	}

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

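/*
 * Resource UUIDs identify exported (dma-buf) resources across virtio devices;
 * the host assigns the UUID and the callback below stores it on the object.
 */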
static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

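/*
 * Blob resources: for guest-backed blobs the backing pages are passed as mem
 * entries in the vbuf data at creation time, so no separate ATTACH_BACKING
 * command is needed.
 */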
void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}