// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * vmwgfx surface resource management: legacy and guest-backed (GB)
 * surfaces, their user-space ioctl interface, and dirty tracking.
 */
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	(svga3d_flags & ((uint64_t)U32_MAX))
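
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:  The TTM prime object.
 * @srf:    The surface metadata.
 * @master: Master of the creating client. Used for security check.
 */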
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	struct drm_master *master;
};
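
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:      Surface face.
 * @mip:       Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 */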
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
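
/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @num_subres: Number of subresources.
 * @boxes: Array of boxes caching the dirty extent of each subresource.
 */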
struct vmw_surface_dirty {
	struct vmw_surface_cache cache;
	u32 num_subres;
	SVGA3dBox boxes[];
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};
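
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */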
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};
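
/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */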
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};
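
/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */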
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
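
/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */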
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}
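
/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */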
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);
}
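
/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the
 * destruction of a hw surface.
 */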
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
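
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */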
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}
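
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */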
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes *
		sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
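
	/*
	 * Legacy (non-GB) surfaces only understand the lower 32 surface
	 * flag bits, hence the downcast.
	 */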
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
	cmd->body.format = srf->metadata.format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->metadata.sizes;

	for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
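
/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */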
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(srf->metadata.format);

	for (i = 0; i < srf->metadata.num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			vmw_surface_get_image_buffer_size(desc, cur_size,
							  body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}
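
/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts resource count accordingly.
 */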
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {
		cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_cmd_commit(dev_priv, vmw_surface_destroy_size());
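
		/*
		 * Adjust the device memory-usage accounting under the
		 * cmdbuf mutex.
		 */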
		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}
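
/**
 * vmw_legacy_srf_create - Create a hw surface as part of the
 *                         resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and define the
 * surface to the device. Returns -EBUSY if there are insufficient
 * device resources; retries should be attempted if this function fails.
 */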
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;
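
	/*
	 * Alloc id for the resource.
	 */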
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}
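
	/*
	 * Encode surface define- commands.
	 */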
	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);
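
	/*
	 * Surface memory usage accounting.
	 */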
	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
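
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:     Pointer to a struct vmw_res embedded in a struct
 *           vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 *           information about the backup buffer.
 * @bind:    Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is temporarily out of resources.
 */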
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_cmd_commit(dev_priv, submit_size);
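
	/*
	 * Create a fence object and fence the backup buffer.
	 */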
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
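
/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:     Pointer to a struct vmw_res embedded in a struct
 *           vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 *           information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */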
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}
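
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:      Pointer to a struct vmw_res embedded in a struct
 *            vmw_surface.
 * @readback: Readback - only true if dirty
 * @val_buf:  Pointer to a struct ttm_validate_buffer containing
 *            information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */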
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}
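
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 *       vmw_surface.
 */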
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);
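
	/*
	 * Encode and submit the surface destroy command.
	 */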
	submit_size = vmw_surface_destroy_size();
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_cmd_commit(dev_priv, submit_size);
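
	/*
	 * Surface memory usage accounting.
	 */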
	dev_priv->used_memory_size -= res->backup_size;
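
	/*
	 * Release the surface ID.
	 */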
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
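
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf:      Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 *            the object.
 */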
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}
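
	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */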
	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}
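
/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */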
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}
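
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */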
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);

	WARN_ON_ONCE(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->metadata.sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
}
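
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 *          embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */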
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	if (base->shareable && res && res->backup)
		drm_gem_object_put(&res->backup->base.base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
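
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */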
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid);
}
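
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */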
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_surface_metadata *metadata;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
		(union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	const SVGA3dSurfaceDesc *desc;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	desc = vmw_surface_get_desc(req->format);
	if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	srf = &user_srf->srf;
	metadata = &srf->metadata;
	res = &srf->res;
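
	/* Driver internally stores as 64-bit flags */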
	metadata->flags = (SVGA3dSurfaceAllFlags)req->flags;
	metadata->format = req->format;
	metadata->scanout = req->scanout;

	memcpy(metadata->mip_levels, req->mip_levels,
	       sizeof(metadata->mip_levels));
	metadata->num_sizes = num_sizes;
	metadata->sizes =
		memdup_user((struct drm_vmw_size __user *)(unsigned long)
			    req->size_addr,
			    sizeof(*metadata->sizes) * metadata->num_sizes);
	if (IS_ERR(metadata->sizes)) {
		ret = PTR_ERR(metadata->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	metadata->base_size = *srf->metadata.sizes;
	metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	metadata->multisample_count = 0;
	metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	metadata->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = metadata->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < metadata->mip_levels[i]; ++j) {
			uint32_t stride = vmw_surface_calculate_pitch(
						desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += vmw_surface_get_image_buffer_size
						(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (metadata->scanout &&
	    metadata->num_sizes == 1 &&
	    metadata->sizes[0].width == 64 &&
	    metadata->sizes[0].height == 64 &&
	    metadata->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);
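
	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */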
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;
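
	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */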
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_gem_object_create_with_handle(dev_priv,
							file_priv,
							res->backup_size,
							&backup_handle,
							&res->backup);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
		vmw_bo_reference(res->backup);
		drm_gem_object_get(&res->backup->base.base);
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(metadata->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_unlock:
	return ret;
}
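
/**
 * vmw_surface_handle_reference - Add a reference to a user surface handle
 *
 * @dev_priv:    Pointer to a device private struct.
 * @file_priv:   Pointer to a drm file private structure.
 * @u_handle:    The user handle or prime fd identifying the surface.
 * @handle_type: Whether @u_handle is a TTM handle or a prime fd.
 * @base_p:      On success, assigned the referenced TTM base object.
 *
 * Looks up the base object behind @u_handle, performs the necessary
 * permission checks and, for non-prime handles, registers a reference
 * on the object for the calling client.
 */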
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}
	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		bool require_exist = false;

		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);
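
		/* Error out if we are unauthenticated primary */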
		if (drm_is_primary_client(file_priv) &&
		    !file_priv->authenticated) {
			ret = -EACCES;
			goto out_bad_resource;
		}
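
		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */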
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, NULL, require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle);

	return ret;
}
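
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */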
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
		(union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
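
	/* Downcast of flags when sending back to user space */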
	rep->flags = (uint32_t)srf->metadata.flags;
	rep->format = srf->metadata.format;
	memcpy(rep->mip_levels, srf->metadata.mip_levels,
	       sizeof(srf->metadata.mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->metadata.base_size,
				   sizeof(srf->metadata.base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->metadata.num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}
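
/**
 * vmw_gb_surface_create - Encode and submit a guest-backed surface
 * define command.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 *       vmw_surface.
 */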
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_surface_metadata *metadata = &srf->metadata;
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v4 body;
	} *cmd4;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
		cmd_len = sizeof(cmd4->body);
		submit_len = sizeof(*cmd4);
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (metadata->array_size > 0) {
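		/* Layered (array) surfaces need at least the v2 command. */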
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_CMD_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	cmd4 = (typeof(cmd4))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (has_sm5_context(dev_priv) && metadata->array_size > 0) {
		cmd4->header.id = cmd_id;
		cmd4->header.size = cmd_len;
		cmd4->body.sid = srf->res.id;
		cmd4->body.surfaceFlags = metadata->flags;
		cmd4->body.format = metadata->format;
		cmd4->body.numMipLevels = metadata->mip_levels[0];
		cmd4->body.multisampleCount = metadata->multisample_count;
		cmd4->body.multisamplePattern = metadata->multisample_pattern;
		cmd4->body.qualityLevel = metadata->quality_level;
		cmd4->body.autogenFilter = metadata->autogen_filter;
		cmd4->body.size.width = metadata->base_size.width;
		cmd4->body.size.height = metadata->base_size.height;
		cmd4->body.size.depth = metadata->base_size.depth;
		cmd4->body.arraySize = metadata->array_size;
		cmd4->body.bufferByteStride = metadata->buffer_byte_stride;
	} else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = metadata->flags;
		cmd3->body.format = metadata->format;
		cmd3->body.numMipLevels = metadata->mip_levels[0];
		cmd3->body.multisampleCount = metadata->multisample_count;
		cmd3->body.multisamplePattern = metadata->multisample_pattern;
		cmd3->body.qualityLevel = metadata->quality_level;
		cmd3->body.autogenFilter = metadata->autogen_filter;
		cmd3->body.size.width = metadata->base_size.width;
		cmd3->body.size.height = metadata->base_size.height;
		cmd3->body.size.depth = metadata->base_size.depth;
		cmd3->body.arraySize = metadata->array_size;
	} else if (metadata->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = metadata->flags;
		cmd2->body.format = metadata->format;
		cmd2->body.numMipLevels = metadata->mip_levels[0];
		cmd2->body.multisampleCount = metadata->multisample_count;
		cmd2->body.autogenFilter = metadata->autogen_filter;
		cmd2->body.size.width = metadata->base_size.width;
		cmd2->body.size.height = metadata->base_size.height;
		cmd2->body.size.depth = metadata->base_size.depth;
		cmd2->body.arraySize = metadata->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = metadata->flags;
		cmd->body.format = metadata->format;
		cmd->body.numMipLevels = metadata->mip_levels[0];
		cmd->body.multisampleCount = metadata->multisample_count;
		cmd->body.autogenFilter = metadata->autogen_filter;
		cmd->body.size.width = metadata->base_size.width;
		cmd->body.size.height = metadata->base_size.height;
		cmd->body.size.depth = metadata->base_size.depth;
	}

	vmw_cmd_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}

static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->resource->start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_cmd_commit(dev_priv, submit_size);

	if (res->backup->dirty && res->backup_dirty) {
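		/* We've just made a full upload. Cap dirty. */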
		vmw_bo_dirty_clear_res(res);
	}

	res->backup_dirty = false;

	return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
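
	/*
	 * Create a fence object and fence the backup buffer.
	 */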
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
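
/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */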
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
		(union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.buffer_byte_stride = 0;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}
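
/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */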
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
		(union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}
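
/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality with extended arguments.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */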
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
		(union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}
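
/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality with extended arguments.
 *
 * @dev:       Pointer to a struct drm_device.
 * @data:      Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */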
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
		(union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}
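
/**
 * vmw_gb_surface_define_internal - Worker for the GB surface define ioctls.
 *
 * @dev:       Pointer to a struct drm_device.
 * @req:       Request argument from user-space.
 * @rep:       Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */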
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata metadata = {0};
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	int ret = 0;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);
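
	/* array_size must be null for non-GL3 host. */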
	if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
		VMW_DEBUG_USER("SM4 surface not supported.\n");
		return -EINVAL;
	}

	if (!has_sm4_1_context(dev_priv)) {
		if (req->svga3d_flags_upper_32_bits != 0)
			ret = -EINVAL;

		if (req->base.multisample_count != 0)
			ret = -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			ret = -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			ret = -EINVAL;

		if (ret) {
			VMW_DEBUG_USER("SM4.1 surface not supported.\n");
			return ret;
		}
	}

	if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
		VMW_DEBUG_USER("SM5 surface not supported.\n");
		return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0) {
		VMW_DEBUG_USER("Invalid sample count.\n");
		return -EINVAL;
	}

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) {
		VMW_DEBUG_USER("Invalid mip level.\n");
		return -EINVAL;
	}

	metadata.flags = svga3d_flags_64;
	metadata.format = req->base.format;
	metadata.mip_levels[0] = req->base.mip_levels;
	metadata.multisample_count = req->base.multisample_count;
	metadata.multisample_pattern = req->multisample_pattern;
	metadata.quality_level = req->quality_level;
	metadata.array_size = req->base.array_size;
	metadata.buffer_byte_stride = req->buffer_byte_stride;
	metadata.num_sizes = 1;
	metadata.base_size = req->base.base_size;
	metadata.scanout = req->base.drm_surface_flags &
		drm_vmw_surface_flag_scanout;

	ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
	if (ret != 0) {
		VMW_DEBUG_USER("Failed to define surface.\n");
		return ret;
	}

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_file_get_master(file_priv);

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
					 &res->backup);
		if (ret == 0) {
			if (res->backup->base.base.size < res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent)) {
		ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
							res->backup_size,
							&backup_handle,
							&res->backup);
		if (ret == 0)
			vmw_bo_reference(res->backup);
	}

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_buffer_object *backup = res->backup;

		ttm_bo_reserve(&backup->base, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->base);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
		rep->buffer_size = res->backup->base.base.size;
		rep->buffer_handle = backup_handle;
		if (user_srf->prime.base.shareable)
			drm_gem_object_get(&res->backup->base.base);
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}
	vmw_resource_unreference(&res);

out_unlock:
	return ret;
}
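
/**
 * vmw_gb_surface_reference_internal - Worker for the GB surface reference
 * ioctls.
 *
 * @dev:       Pointer to a struct drm_device.
 * @req:       Pointer to user-space request surface arg.
 * @rep:       Pointer to response to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */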
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct vmw_surface_metadata *metadata;
	struct ttm_base_object *base;
	u32 backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		goto out_bad_resource;
	}
	metadata = &srf->metadata;

	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
				    &backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	if (ret != 0) {
		drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n",
			req->sid);
		goto out_bad_resource;
	}

	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags);
	rep->creq.base.format = metadata->format;
	rep->creq.base.mip_levels = metadata->mip_levels[0];
	rep->creq.base.drm_surface_flags = 0;
	rep->creq.base.multisample_count = metadata->multisample_count;
	rep->creq.base.autogen_filter = metadata->autogen_filter;
	rep->creq.base.array_size = metadata->array_size;
	rep->creq.base.buffer_handle = backup_handle;
	rep->creq.base.base_size = metadata->base_size;
	rep->crep.handle = user_srf->prime.base.handle;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.base.size;

	rep->creq.version = drm_vmw_gb_surface_v1;
	rep->creq.svga3d_flags_upper_32_bits =
		SVGA3D_FLAGS_UPPER_32(metadata->flags);
	rep->creq.multisample_pattern = metadata->multisample_pattern;
	rep->creq.quality_level = metadata->quality_level;
	rep->creq.must_be_zero = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
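
/**
 * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region.
 *
 * As we are assuming that @loc_start and @loc_end represent a sequential
 * range of backing store memory, if the region spans multiple lines then
 * regardless of the X coordinate, the full lines are dirtied.
 * Correspondingly if the region spans multiple Z slices, then full rather
 * than partial Z slices are dirtied.
 */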
static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
				 const struct vmw_surface_loc *loc_start,
				 const struct vmw_surface_loc *loc_end)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2 = box->z + box->d;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box->d = loc_end->z - box->z;

	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box->h = loc_end->y - box->y;

		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box->w = loc_end->x - box->x;
		} else {
			box->x = 0;
			box->w = size->width;
		}
	} else {
		box->y = 0;
		box->h = size->height;
		box->x = 0;
		box->w = size->width;
	}
}
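
/**
 * vmw_subres_dirty_full - Mark a full subresource as dirty
 * @dirty: The surface's dirty tracker.
 * @subres: The subresource.
 */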
static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
{
	const struct vmw_surface_cache *cache = &dirty->cache;
	u32 mip = subres % cache->num_mip_levels;
	const struct drm_vmw_size *size = &cache->mip[mip].size;
	SVGA3dBox *box = &dirty->boxes[subres];

	box->x = 0;
	box->y = 0;
	box->z = 0;
	box->w = size->width;
	box->h = size->height;
	box->d = size->depth;
}
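
/*
 * vmw_surface_tex_dirty_range_add - The dirty_add_range callback for texture
 * surfaces.
 */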
static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t backup_end = res->backup_offset + res->backup_size;
	struct vmw_surface_loc loc1, loc2;
	const struct vmw_surface_cache *cache;

	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	cache = &dirty->cache;
	vmw_surface_get_loc(cache, &loc1, start);
	vmw_surface_get_loc(cache, &loc2, end - 1);
	vmw_surface_inc_loc(cache, &loc2);

	if (loc1.sheet != loc2.sheet) {
		u32 sub_res;
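
		/*
		 * Multiple multisample sheets. To do this in an optimized
		 * fashion, compute the dirty region for each sheet and the
		 * resulting union. Since this is not a common case, just dirty
		 * the whole surface.
		 */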
		for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
		return;
	}
	if (loc1.sub_resource + 1 == loc2.sub_resource) {
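		/* Dirty range covers a single sub-resource */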
		vmw_subres_dirty_add(dirty, &loc1, &loc2);
	} else {
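		/* Dirty range covers multiple sub-resources */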
		struct vmw_surface_loc loc_min, loc_max;
		u32 sub_res;

		vmw_surface_max_loc(cache, loc1.sub_resource, &loc_max);
		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
		vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
		for (sub_res = loc1.sub_resource + 1;
		     sub_res < loc2.sub_resource - 1; ++sub_res)
			vmw_subres_dirty_full(dirty, sub_res);
	}
}
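
/*
 * vmw_surface_buf_dirty_range_add - The dirty_add_range callback for buffer
 * surfaces.
 */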
static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
					    size_t start, size_t end)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	const struct vmw_surface_cache *cache = &dirty->cache;
	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
	SVGA3dBox *box = &dirty->boxes[0];
	u32 box_c2;

	box->h = box->d = 1;
	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
	end = min(end, backup_end) - res->backup_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box->w = end - box->x;
}
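
/*
 * vmw_surface_dirty_range_add - The dirty_add_range callback for surfaces
 */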
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);

	if (WARN_ON(end <= res->backup_offset ||
		    start >= res->backup_offset + res->backup_size))
		return;

	if (srf->metadata.format == SVGA3D_BUFFER)
		vmw_surface_buf_dirty_range_add(res, start, end);
	else
		vmw_surface_tex_dirty_range_add(res, start, end);
}
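
/*
 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
 */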
static int vmw_surface_dirty_sync(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	u32 i, num_dirty;
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;
	size_t alloc_size;
	const struct vmw_surface_cache *cache = &dirty->cache;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXUpdateSubResource body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd2;
	void *cmd;

	num_dirty = 0;
	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (box->d)
			num_dirty++;
	}

	if (!num_dirty)
		goto out;

	alloc_size = num_dirty * ((has_sm4_context(dev_priv)) ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd1 = cmd;
	cmd2 = cmd;

	for (i = 0; i < dirty->num_subres; ++i) {
		const SVGA3dBox *box = &dirty->boxes[i];

		if (!box->d)
			continue;
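
		/*
		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
		 * UPDATE_GB_IMAGE is not.
		 */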
		if (has_sm4_context(dev_priv)) {
			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
			cmd1->header.size = sizeof(cmd1->body);
			cmd1->body.sid = res->id;
			cmd1->body.subResource = i;
			cmd1->body.box = *box;
			cmd1++;
		} else {
			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
			cmd2->header.size = sizeof(cmd2->body);
			cmd2->body.image.sid = res->id;
			cmd2->body.image.face = i / cache->num_mip_levels;
			cmd2->body.image.mipmap = i -
				(cache->num_mip_levels * cmd2->body.image.face);
			cmd2->body.box = *box;
			cmd2++;
		}
	}
	vmw_cmd_commit(dev_priv, alloc_size);
out:
	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
	       dirty->num_subres);

	return 0;
}
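
/*
 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
 */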
static int vmw_surface_dirty_alloc(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	const struct vmw_surface_metadata *metadata = &srf->metadata;
	struct vmw_surface_dirty *dirty;
	u32 num_layers = 1;
	u32 num_mip;
	u32 num_subres;
	u32 num_samples;
	size_t dirty_size;
	int ret;

	if (metadata->array_size)
		num_layers = metadata->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers *= SVGA3D_MAX_SURFACE_FACES;

	num_mip = metadata->mip_levels[0];
	if (!num_mip)
		num_mip = 1;

	num_subres = num_layers * num_mip;
	dirty_size = struct_size(dirty, boxes, num_subres);

	dirty = kvzalloc(dirty_size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	num_samples = max_t(u32, 1, metadata->multisample_count);
	ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
				      num_mip, num_layers, num_samples,
				      &dirty->cache);
	if (ret)
		goto out_no_cache;

	dirty->num_subres = num_subres;
	res->dirty = (struct vmw_resource_dirty *) dirty;

	return 0;

out_no_cache:
	kvfree(dirty);
out_no_dirty:
	return ret;
}
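
/*
 * vmw_surface_dirty_free - The surface's dirty_free callback.
 */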
static void vmw_surface_dirty_free(struct vmw_resource *res)
{
	struct vmw_surface_dirty *dirty =
		(struct vmw_surface_dirty *) res->dirty;

	kvfree(dirty);
	res->dirty = NULL;
}
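
/*
 * vmw_surface_clean - The surface's clean callback, issuing a readback.
 */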
static int vmw_surface_clean(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	size_t alloc_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	alloc_size = sizeof(*cmd);
	cmd = VMW_CMD_RESERVE(dev_priv, alloc_size);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_cmd_commit(dev_priv, alloc_size);

	return 0;
}
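
/*
 * vmw_gb_surface_define - Define a private GB surface
 *
 * @dev_priv: Pointer to a device private.
 * @req: Metadata representing the surface to create.
 * @srf_out: The newly allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle,
 * and thus will only be visible to vmwgfx. For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */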
int vmw_gb_surface_define(struct vmw_private *dev_priv,
			  const struct vmw_surface_metadata *req,
			  struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata *metadata;
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	u32 sample_count = 1;
	u32 num_layers = 1;
	int ret;

	*srf_out = NULL;

	if (req->scanout) {
		if (!vmw_surface_is_screen_target_format(req->format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.");
			return -EINVAL;
		}

		if (req->base_size.width > dev_priv->texture_max_width ||
		    req->base_size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
				       req->base_size.width,
				       req->base_size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const SVGA3dSurfaceDesc *desc =
			vmw_surface_get_desc(req->format);

		if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE)
		return -EINVAL;

	if (req->num_sizes != 1)
		return -EINVAL;

	if (req->sizes != NULL)
		return -EINVAL;

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	*srf_out = &user_srf->srf;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;

	srf = &user_srf->srf;
	srf->metadata = *req;
	srf->offsets = NULL;

	metadata = &srf->metadata;

	if (metadata->array_size)
		num_layers = req->array_size;
	else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = metadata->multisample_count;

	srf->res.backup_size =
		vmw_surface_get_serialized_size_extended(
				metadata->format,
				metadata->base_size,
				metadata->mip_levels[0],
				num_layers,
				sample_count);

	if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);
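
	/*
	 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
	 * with size greater than STDU max width/height. This is really a
	 * workaround to support creation of big framebuffer requested by some
	 * user-space for whole topology. That big framebuffer won't really be
	 * used for binding with screen target as during prepare_fb a separate
	 * surface is created so it's safe to ignore
	 * SVGA3D_SURFACE_SCREENTARGET flag.
	 */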
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    metadata->scanout &&
	    metadata->base_size.width <= dev_priv->stdu_max_width &&
	    metadata->base_size.height <= dev_priv->stdu_max_height)
		metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
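
	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */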
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	return ret;

out_unlock:
	return ret;
}