// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>

#include "vmwgfx_kms.h"

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

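/**
 * vmw_cursor_update_image - Define the cursor image for the device.
 *
 * Takes the CursorMob path when a cursor MOB (@cm_bo) is available;
 * otherwise emits a legacy SVGA_CMD_DEFINE_ALPHA_CURSOR FIFO command
 * carrying the full ARGB image inline.
 */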
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct ttm_buffer_object *cm_bo,
				    struct ttm_bo_kmap_obj *cm_map,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	if (cm_bo != NULL) {
		vmw_cursor_update_mob(dev_priv, cm_bo, cm_map, image,
				      width, height,
				      hotspotX, hotspotY);
		return;
	}

	/*
	 * Try to reserve FIFO command space and swallow any failures;
	 * such reserved space is expected to be freed by
	 * vmw_cmd_commit_flush().
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return;

	memset(cmd, 0, sizeof(*cmd));
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}

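/**
 * vmw_cursor_update_mob - Update the cursor image via the CursorMob mechanism.
 *
 * @dev_priv: device to work with
 * @bo: buffer object backing the cursor MOB
 * @map: kmap obj for the BO
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */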
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo,
				  struct ttm_bo_kmap_obj *map,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);
	bool dummy;

	BUG_ON(!image);

	header = (SVGAGBCursorHeader *)ttm_kmap_obj_virtual(map, &dummy);
	alpha_header = &header->header.alphaHeader;

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);

	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, bo->resource->start);
}

void vmw_du_destroy_cursor_mob_array(struct vmw_cursor_plane *vcp)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mob); i++) {
		if (vcp->cursor_mob[i] != NULL) {
			ttm_bo_unpin(vcp->cursor_mob[i]);
			ttm_bo_put(vcp->cursor_mob[i]);
			kfree(vcp->cursor_mob[i]);
			vcp->cursor_mob[i] = NULL;
		}
	}
}

#define CURSOR_MOB_SIZE(dimension) \
	((dimension) * (dimension) * sizeof(u32) + sizeof(SVGAGBCursorHeader))

int vmw_du_create_cursor_mob_array(struct vmw_cursor_plane *cursor)
{
	struct vmw_private *dev_priv = cursor->base.dev->dev_private;
	uint32_t cursor_max_dim, mob_max_size;
	int ret = 0;
	size_t i;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -ENOSYS;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	/* Fall back to the mandatorily-supported cursor dimension. */
	if (CURSOR_MOB_SIZE(cursor_max_dim) > mob_max_size)
		cursor_max_dim = 64;

	for (i = 0; i < ARRAY_SIZE(cursor->cursor_mob); i++) {
		struct ttm_buffer_object **const bo = &cursor->cursor_mob[i];

		ret = vmw_bo_create_kernel(dev_priv,
					   CURSOR_MOB_SIZE(cursor_max_dim),
					   &vmw_mob_placement, bo);
		if (ret != 0)
			goto teardown;

		if ((*bo)->resource->mem_type != VMW_PL_MOB) {
			DRM_ERROR("Obtained buffer object is not a MOB.\n");
			ret = -ENOSYS;
			goto teardown;
		}

		/* Fence the mob creation so we are guaranteed to have the mob. */
		ret = ttm_bo_reserve(*bo, false, false, NULL);
		if (ret != 0)
			goto teardown;

		vmw_bo_fence_single(*bo, NULL);
		ttm_bo_unreserve(*bo);

		drm_info(&dev_priv->drm, "Using CursorMob mobid %lu, max dimension %u\n",
			 (*bo)->resource->start, cursor_max_dim);
	}

	return 0;

teardown:
	vmw_du_destroy_cursor_mob_array(cursor);

	return ret;
}

#undef CURSOR_MOB_SIZE

static void vmw_cursor_update_bo(struct vmw_private *dev_priv,
				 struct ttm_buffer_object *cm_bo,
				 struct ttm_bo_kmap_obj *cm_map,
				 struct vmw_buffer_object *bo,
				 u32 width, u32 height,
				 u32 hotspotX, u32 hotspotY)
{
	void *virtual;
	bool dummy;

	virtual = ttm_kmap_obj_virtual(&bo->map, &dummy);
	if (virtual) {
		vmw_cursor_update_image(dev_priv, cm_bo, cm_map, virtual,
					width, height,
					hotspotX, hotspotY);
		atomic_dec(&bo->base_mapped_count);
	}
}

static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

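/*
 * vmw_kms_cursor_snoop - Snoop a surface DMA command that targets the
 * cursor surface, keeping a CPU-side copy of the 64x64 cursor image so
 * it can be re-defined after execbuf.
 */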
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	/*
	 * Reject anything the simple copy below cannot handle; in
	 * particular, bound the box to the 64x64 snooper image so the
	 * memcpy cannot overrun it.
	 */
	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > 64 || box->h > 64) {
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Copy row by row when the source pitch differs from a
		 * packed 64-pixel row.
		 */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

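/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */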
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, NULL, NULL,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
	vmw_du_destroy_cursor_mob_array(vmw_plane_to_vcp(plane));
	drm_plane_cleanup(plane);
}

void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

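/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display.
 */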
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

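/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Removes the FB requirements for the plane and eliminates pinning.
 */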
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}

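/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */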
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool dummy;

	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (likely(ret == 0)) {
			/* Only unmap once no other CPU mapping remains. */
			if (atomic_read(&vps->bo->base_mapped_count) == 0)
				ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	if (vps->cm_bo != NULL && ttm_kmap_obj_virtual(&vps->cm_map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(vps->cm_bo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->cm_map);
			ttm_bo_unreserve(vps->cm_bo);
		}
	}

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}

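/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */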
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct ttm_buffer_object *cm_bo = NULL;
	bool dummy;
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	vps->cm_bo = NULL;

	if (vps->surf == NULL && vps->bo != NULL) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Reserve and map the buffer ourselves: the mapping must
		 * stay in place until cleanup_fb, and base_mapped_count
		 * keeps track of the extra users.
		 */
		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
		if (likely(ret == 0))
			atomic_inc(&vps->bo->base_mapped_count);

		ttm_bo_unreserve(&vps->bo->base);
		if (unlikely(ret != 0))
			return -ENOMEM;
	}

	if (vps->surf || vps->bo) {
		unsigned cursor_mob_idx = vps->cursor_mob_idx;

		/* Lazy: set up the cursor MOBs just once per plane. */
		if (cursor_mob_idx == 0 && vcp->cursor_mob[0] == NULL)
			if (vmw_du_create_cursor_mob_array(vcp) != 0)
				vps->cursor_mob_idx = cursor_mob_idx = -1U;

		if (cursor_mob_idx < ARRAY_SIZE(vcp->cursor_mob)) {
			const u32 size = sizeof(SVGAGBCursorHeader) +
				new_state->crtc_w * new_state->crtc_h * sizeof(u32);

			cm_bo = vcp->cursor_mob[cursor_mob_idx];

			if (cm_bo->resource->num_pages * PAGE_SIZE < size) {
				ret = -EINVAL;
				goto error_bo_unmap;
			}

			ret = ttm_bo_reserve(cm_bo, false, false, NULL);
			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			ret = ttm_bo_kmap(cm_bo, 0, PFN_UP(size), &vps->cm_map);

			/*
			 * We just want to try to get mob bind to finish
			 * so that the first write to SVGA_REG_CURSOR_MOBID
			 * is done with a buffer that the device has already
			 * seen.
			 */
			(void) ttm_bo_wait(cm_bo, false, false);

			ttm_bo_unreserve(cm_bo);
			if (unlikely(ret != 0)) {
				ret = -ENOMEM;
				goto error_bo_unmap;
			}

			/* Double-buffer between the two cursor MOBs. */
			vps->cursor_mob_idx = cursor_mob_idx ^ 1;
			vps->cm_bo = cm_bo;
		}
	}

	return 0;

error_bo_unmap:
	if (vps->bo != NULL && ttm_kmap_obj_virtual(&vps->bo->map, &dummy) != NULL) {
		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);

		if (likely(ret == 0)) {
			atomic_dec(&vps->bo->base_mapped_count);
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->base);
		}
	}

	return ret;
}

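/**
 * vmw_du_cursor_plane_atomic_update - Update the cursor image and position
 *
 * @plane: cursor plane
 * @state: the atomic state containing the new plane state
 *
 * Pushes the (possibly snooped) cursor image to the device and programs
 * the new cursor position, or hides the cursor if no image is bound.
 */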
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		vmw_cursor_update_image(dev_priv, vps->cm_bo, &vps->cm_map,
					vps->surf->snooper.image,
					new_state->crtc_w,
					new_state->crtc_h,
					hotspot_x, hotspot_y);
	} else if (vps->bo) {
		vmw_cursor_update_bo(dev_priv, vps->cm_bo, &vps->cm_map,
				     vps->bo,
				     new_state->crtc_w,
				     new_state->crtc_h,
				     hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	/* Remember the framebuffer-supplied part of the hotspot. */
	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}

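/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state.  Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */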
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}

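/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */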
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC. */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}

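/* CRTC atomic begin: the vmwgfx display units have nothing to prepare here. */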
void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

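/*
 * CRTC atomic flush: complete any pending page-flip event now, since the
 * virtual device has no real vblank to wait for.
 */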
void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

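/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Returns a duplicate of the crtc state; the vmw-private part is copied
 * along with the base state by kmemdup().
 */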
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}

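/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */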
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

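/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */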
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

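/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Returns a duplicate of the plane state. Pinning is reset and each
 * ref-counted resource picks up an extra reference for the new state.
 */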
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again. */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}

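/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */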
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}

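/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: drm plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */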
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb. */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

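/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Returns a duplicate of the connector state.
 */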
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}

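/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */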
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

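/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */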
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

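/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */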
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

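/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 *             Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 *                  Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure.
 */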
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

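/*
 * Buffer-object framebuffer code
 */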
static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
			vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}

static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(&dev_priv->drm);

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		/* Rects come in src/dst pairs; only the dst rects matter. */
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_cmd_flush(dev_priv, false);

	drm_modeset_unlock_all(&dev_priv->drm);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy &&
	    vmw_cmd_supported(dev_priv))
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

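/*
 * vmw_framebuffer_pin - Pin the framebuffer's backing store into a
 * placement the active display unit can scan out from.
 */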
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen instead of the CPU.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update. */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

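/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer.  This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * Returns:
 * 0 on success, error code otherwise.
 */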
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8: /* 8-bit palettized format. */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}

static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->base.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}

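/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: device private
 * @width: requested framebuffer width
 * @height: requested framebuffer height
 *
 * Surfaces need to be less than texture size.
 */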
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

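/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */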
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on its type. */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed.
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

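/*
 * Generic Kernel modesetting functions
 */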
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	int ret;

	/* Returns either a bo or a surface. */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		/* Without this, a zero ret would return a NULL framebuffer. */
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}

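/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */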
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU, the individual screen (screen target) is limited
		 * by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper-left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available only for STDU. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}

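/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */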
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

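/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success, -EINVAL on invalid state, -EDEADLK if modeset locking
 * needs to be rerun.
 */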
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

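/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */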
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached; save
		 * the GUI position in the connector state so it is carried
		 * with the atomic commit.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}

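/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a wrapper around drm_atomic_helper_check() which additionally
 * checks the vmwgfx-specific implicit-placement and topology constraints
 * when a modeset is requested.
 *
 * Returns:
 * Zero for success or -errno
 */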
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}

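/*
 * vmw_kms_present - Present a surface-backed framebuffer to the host,
 * dispatching to the screen-target or screen-object implementation
 * depending on the active display unit.
 */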
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	static const char *display_unit_names[] = {
		"Invalid",
		"Legacy",
		"Screen Object",
		"Screen Target",
		"Invalid (max)"
	};

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	/* Try the display units in order of preference: STDU, SOU, LDU. */
	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}
	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
	drm_info(&dev_priv->drm, "%s display unit initialized\n",
		 display_unit_names[dev_priv->active_display_unit]);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs says we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(&dev_priv->drm);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->max_primary_mem : dev_priv->vram_size);
}

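/**
 * vmw_get_vblank_counter - Provide fake vblank counter
 *
 * @crtc: Pointer to drm crtc
 *
 * The device has no vblank counter; always returns 0.
 */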
u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
	return 0;
}

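/**
 * vmw_enable_vblank - Stub; vblank interrupts are not supported.
 *
 * @crtc: Pointer to drm crtc
 */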
int vmw_enable_vblank(struct drm_crtc *crtc)
{
	return -EINVAL;
}

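/**
 * vmw_disable_vblank - Stub; vblank interrupts are not supported.
 *
 * @crtc: Pointer to drm crtc
 */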
void vmw_disable_vblank(struct drm_crtc *crtc)
{
}

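/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */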
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Mirror the GUI offsets in the suggested-offset properties. */
	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

2303 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2304
2305 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2306 752, 800, 0, 480, 489, 492, 525, 0,
2307 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2308
2309 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2310 968, 1056, 0, 600, 601, 605, 628, 0,
2311 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2312
2313 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2314 1184, 1344, 0, 768, 771, 777, 806, 0,
2315 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2316
2317 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2318 1344, 1600, 0, 864, 865, 868, 900, 0,
2319 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2320
2321 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2322 1472, 1664, 0, 720, 723, 728, 748, 0,
2323 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2324
2325 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2326 1472, 1664, 0, 768, 771, 778, 798, 0,
2327 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2328
2329 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2330 1480, 1680, 0, 800, 803, 809, 831, 0,
2331 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2332
2333 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2334 1488, 1800, 0, 960, 961, 964, 1000, 0,
2335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2336
2337 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2338 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2340
2341 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2342 1536, 1792, 0, 768, 771, 777, 795, 0,
2343 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2344
2345 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2346 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2347 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2348
2349 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2350 1672, 1904, 0, 900, 903, 909, 934, 0,
2351 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2352
2353 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2354 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2356
2357 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2358 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2359 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2360
2361 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2362 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2363 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2364
2365 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2366 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2367 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2368
2369 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2370 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2371 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2372
2373 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2374 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2375 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2376
2377 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2378 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2379 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2380
2381 { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2382 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2383 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2384
2385 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2386 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2387 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2388
2389 { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2390 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2391 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2392
2393 { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2394 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2395 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2396
2397 { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2398 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2399 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2400
2401 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2402 };
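
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */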
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
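
/**
 * vmw_du_connector_fill_modes - Fill in a connector's probed mode list
 *
 * @connector: The connector to probe.
 * @max_width: Maximum width to accept, in pixels.
 * @max_height: Maximum height to accept, in pixels.
 *
 * Adds the display unit's preferred mode plus every built-in mode that fits
 * both the size limits and the available VRAM, then sorts the resulting list.
 */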
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width = min(max_width, dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU there is an extra limit on a mode from the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first, help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
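
/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480].
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. So a topology beyond these limits will return
 * with an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */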
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned int rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};
		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      def_rect.x1, def_rect.y1,
			      def_rect.x2, def_rect.y2);
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space for overflow as kernel use drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check is limiting the topology within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
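
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: Clip rects in framebuffer (surface) space.
 * @vclips: Clip rects in framebuffer (bo) space.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of clip rects in @clips or @vclips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 *
 * Return: 0 on success, or -ENOMEM if the FIFO reservation fails.
 */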
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						     dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}
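
/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: The client's file descriptor (struct drm_file). May be NULL.
 * @ctx: The validation context.
 * @out_fence: If non-NULL, returned refcounted fence-pointer.
 * @user_fence_rep: If non-NULL, pointer to a user-space address of a struct
 * drm_vmw_fence_rep to which a user-space fence update is copied.
 */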
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}
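
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */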
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_cmd_commit(dev_priv, copy_size);

	return 0;
}
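
/**
 * vmw_kms_fbdev_init_data - Find the connector, crtc and mode to use for
 * fbdev emulation on a given display unit.
 *
 * @dev_priv: Pointer to a device private structure.
 * @unit: Index of the display unit to look up.
 * @max_width: Maximum width to accept when probing modes.
 * @max_height: Maximum height to accept when probing modes.
 * @p_con: Returns the connector of the display unit.
 * @p_crtc: Returns the crtc of the display unit.
 * @p_mode: Returns the preferred mode, or the first probed mode if no
 * preferred mode is found.
 *
 * Return: 0 on success, -EINVAL if the display unit or an initial mode
 * could not be found.
 */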
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;
	int ret = 0;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
		DRM_ERROR("Could not find initial display unit.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (&mode->head == &con->modes) {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	} else {
		*p_mode = mode;
	}

out_unlock:
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	return ret;
}
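
/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */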
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}
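
/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */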
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}
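
/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */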
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}
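
/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */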
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}
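
/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 *
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do
 * plane update on a display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */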
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check if really need plane update and find the
	 * number of clips that actually are in plane src for fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
					    update->cpu_blit);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}