#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

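/*
 * Map a DRM fourcc to the matching VIRTIO_GPU_FORMAT_* value.  DRM
 * fourccs name components in little-endian byte order while the virtio
 * formats are named by their byte order in memory, which is why
 * DRM_FORMAT_XRGB8888 pairs with VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM.
 */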
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen: every format advertised in
		 * virtio_gpu_formats[] is handled above.
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = virtio_gpu_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

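/*
 * Validate the new plane state: no scaling is supported (min and max
 * scale are both NO_SCALING), only the cursor plane may be positioned
 * partially outside the CRTC, and updates on a disabled CRTC are
 * allowed.
 */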
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}

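/*
 * Copy the damaged rectangle of a dumb buffer from guest memory to the
 * host's copy of the resource via a TRANSFER_TO_HOST_2D command.  The
 * byte offset is computed from the rectangle origin, the per-pixel
 * size, and the framebuffer pitch.
 */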
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

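/*
 * Ask the host to flush the given region of the scanout resource.  If
 * the framebuffer carries a fence from prepare_fb, the flush is issued
 * under the reservation lock and we wait (up to 50 ms) for the host to
 * signal it before dropping the fence reference.
 */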
static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgfb->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);

		dma_fence_wait_timeout(&vgfb->fence->f, true,
				       msecs_to_jiffies(50));
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

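/*
 * Atomic update for the primary plane: disable the scanout when there
 * is no framebuffer or the CRTC is inactive, upload the damaged region
 * of dumb buffers to the host, re-issue SET_SCANOUT (or the blob
 * variant) when the framebuffer or source rectangle changed, and
 * finally flush the merged damage rectangle.
 */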
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
				(vgdev, output->index, bo,
				 plane->state->fb,
				 plane->state->src_w >> 16,
				 plane->state->src_h >> 16,
				 plane->state->src_x >> 16,
				 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

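/*
 * For dumb buffers, allocate a fence when the framebuffer changes so
 * the host's consumption of the guest backing store can be waited on
 * before the buffer is reused or flushed.
 */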
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	if (bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
						     0);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

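/* Release any fence from prepare_fb that is still attached to the fb. */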
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

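/*
 * Atomic update for the cursor plane: upload a new cursor image to the
 * host and wait for the transfer to complete, then send UPDATE_CURSOR
 * when the image or hotspot changed, or the cheaper MOVE_CURSOR when
 * only the position moved.
 */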
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb = virtio_gpu_plane_prepare_fb,
	.cleanup_fb = virtio_gpu_plane_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};

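/*
 * Allocate and register one plane (primary or cursor) for the output at
 * the given index; the CRTC index becomes the possible_crtcs bitmask
 * (1 << index).
 */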
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}