// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
#include "xen_drm_front_kms.h"

/*
 * Timeout in ms to wait for the frame done event from the backend:
 * must be a bit longer than the IO time-out.
 */
#define FRAME_DONE_TO_MS    (XEN_DRM_FRONT_WAIT_BACK_MS + 100)

static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
    return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}

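/*
 * Framebuffer destroy callback: detach the framebuffer from the backend
 * (if the DRM device is still registered) and free the GEM-backed
 * framebuffer.
 */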
static void fb_destroy(struct drm_framebuffer *fb)
{
    struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
    int idx;

    if (drm_dev_enter(fb->dev, &idx)) {
        xen_drm_front_fb_detach(drm_info->front_info,
                    xen_drm_front_fb_to_cookie(fb));
        drm_dev_exit(idx);
    }
    drm_gem_fb_destroy(fb);
}

static const struct drm_framebuffer_funcs fb_funcs = {
    .destroy = fb_destroy,
};

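/*
 * Create a GEM-backed framebuffer and attach it to the backend, associating
 * the framebuffer cookie with the underlying display buffer.
 */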
static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
      const struct drm_mode_fb_cmd2 *mode_cmd)
{
    struct xen_drm_front_drm_info *drm_info = dev->dev_private;
    struct drm_framebuffer *fb;
    struct drm_gem_object *gem_obj;
    int ret;

    fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
    if (IS_ERR(fb))
        return fb;

    gem_obj = fb->obj[0];

    ret = xen_drm_front_fb_attach(drm_info->front_info,
                      xen_drm_front_dbuf_to_cookie(gem_obj),
                      xen_drm_front_fb_to_cookie(fb),
                      fb->width, fb->height,
                      fb->format->format);
    if (ret < 0) {
        DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
        goto fail;
    }

    return fb;

fail:
    drm_gem_fb_destroy(fb);
    return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
    .fb_create = fb_create,
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = drm_atomic_helper_commit,
};

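/*
 * Deliver the pending page flip/vblank event to user-space, if any.
 * Protected by dev->event_lock, so it does not race with display_update()
 * caching a new event.
 */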
static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
    struct drm_crtc *crtc = &pipeline->pipe.crtc;
    struct drm_device *dev = crtc->dev;
    unsigned long flags;

    spin_lock_irqsave(&dev->event_lock, flags);
    if (pipeline->pending_event)
        drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
    pipeline->pending_event = NULL;
    spin_unlock_irqrestore(&dev->event_lock, flags);
}

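/*
 * Enable the display pipeline: send a mode set request with the current
 * framebuffer geometry and pixel format to the backend.
 */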
static void display_enable(struct drm_simple_display_pipe *pipe,
               struct drm_crtc_state *crtc_state,
               struct drm_plane_state *plane_state)
{
    struct xen_drm_front_drm_pipeline *pipeline =
            to_xen_drm_pipeline(pipe);
    struct drm_crtc *crtc = &pipe->crtc;
    struct drm_framebuffer *fb = plane_state->fb;
    int ret, idx;

    if (!drm_dev_enter(pipe->crtc.dev, &idx))
        return;

    ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
                     fb->width, fb->height,
                     fb->format->cpp[0] * 8,
                     xen_drm_front_fb_to_cookie(fb));

    if (ret) {
        DRM_ERROR("Failed to enable display: %d\n", ret);
        pipeline->conn_connected = false;
    }

    drm_dev_exit(idx);
}

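/*
 * Disable the display pipeline: send an all-zero mode set request to the
 * backend and release any event still pending from a previous commit.
 */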
static void display_disable(struct drm_simple_display_pipe *pipe)
{
    struct xen_drm_front_drm_pipeline *pipeline =
            to_xen_drm_pipeline(pipe);
    int ret = 0, idx;

    if (drm_dev_enter(pipe->crtc.dev, &idx)) {
        ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
                         xen_drm_front_fb_to_cookie(NULL));
        drm_dev_exit(idx);
    }
    if (ret)
        DRM_ERROR("Failed to disable display: %d\n", ret);

    /* Make sure we can restart with an enabled connector next time */
    pipeline->conn_connected = true;

    /* Release a stalled event, if any */
    send_pending_event(pipeline);
}

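/*
 * Frame done event handler: the backend has finished with the framebuffer,
 * so stop the page flip time-out worker and deliver the pending event.
 */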
void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
                     u64 fb_cookie)
{
    /*
     * This runs in interrupt context, e.g. under
     * drm_info->front_info->io_lock, so we cannot call the _sync version
     * to cancel the work.
     */
    cancel_delayed_work(&pipeline->pflip_to_worker);

    send_pending_event(pipeline);
}

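/*
 * Page flip time-out worker: the backend did not send the frame done event
 * in time, so release the pending event to unblock user-space.
 */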
static void pflip_to_worker(struct work_struct *work)
{
    struct delayed_work *delayed_work = to_delayed_work(work);
    struct xen_drm_front_drm_pipeline *pipeline =
            container_of(delayed_work,
                     struct xen_drm_front_drm_pipeline,
                     pflip_to_worker);

    DRM_ERROR("Frame done timed-out, releasing");
    send_pending_event(pipeline);
}

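/*
 * Send a page flip request to the backend if this atomic commit flips
 * between two framebuffers. Returns true if a flip was sent (the pending
 * event is then delivered on the frame done event), false otherwise.
 */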
static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
                   struct drm_plane_state *old_plane_state)
{
    struct drm_plane_state *plane_state =
            drm_atomic_get_new_plane_state(old_plane_state->state,
                               &pipe->plane);

    /*
     * If old_plane_state->fb is NULL and plane_state->fb is not,
     * then this is an atomic commit which will enable the display.
     * If old_plane_state->fb is not NULL and plane_state->fb is NULL,
     * then this is an atomic commit which will disable the display.
     * Ignore these and do not send a page flip, as this framebuffer will
     * be sent to the backend as a part of the display_set_config call.
     */
    if (old_plane_state->fb && plane_state->fb) {
        struct xen_drm_front_drm_pipeline *pipeline =
                to_xen_drm_pipeline(pipe);
        struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
        int ret;

        schedule_delayed_work(&pipeline->pflip_to_worker,
                      msecs_to_jiffies(FRAME_DONE_TO_MS));

        ret = xen_drm_front_page_flip(drm_info->front_info,
                          pipeline->index,
                          xen_drm_front_fb_to_cookie(plane_state->fb));
        if (ret) {
            DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);

            pipeline->conn_connected = false;
            /*
             * Report the flip as not handled, so the pending event is
             * sent, unblocking user-space.
             */
            return false;
        }
        /*
         * Signal that the page flip was handled; the pending event will
         * be sent on the frame done event from the backend.
         */
        return true;
    }

    return false;
}

static int display_check(struct drm_simple_display_pipe *pipe,
             struct drm_plane_state *plane_state,
             struct drm_crtc_state *crtc_state)
{
    /*
     * Xen doesn't initialize vblanking via drm_vblank_init(), so
     * DRM helpers assume that it doesn't handle vblanking and start
     * sending out fake VBLANK events automatically.
     *
     * As Xen contains its own logic for sending out VBLANK events
     * in send_pending_event(), disable no_vblank (i.e., the Xen
     * driver has vblanking support).
     */
    crtc_state->no_vblank = false;

    return 0;
}

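/*
 * Atomic update: cache the CRTC event under dev->event_lock, then either
 * send a page flip request to the backend or deliver the event right away.
 */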
static void display_update(struct drm_simple_display_pipe *pipe,
               struct drm_plane_state *old_plane_state)
{
    struct xen_drm_front_drm_pipeline *pipeline =
            to_xen_drm_pipeline(pipe);
    struct drm_crtc *crtc = &pipe->crtc;
    struct drm_pending_vblank_event *event;
    int idx;

    event = crtc->state->event;
    if (event) {
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        WARN_ON(pipeline->pending_event);

        spin_lock_irqsave(&dev->event_lock, flags);
        crtc->state->event = NULL;

        pipeline->pending_event = event;
        spin_unlock_irqrestore(&dev->event_lock, flags);
    }

    if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
        send_pending_event(pipeline);
        return;
    }

    /*
     * Send the page flip request to the backend *after* the event has been
     * cached above, so that when the flip done event arrives from the
     * backend it can be delivered without racing against this code.
     * If this is not a page flip, i.e. no flip done event is expected from
     * the backend, send the event now.
     */
    if (!display_send_page_flip(pipe, old_plane_state))
        send_pending_event(pipeline);

    drm_dev_exit(idx);
}

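/*
 * Only the fixed mode matching the width and height from the connector
 * configuration is supported.
 */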
static enum drm_mode_status
display_mode_valid(struct drm_simple_display_pipe *pipe,
           const struct drm_display_mode *mode)
{
    struct xen_drm_front_drm_pipeline *pipeline =
            to_xen_drm_pipeline(pipe);

    if (mode->hdisplay != pipeline->width)
        return MODE_ERROR;

    if (mode->vdisplay != pipeline->height)
        return MODE_ERROR;

    return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs display_funcs = {
    .mode_valid = display_mode_valid,
    .enable = display_enable,
    .disable = display_disable,
    .check = display_check,
    .update = display_update,
};

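/*
 * Initialize a single display pipeline for one configured connector: set up
 * the connector, the page flip time-out worker and the simple display pipe.
 */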
static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
                 int index, struct xen_drm_front_cfg_connector *cfg,
                 struct xen_drm_front_drm_pipeline *pipeline)
{
    struct drm_device *dev = drm_info->drm_dev;
    const u32 *formats;
    int format_count;
    int ret;

    pipeline->drm_info = drm_info;
    pipeline->index = index;
    pipeline->height = cfg->height;
    pipeline->width = cfg->width;

    INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

    ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
    if (ret)
        return ret;

    formats = xen_drm_front_conn_get_formats(&format_count);

    return drm_simple_display_pipe_init(dev, &pipeline->pipe,
                        &display_funcs, formats,
                        format_count, NULL,
                        &pipeline->conn);
}

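/*
 * Initialize mode setting: set up the mode config limits and create one
 * display pipeline per configured connector.
 */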
int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
    struct drm_device *dev = drm_info->drm_dev;
    int i, ret;

    drm_mode_config_init(dev);

    dev->mode_config.min_width = 0;
    dev->mode_config.min_height = 0;
    dev->mode_config.max_width = 4095;
    dev->mode_config.max_height = 2047;
    dev->mode_config.funcs = &mode_config_funcs;

    for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
        struct xen_drm_front_cfg_connector *cfg =
                &drm_info->front_info->cfg.connectors[i];
        struct xen_drm_front_drm_pipeline *pipeline =
                &drm_info->pipeline[i];

        ret = display_pipe_init(drm_info, i, cfg, pipeline);
        if (ret) {
            drm_mode_config_cleanup(dev);
            return ret;
        }
    }

    drm_mode_config_reset(dev);
    drm_kms_helper_poll_init(dev);
    return 0;
}

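/*
 * Tear down mode setting: stop the page flip time-out workers and release
 * any events still pending, so user-space is not left blocked.
 */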
void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
    int i;

    for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
        struct xen_drm_front_drm_pipeline *pipeline =
                &drm_info->pipeline[i];

        cancel_delayed_work_sync(&pipeline->pflip_to_worker);

        send_pending_event(pipeline);
    }
}