
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

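/*
 * A display buffer shared with the backend: dbuf_cookie and fb_cookie are
 * opaque identifiers matching frontend GEM/framebuffer objects with the
 * buffers known to the backend, while shbuf describes the pages shared
 * between the two domains via grant references.
 */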
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					    u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

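/*
 * Reserve the next slot of the shared request ring and assign the request
 * a monotonically increasing id, remembered in evt_id so the event channel
 * code can match the backend's response against the request in flight.
 */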
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

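/*
 * Push a prepared request to the backend and kick it. The caller must hold
 * front_info->io_lock, which protects the ring against the response
 * handler running in interrupt context; -EIO is returned if the channel
 * is not connected.
 */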
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

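/*
 * Wait for the backend to answer the last request, with a timeout of
 * XEN_DRM_FRONT_WAIT_BACK_MS: the completion is signalled by the event
 * channel code once the response arrives, and its status is returned.
 */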
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

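/*
 * All synchronous requests below follow the same pattern: req_io_lock
 * serializes requests on a channel so that only one is in flight at a
 * time, while io_lock is held just long enough to put the request on the
 * ring and kick the backend.
 */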
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

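/*
 * Create a display buffer shared with the backend: the buffer pages are
 * exposed via a page directory of grant references and the directory's
 * first grant is passed in the request. If the backend is configured to
 * allocate buffers itself (be_alloc), the XENDISPL_DBUF_FLG_REQ_ALLOC
 * flag asks it to do so and the granted pages are mapped after its reply;
 * mapping is a no-op for frontend-allocated buffers.
 */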
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset,
			      struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.data_ofs = offset;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

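/*
 * Tell the backend to destroy a display buffer. The local bookkeeping is
 * dropped before the request for backend-allocated buffers, so the
 * backend can actually free them, and after the request otherwise.
 */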
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend-allocated buffer release the references now, so
	 * the backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of the communication status with the backend:
	 * even if we cannot remove the remote resources, remove what we
	 * can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

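/*
 * Bind a DRM framebuffer (identified by fb_cookie) to an existing display
 * buffer and tell the backend about the framebuffer's geometry and pixel
 * format.
 */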
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

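/*
 * Request a page flip on the given connector. Like mode setting, and
 * unlike the dbuf/fb operations above, this goes over the per-connector
 * event channel rather than the generic operations channel.
 */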
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

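/*
 * GEM free callback: while the DRM device is still plugged in, ask the
 * backend to drop the buffer as well; once unplugged, only the local
 * bookkeeping can be cleaned up.
 */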
void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two-stage process: first we create a fully
	 * constructed GEM object, which is communicated to the backend, and
	 * only after that do we create the GEM handle. This avoids a race:
	 * once a handle is created it is immediately visible to user space,
	 * which could then try to access an object that has no pages yet.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of the GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop the reference from allocation - the handle holds it now */
	drm_gem_object_put(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop the reference from allocation */
	drm_gem_object_put(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);

static const struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_mmap            = drm_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,
};

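/*
 * Bring up the DRM device once the backend has connected: allocate the
 * device, initialize KMS and register the device with the DRM core.
 */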
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_dev;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail_dev:
	kfree(drm_info);
	front_info->drm_info = NULL;
fail:
	return ret;
}

static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if the device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend-allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

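/*
 * The backend has entered XenbusStateInitWait: read the frontend
 * configuration, then create and publish the event channels so that the
 * backend can connect to them.
 */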
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish them */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

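/*
 * XenBus state machine: react to backend state transitions, driving the
 * frontend from InitWait through Initialised to Connected and tearing
 * the DRM device down when the backend closes unexpectedly.
 */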
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so
		 * let it go into the closed state, so that we can also
		 * remove ours.
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d\n", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal the device is disconnected from XenBus, so no
	 * backend state change events arrive via the .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free the event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning up at our end.
	 * Normally, when the frontend driver is removed, the backend
	 * eventually goes into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * timeout.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
	.not_essential = true,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);