Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2012 Samsung Electronics Co.Ltd
0004  * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
0005  */
0006 
0007 #include <linux/refcount.h>
0008 #include <linux/clk.h>
0009 #include <linux/component.h>
0010 #include <linux/delay.h>
0011 #include <linux/dma-mapping.h>
0012 #include <linux/err.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/io.h>
0015 #include <linux/kernel.h>
0016 #include <linux/of.h>
0017 #include <linux/platform_device.h>
0018 #include <linux/pm_runtime.h>
0019 #include <linux/slab.h>
0020 #include <linux/uaccess.h>
0021 #include <linux/workqueue.h>
0022 
0023 #include <drm/drm_file.h>
0024 #include <drm/exynos_drm.h>
0025 
0026 #include "exynos_drm_drv.h"
0027 #include "exynos_drm_g2d.h"
0028 #include "exynos_drm_gem.h"
0029 
0030 #define G2D_HW_MAJOR_VER        4
0031 #define G2D_HW_MINOR_VER        1
0032 
0033 /* valid register range set from user: 0x0104 ~ 0x0880 */
0034 #define G2D_VALID_START         0x0104
0035 #define G2D_VALID_END           0x0880
0036 
0037 /* general registers */
0038 #define G2D_SOFT_RESET          0x0000
0039 #define G2D_INTEN           0x0004
0040 #define G2D_INTC_PEND           0x000C
0041 #define G2D_DMA_SFR_BASE_ADDR       0x0080
0042 #define G2D_DMA_COMMAND         0x0084
0043 #define G2D_DMA_STATUS          0x008C
0044 #define G2D_DMA_HOLD_CMD        0x0090
0045 
0046 /* command registers */
0047 #define G2D_BITBLT_START        0x0100
0048 
0049 /* registers for base address */
0050 #define G2D_SRC_BASE_ADDR       0x0304
0051 #define G2D_SRC_STRIDE          0x0308
0052 #define G2D_SRC_COLOR_MODE      0x030C
0053 #define G2D_SRC_LEFT_TOP        0x0310
0054 #define G2D_SRC_RIGHT_BOTTOM        0x0314
0055 #define G2D_SRC_PLANE2_BASE_ADDR    0x0318
0056 #define G2D_DST_BASE_ADDR       0x0404
0057 #define G2D_DST_STRIDE          0x0408
0058 #define G2D_DST_COLOR_MODE      0x040C
0059 #define G2D_DST_LEFT_TOP        0x0410
0060 #define G2D_DST_RIGHT_BOTTOM        0x0414
0061 #define G2D_DST_PLANE2_BASE_ADDR    0x0418
0062 #define G2D_PAT_BASE_ADDR       0x0500
0063 #define G2D_MSK_BASE_ADDR       0x0520
0064 
0065 /* G2D_SOFT_RESET */
0066 #define G2D_SFRCLEAR            (1 << 1)
0067 #define G2D_R               (1 << 0)
0068 
0069 /* G2D_INTEN */
0070 #define G2D_INTEN_ACF           (1 << 3)
0071 #define G2D_INTEN_UCF           (1 << 2)
0072 #define G2D_INTEN_GCF           (1 << 1)
0073 #define G2D_INTEN_SCF           (1 << 0)
0074 
0075 /* G2D_INTC_PEND */
0076 #define G2D_INTP_ACMD_FIN       (1 << 3)
0077 #define G2D_INTP_UCMD_FIN       (1 << 2)
0078 #define G2D_INTP_GCMD_FIN       (1 << 1)
0079 #define G2D_INTP_SCMD_FIN       (1 << 0)
0080 
0081 /* G2D_DMA_COMMAND */
0082 #define G2D_DMA_HALT            (1 << 2)
0083 #define G2D_DMA_CONTINUE        (1 << 1)
0084 #define G2D_DMA_START           (1 << 0)
0085 
0086 /* G2D_DMA_STATUS */
0087 #define G2D_DMA_LIST_DONE_COUNT     (0xFF << 17)
0088 #define G2D_DMA_BITBLT_DONE_COUNT   (0xFFFF << 1)
0089 #define G2D_DMA_DONE            (1 << 0)
0090 #define G2D_DMA_LIST_DONE_COUNT_OFFSET  17
0091 
0092 /* G2D_DMA_HOLD_CMD */
0093 #define G2D_USER_HOLD           (1 << 2)
0094 #define G2D_LIST_HOLD           (1 << 1)
0095 #define G2D_BITBLT_HOLD         (1 << 0)
0096 
0097 /* G2D_BITBLT_START */
0098 #define G2D_START_CASESEL       (1 << 2)
0099 #define G2D_START_NHOLT         (1 << 1)
0100 #define G2D_START_BITBLT        (1 << 0)
0101 
0102 /* buffer color format */
0103 #define G2D_FMT_XRGB8888        0
0104 #define G2D_FMT_ARGB8888        1
0105 #define G2D_FMT_RGB565          2
0106 #define G2D_FMT_XRGB1555        3
0107 #define G2D_FMT_ARGB1555        4
0108 #define G2D_FMT_XRGB4444        5
0109 #define G2D_FMT_ARGB4444        6
0110 #define G2D_FMT_PACKED_RGB888       7
0111 #define G2D_FMT_A8          11
0112 #define G2D_FMT_L8          12
0113 
0114 /* buffer valid length */
0115 #define G2D_LEN_MIN         1
0116 #define G2D_LEN_MAX         8000
0117 
0118 #define G2D_CMDLIST_SIZE        (PAGE_SIZE / 4)
0119 #define G2D_CMDLIST_NUM         64
0120 #define G2D_CMDLIST_POOL_SIZE       (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
0121 #define G2D_CMDLIST_DATA_NUM        (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
0122 
0123 /* maximum buffer pool size of userptr is 64MB as default */
0124 #define MAX_POOL        (64 * 1024 * 1024)
0125 
/* Origin of a buffer referenced from a cmdlist. */
enum {
	BUF_TYPE_GEM = 1,	/* buffer is referenced by a GEM handle */
	BUF_TYPE_USERPTR,	/* buffer is described by a userspace pointer */
};

/*
 * Register class of a patched cmdlist entry; also used as the index
 * into the per-node bookkeeping arrays in struct g2d_buf_info.
 */
enum g2d_reg_type {
	REG_TYPE_NONE = -1,
	REG_TYPE_SRC,
	REG_TYPE_SRC_PLANE2,
	REG_TYPE_DST,
	REG_TYPE_DST_PLANE2,
	REG_TYPE_PAT,
	REG_TYPE_MSK,
	MAX_REG_TYPE_NR
};

/* Bit numbers for the g2d_data::flags word. */
enum g2d_flag_bits {
	/*
	 * If set, suspends the runqueue worker after the currently
	 * processed node is finished.
	 */
	G2D_BIT_SUSPEND_RUNQUEUE,
	/*
	 * If set, indicates that the engine is currently busy.
	 */
	G2D_BIT_ENGINE_BUSY,
};
0153 
/*
 * cmdlist data structure
 *
 * @data holds (register offset, value) pairs; @last indexes the next
 * free slot, which receives the DMA address of a chained cmdlist when
 * lists are linked (see g2d_add_cmdlist_to_inuse()).
 */
struct g2d_cmdlist {
	u32		head;
	unsigned long	data[G2D_CMDLIST_DATA_NUM];
	u32		last;	/* last data offset */
};

/*
 * A structure of buffer description
 *
 * @format: color format
 * @stride: buffer stride/pitch in bytes
 * @left_x: the x coordinate of left top corner
 * @top_y: the y coordinate of left top corner
 * @right_x: the x coordinate of right bottom corner
 * @bottom_y: the y coordinate of right bottom corner
 *
 */
struct g2d_buf_desc {
	unsigned int	format;
	unsigned int	stride;
	unsigned int	left_x;
	unsigned int	top_y;
	unsigned int	right_x;
	unsigned int	bottom_y;
};

/*
 * A structure of buffer information
 *
 * @map_nr: manages the number of mapped buffers
 * @reg_types: stores register type in the order of requested command
 * @obj: stores the mapped buffer object in its reg_type position
 *       (a struct exynos_drm_gem or a struct g2d_cmdlist_userptr)
 * @types: stores buffer type (BUF_TYPE_*) in its reg_type position
 * @descs: stores buffer description in its reg_type position
 *
 */
struct g2d_buf_info {
	unsigned int		map_nr;
	enum g2d_reg_type	reg_types[MAX_REG_TYPE_NR];
	void			*obj[MAX_REG_TYPE_NR];
	unsigned int		types[MAX_REG_TYPE_NR];
	struct g2d_buf_desc	descs[MAX_REG_TYPE_NR];
};

/* DRM event sent to userspace when a cmdlist with an event completes. */
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event	base;
	struct drm_exynos_g2d_event	event;
};

/* Tracking object for a pinned and DMA-mapped userspace buffer. */
struct g2d_cmdlist_userptr {
	struct list_head	list;
	dma_addr_t		dma_addr;	/* DMA address of the first sg entry */
	unsigned long		userptr;
	unsigned long		size;
	struct page		**pages;	/* pinned user pages */
	unsigned int		npages;
	struct sg_table		*sgt;
	refcount_t		refcount;
	bool			in_pool;	/* counted against g2d_data::max_pool */
	bool			out_of_list;	/* removed from the per-file userptr_list */
};
/* One slot of the cmdlist pool plus its buffer bookkeeping. */
struct g2d_cmdlist_node {
	struct list_head	list;
	struct g2d_cmdlist	*cmdlist;
	dma_addr_t		dma_addr;	/* DMA address of @cmdlist in the pool */
	struct g2d_buf_info	buf_info;

	struct drm_exynos_pending_g2d_event	*event;
};

/* One queued unit of work: a chain of cmdlists submitted by one client. */
struct g2d_runqueue_node {
	struct list_head	list;
	struct list_head	run_cmdlist;
	struct list_head	event_list;
	struct drm_file		*filp;
	pid_t			pid;
	struct completion	complete;	/* signalled when the node retires */
	int			async;		/* no waiter; freed by the worker */
};
0234 
/* Per-device driver state. */
struct g2d_data {
	struct device			*dev;
	void				*dma_priv;
	struct clk			*gate_clk;
	void __iomem			*regs;
	int				irq;
	struct workqueue_struct		*g2d_workq;
	struct work_struct		runqueue_work;
	struct drm_device		*drm_dev;
	unsigned long			flags;		/* enum g2d_flag_bits */

	/* cmdlist */
	struct g2d_cmdlist_node		*cmdlist_node;	/* backing array of all nodes */
	struct list_head		free_cmdlist;
	struct mutex			cmdlist_mutex;
	dma_addr_t			cmdlist_pool;
	void				*cmdlist_pool_virt;
	unsigned long			cmdlist_dma_attrs;

	/* runqueue*/
	struct g2d_runqueue_node	*runqueue_node;	/* node currently on the HW */
	struct list_head		runqueue;
	struct mutex			runqueue_mutex;
	struct kmem_cache		*runqueue_slab;

	/* userptr pool accounting, bounded by MAX_POOL by default */
	unsigned long			current_pool;
	unsigned long			max_pool;
};
0263 
/* Issue a software reset of the engine and clear the busy flag. */
static inline void g2d_hw_reset(struct g2d_data *g2d)
{
	/* G2D_SFRCLEAR also resets all SFRs to their default values. */
	writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
	/* After a reset the engine cannot be busy anymore. */
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
}
0269 
0270 static int g2d_init_cmdlist(struct g2d_data *g2d)
0271 {
0272     struct device *dev = g2d->dev;
0273     struct g2d_cmdlist_node *node;
0274     int nr;
0275     int ret;
0276     struct g2d_buf_info *buf_info;
0277 
0278     g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
0279 
0280     g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
0281                         G2D_CMDLIST_POOL_SIZE,
0282                         &g2d->cmdlist_pool, GFP_KERNEL,
0283                         g2d->cmdlist_dma_attrs);
0284     if (!g2d->cmdlist_pool_virt) {
0285         dev_err(dev, "failed to allocate dma memory\n");
0286         return -ENOMEM;
0287     }
0288 
0289     node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
0290     if (!node) {
0291         ret = -ENOMEM;
0292         goto err;
0293     }
0294 
0295     for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
0296         unsigned int i;
0297 
0298         node[nr].cmdlist =
0299             g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
0300         node[nr].dma_addr =
0301             g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
0302 
0303         buf_info = &node[nr].buf_info;
0304         for (i = 0; i < MAX_REG_TYPE_NR; i++)
0305             buf_info->reg_types[i] = REG_TYPE_NONE;
0306 
0307         list_add_tail(&node[nr].list, &g2d->free_cmdlist);
0308     }
0309 
0310     return 0;
0311 
0312 err:
0313     dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
0314             g2d->cmdlist_pool_virt,
0315             g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
0316     return ret;
0317 }
0318 
/* Tear down what g2d_init_cmdlist() allocated. */
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	/* kfree(NULL) is a no-op, so this is safe if init failed early. */
	kfree(g2d->cmdlist_node);

	if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
		dma_free_attrs(to_dma_dev(g2d->drm_dev),
				G2D_CMDLIST_POOL_SIZE,
				g2d->cmdlist_pool_virt,
				g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
	}
}
0330 
0331 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
0332 {
0333     struct device *dev = g2d->dev;
0334     struct g2d_cmdlist_node *node;
0335 
0336     mutex_lock(&g2d->cmdlist_mutex);
0337     if (list_empty(&g2d->free_cmdlist)) {
0338         dev_err(dev, "there is no free cmdlist\n");
0339         mutex_unlock(&g2d->cmdlist_mutex);
0340         return NULL;
0341     }
0342 
0343     node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
0344                 list);
0345     list_del_init(&node->list);
0346     mutex_unlock(&g2d->cmdlist_mutex);
0347 
0348     return node;
0349 }
0350 
/* Return a cmdlist node to the free list for reuse. */
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}
0357 
0358 static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
0359                      struct g2d_cmdlist_node *node)
0360 {
0361     struct g2d_cmdlist_node *lnode;
0362 
0363     if (list_empty(&file_priv->inuse_cmdlist))
0364         goto add_to_list;
0365 
0366     /* this links to base address of new cmdlist */
0367     lnode = list_entry(file_priv->inuse_cmdlist.prev,
0368                 struct g2d_cmdlist_node, list);
0369     lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
0370 
0371 add_to_list:
0372     list_add_tail(&node->list, &file_priv->inuse_cmdlist);
0373 
0374     if (node->event)
0375         list_add_tail(&node->event->base.link, &file_priv->event_list);
0376 }
0377 
0378 static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
0379                     void *obj,
0380                     bool force)
0381 {
0382     struct g2d_cmdlist_userptr *g2d_userptr = obj;
0383 
0384     if (!obj)
0385         return;
0386 
0387     if (force)
0388         goto out;
0389 
0390     refcount_dec(&g2d_userptr->refcount);
0391 
0392     if (refcount_read(&g2d_userptr->refcount) > 0)
0393         return;
0394 
0395     if (g2d_userptr->in_pool)
0396         return;
0397 
0398 out:
0399     dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
0400               DMA_BIDIRECTIONAL, 0);
0401 
0402     unpin_user_pages_dirty_lock(g2d_userptr->pages, g2d_userptr->npages,
0403                     true);
0404     kvfree(g2d_userptr->pages);
0405 
0406     if (!g2d_userptr->out_of_list)
0407         list_del_init(&g2d_userptr->list);
0408 
0409     sg_free_table(g2d_userptr->sgt);
0410     kfree(g2d_userptr->sgt);
0411     kfree(g2d_userptr);
0412 }
0413 
/*
 * Pin a userspace buffer and map it for device DMA.
 *
 * Looks up (userptr, size) in the file's cache first; on a hit, only a
 * reference is taken.  Otherwise the pages are pinned, gathered into an
 * sg table and DMA-mapped.  Returns a pointer to the DMA address on
 * success or an ERR_PTR() on failure; *obj receives the tracking object
 * to be released later via g2d_userptr_put_dma_addr().
 */
static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
					unsigned long userptr,
					unsigned long size,
					struct drm_file *filp,
					void **obj)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct sg_table *sgt;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;

	if (!size) {
		DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* check if userptr already exists in userptr_list. */
	list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
		if (g2d_userptr->userptr == userptr) {
			/*
			 * also check size because there could be same address
			 * and different size.
			 */
			if (g2d_userptr->size == size) {
				refcount_inc(&g2d_userptr->refcount);
				*obj = g2d_userptr;

				return &g2d_userptr->dma_addr;
			}

			/*
			 * at this moment, maybe g2d dma is accessing this
			 * g2d_userptr memory region so just remove this
			 * g2d_userptr object from userptr_list not to be
			 * referred again and also except it the userptr
			 * pool to be released after the dma access completion.
			 */
			g2d_userptr->out_of_list = true;
			g2d_userptr->in_pool = false;
			list_del_init(&g2d_userptr->list);

			break;
		}
	}

	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
	if (!g2d_userptr)
		return ERR_PTR(-ENOMEM);

	refcount_set(&g2d_userptr->refcount, 1);
	g2d_userptr->size = size;

	/* Page-align the range; offset is the start within the first page. */
	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->pages = kvmalloc_array(npages, sizeof(*g2d_userptr->pages),
						GFP_KERNEL);
	if (!g2d_userptr->pages) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = pin_user_pages_fast(start, npages,
				  FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
				  g2d_userptr->pages);
	if (ret != npages) {
		DRM_DEV_ERROR(g2d->dev,
			      "failed to get user pages from userptr.\n");
		if (ret < 0)
			goto err_destroy_pages;
		/* Partial pin: unpin only what was actually pinned. */
		npages = ret;
		ret = -EFAULT;
		goto err_unpin_pages;
	}
	g2d_userptr->npages = npages;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table_from_pages(sgt,
					g2d_userptr->pages,
					npages, offset, size, GFP_KERNEL);
	if (ret < 0) {
		DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
		goto err_free_sgt;
	}

	g2d_userptr->sgt = sgt;

	ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
			      DMA_BIDIRECTIONAL, 0);
	if (ret) {
		DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
		goto err_sg_free_table;
	}

	/*
	 * NOTE(review): only the first sg entry's DMA address is used,
	 * which presumably relies on the IOMMU mapping the table
	 * contiguously in device address space — confirm.
	 */
	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
	g2d_userptr->userptr = userptr;

	list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);

	/* Keep the mapping cached while the pool budget allows it. */
	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
		g2d->current_pool += npages << PAGE_SHIFT;
		g2d_userptr->in_pool = true;
	}

	*obj = g2d_userptr;

	return &g2d_userptr->dma_addr;

err_sg_free_table:
	sg_free_table(sgt);

err_free_sgt:
	kfree(sgt);

err_unpin_pages:
	unpin_user_pages(g2d_userptr->pages, npages);

err_destroy_pages:
	kvfree(g2d_userptr->pages);

err_free:
	kfree(g2d_userptr);

	return ERR_PTR(ret);
}
0547 
0548 static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
0549 {
0550     struct drm_exynos_file_private *file_priv = filp->driver_priv;
0551     struct g2d_cmdlist_userptr *g2d_userptr, *n;
0552 
0553     list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
0554         if (g2d_userptr->in_pool)
0555             g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
0556 
0557     g2d->current_pool = 0;
0558 }
0559 
0560 static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
0561 {
0562     enum g2d_reg_type reg_type;
0563 
0564     switch (reg_offset) {
0565     case G2D_SRC_BASE_ADDR:
0566     case G2D_SRC_STRIDE:
0567     case G2D_SRC_COLOR_MODE:
0568     case G2D_SRC_LEFT_TOP:
0569     case G2D_SRC_RIGHT_BOTTOM:
0570         reg_type = REG_TYPE_SRC;
0571         break;
0572     case G2D_SRC_PLANE2_BASE_ADDR:
0573         reg_type = REG_TYPE_SRC_PLANE2;
0574         break;
0575     case G2D_DST_BASE_ADDR:
0576     case G2D_DST_STRIDE:
0577     case G2D_DST_COLOR_MODE:
0578     case G2D_DST_LEFT_TOP:
0579     case G2D_DST_RIGHT_BOTTOM:
0580         reg_type = REG_TYPE_DST;
0581         break;
0582     case G2D_DST_PLANE2_BASE_ADDR:
0583         reg_type = REG_TYPE_DST_PLANE2;
0584         break;
0585     case G2D_PAT_BASE_ADDR:
0586         reg_type = REG_TYPE_PAT;
0587         break;
0588     case G2D_MSK_BASE_ADDR:
0589         reg_type = REG_TYPE_MSK;
0590         break;
0591     default:
0592         reg_type = REG_TYPE_NONE;
0593         DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
0594                   reg_offset);
0595         break;
0596     }
0597 
0598     return reg_type;
0599 }
0600 
0601 static unsigned long g2d_get_buf_bpp(unsigned int format)
0602 {
0603     unsigned long bpp;
0604 
0605     switch (format) {
0606     case G2D_FMT_XRGB8888:
0607     case G2D_FMT_ARGB8888:
0608         bpp = 4;
0609         break;
0610     case G2D_FMT_RGB565:
0611     case G2D_FMT_XRGB1555:
0612     case G2D_FMT_ARGB1555:
0613     case G2D_FMT_XRGB4444:
0614     case G2D_FMT_ARGB4444:
0615         bpp = 2;
0616         break;
0617     case G2D_FMT_PACKED_RGB888:
0618         bpp = 3;
0619         break;
0620     default:
0621         bpp = 1;
0622         break;
0623     }
0624 
0625     return bpp;
0626 }
0627 
0628 static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
0629                     struct g2d_buf_desc *buf_desc,
0630                     enum g2d_reg_type reg_type,
0631                     unsigned long size)
0632 {
0633     int width, height;
0634     unsigned long bpp, last_pos;
0635 
0636     /*
0637      * check source and destination buffers only.
0638      * so the others are always valid.
0639      */
0640     if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
0641         return true;
0642 
0643     /* This check also makes sure that right_x > left_x. */
0644     width = (int)buf_desc->right_x - (int)buf_desc->left_x;
0645     if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
0646         DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
0647         return false;
0648     }
0649 
0650     /* This check also makes sure that bottom_y > top_y. */
0651     height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
0652     if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
0653         DRM_DEV_ERROR(g2d->dev,
0654                   "height[%d] is out of range!\n", height);
0655         return false;
0656     }
0657 
0658     bpp = g2d_get_buf_bpp(buf_desc->format);
0659 
0660     /* Compute the position of the last byte that the engine accesses. */
0661     last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
0662         (unsigned long)buf_desc->stride +
0663         (unsigned long)buf_desc->right_x * bpp - 1;
0664 
0665     /*
0666      * Since right_x > left_x and bottom_y > top_y we already know
0667      * that the first_pos < last_pos (first_pos being the position
0668      * of the first byte the engine accesses), it just remains to
0669      * check if last_pos is smaller then the buffer size.
0670      */
0671 
0672     if (last_pos >= size) {
0673         DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
0674                   "is out of range [%lu]!\n", last_pos, size);
0675         return false;
0676     }
0677 
0678     return true;
0679 }
0680 
/*
 * Resolve the buffer references at the tail of a cmdlist.
 *
 * The last 2 * map_nr entries of the cmdlist are (register offset,
 * buffer handle) pairs.  Each handle — a GEM handle or a userspace
 * pointer to a struct drm_exynos_g2d_userptr — is resolved to a DMA
 * address, which is patched into the cmdlist in place of the handle.
 *
 * Returns 0 on success or a negative error code.  On failure,
 * buf_info->map_nr is set to the number of buffers actually mapped so
 * g2d_unmap_cmdlist_gem() releases exactly those.
 */
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
				struct g2d_cmdlist_node *node,
				struct drm_device *drm_dev,
				struct drm_file *file)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	struct g2d_buf_info *buf_info = &node->buf_info;
	int offset;
	int ret;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		int reg_pos;
		unsigned long handle;
		dma_addr_t *addr;

		/* Pairs are stored back-to-front from cmdlist->last. */
		reg_pos = cmdlist->last - 2 * (i + 1);

		offset = cmdlist->data[reg_pos];
		handle = cmdlist->data[reg_pos + 1];

		reg_type = g2d_get_reg_type(g2d, offset);
		if (reg_type == REG_TYPE_NONE) {
			ret = -EFAULT;
			goto err;
		}

		buf_desc = &buf_info->descs[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
			struct exynos_drm_gem *exynos_gem;

			exynos_gem = exynos_drm_gem_get(file, handle);
			if (!exynos_gem) {
				ret = -EFAULT;
				goto err;
			}

			/* Reject accesses that would overrun the object. */
			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type, exynos_gem->size)) {
				exynos_drm_gem_put(exynos_gem);
				ret = -EFAULT;
				goto err;
			}

			addr = &exynos_gem->dma_addr;
			buf_info->obj[reg_type] = exynos_gem;
		} else {
			struct drm_exynos_g2d_userptr g2d_userptr;

			/* The handle is a user pointer to the descriptor. */
			if (copy_from_user(&g2d_userptr, (void __user *)handle,
				sizeof(struct drm_exynos_g2d_userptr))) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type,
							 g2d_userptr.size)) {
				ret = -EFAULT;
				goto err;
			}

			addr = g2d_userptr_get_dma_addr(g2d,
							g2d_userptr.userptr,
							g2d_userptr.size,
							file,
							&buf_info->obj[reg_type]);
			if (IS_ERR(addr)) {
				ret = -EFAULT;
				goto err;
			}
		}

		/* Replace the handle with the resolved DMA address. */
		cmdlist->data[reg_pos + 1] = *addr;
		buf_info->reg_types[i] = reg_type;
	}

	return 0;

err:
	/* Remember how many buffers were mapped so unmap can undo them. */
	buf_info->map_nr = i;
	return ret;
}
0767 
0768 static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
0769                   struct g2d_cmdlist_node *node,
0770                   struct drm_file *filp)
0771 {
0772     struct g2d_buf_info *buf_info = &node->buf_info;
0773     int i;
0774 
0775     for (i = 0; i < buf_info->map_nr; i++) {
0776         struct g2d_buf_desc *buf_desc;
0777         enum g2d_reg_type reg_type;
0778         void *obj;
0779 
0780         reg_type = buf_info->reg_types[i];
0781 
0782         buf_desc = &buf_info->descs[reg_type];
0783         obj = buf_info->obj[reg_type];
0784 
0785         if (buf_info->types[reg_type] == BUF_TYPE_GEM)
0786             exynos_drm_gem_put(obj);
0787         else
0788             g2d_userptr_put_dma_addr(g2d, obj, false);
0789 
0790         buf_info->reg_types[i] = REG_TYPE_NONE;
0791         buf_info->obj[reg_type] = NULL;
0792         buf_info->types[reg_type] = 0;
0793         memset(buf_desc, 0x00, sizeof(*buf_desc));
0794     }
0795 
0796     buf_info->map_nr = 0;
0797 }
0798 
/* Kick DMA for the first cmdlist of a runqueue node. */
static void g2d_dma_start(struct g2d_data *g2d,
			  struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node =
				list_first_entry(&runqueue_node->run_cmdlist,
						struct g2d_cmdlist_node, list);

	/*
	 * Mark the engine busy before starting DMA; the flag is cleared
	 * from the IRQ handler (ACMD_FIN) or by g2d_hw_reset().
	 */
	set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
	writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
	writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
}
0810 
0811 static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
0812 {
0813     struct g2d_runqueue_node *runqueue_node;
0814 
0815     if (list_empty(&g2d->runqueue))
0816         return NULL;
0817 
0818     runqueue_node = list_first_entry(&g2d->runqueue,
0819                      struct g2d_runqueue_node, list);
0820     list_del_init(&runqueue_node->list);
0821     return runqueue_node;
0822 }
0823 
/* Release all buffers of a retired node, recycle its cmdlists, free it. */
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
0841 
0842 /**
0843  * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
0844  * @g2d: G2D state object
0845  * @file: if not zero, only remove items with this DRM file
0846  *
0847  * Has to be called under runqueue lock.
0848  */
0849 static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
0850 {
0851     struct g2d_runqueue_node *node, *n;
0852 
0853     if (list_empty(&g2d->runqueue))
0854         return;
0855 
0856     list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
0857         if (file && node->filp != file)
0858             continue;
0859 
0860         list_del_init(&node->list);
0861         g2d_free_runqueue_node(g2d, node);
0862     }
0863 }
0864 
/*
 * Runqueue worker: retires the node that just finished (if any) and,
 * unless suspension is requested, starts DMA for the next queued node.
 */
static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);
	struct g2d_runqueue_node *runqueue_node;

	/*
	 * The engine is busy and the completion of the current node is going
	 * to poke the runqueue worker, so nothing to do here.
	 */
	if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
		return;

	mutex_lock(&g2d->runqueue_mutex);

	runqueue_node = g2d->runqueue_node;
	g2d->runqueue_node = NULL;

	if (runqueue_node) {
		/* Drop the PM reference taken when the node was started. */
		pm_runtime_mark_last_busy(g2d->dev);
		pm_runtime_put_autosuspend(g2d->dev);

		complete(&runqueue_node->complete);
		/* Async submissions have no waiter, so free them here. */
		if (runqueue_node->async)
			g2d_free_runqueue_node(g2d, runqueue_node);
	}

	if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);

		if (g2d->runqueue_node) {
			int ret;

			ret = pm_runtime_resume_and_get(g2d->dev);
			if (ret < 0) {
				/*
				 * NOTE(review): on resume failure the node
				 * stays set as current but is never started
				 * nor completed — verify this is intended.
				 */
				dev_err(g2d->dev, "failed to enable G2D device.\n");
				goto out;
			}

			g2d_dma_start(g2d, g2d->runqueue_node);
		}
	}

out:
	mutex_unlock(&g2d->runqueue_mutex);
}
0911 
0912 static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
0913 {
0914     struct drm_device *drm_dev = g2d->drm_dev;
0915     struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
0916     struct drm_exynos_pending_g2d_event *e;
0917     struct timespec64 now;
0918 
0919     if (list_empty(&runqueue_node->event_list))
0920         return;
0921 
0922     e = list_first_entry(&runqueue_node->event_list,
0923                  struct drm_exynos_pending_g2d_event, base.link);
0924 
0925     ktime_get_ts64(&now);
0926     e->event.tv_sec = now.tv_sec;
0927     e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
0928     e->event.cmdlist_no = cmdlist_no;
0929 
0930     drm_send_event(drm_dev, &e->base);
0931 }
0932 
/* Interrupt handler: acknowledges and dispatches G2D completion events. */
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	/* Read and acknowledge all pending interrupt sources at once. */
	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);

	if (pending & G2D_INTP_GCMD_FIN) {
		/* A held cmdlist finished: read which list completed. */
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		/* Release the hold; continue DMA unless all lists are done. */
		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN) {
		/*
		 * All command lists done: mark the engine idle and let
		 * the worker retire the node and start the next one.
		 */
		clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
	}

	return IRQ_HANDLED;
}
0964 
0965 /**
0966  * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
0967  * @g2d: G2D state object
0968  * @file: if not zero, only wait if the current runqueue node belongs
0969  *        to the DRM file
0970  *
0971  * Should the engine not become idle after a 100ms timeout, a hardware
0972  * reset is issued.
0973  */
0974 static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
0975 {
0976     struct device *dev = g2d->dev;
0977 
0978     struct g2d_runqueue_node *runqueue_node = NULL;
0979     unsigned int tries = 10;
0980 
0981     mutex_lock(&g2d->runqueue_mutex);
0982 
0983     /* If no node is currently processed, we have nothing to do. */
0984     if (!g2d->runqueue_node)
0985         goto out;
0986 
0987     runqueue_node = g2d->runqueue_node;
0988 
0989     /* Check if the currently processed item belongs to us. */
0990     if (file && runqueue_node->filp != file)
0991         goto out;
0992 
0993     mutex_unlock(&g2d->runqueue_mutex);
0994 
0995     /* Wait for the G2D engine to finish. */
0996     while (tries-- && (g2d->runqueue_node == runqueue_node))
0997         mdelay(10);
0998 
0999     mutex_lock(&g2d->runqueue_mutex);
1000 
1001     if (g2d->runqueue_node != runqueue_node)
1002         goto out;
1003 
1004     dev_err(dev, "wait timed out, resetting engine...\n");
1005     g2d_hw_reset(g2d);
1006 
1007     /*
1008      * After the hardware reset of the engine we are going to loose
1009      * the IRQ which triggers the PM runtime put().
1010      * So do this manually here.
1011      */
1012     pm_runtime_mark_last_busy(dev);
1013     pm_runtime_put_autosuspend(dev);
1014 
1015     complete(&runqueue_node->complete);
1016     if (runqueue_node->async)
1017         g2d_free_runqueue_node(g2d, runqueue_node);
1018 
1019 out:
1020     mutex_unlock(&g2d->runqueue_mutex);
1021 }
1022 
/*
 * g2d_check_reg_offset - validate user-submitted (register, value) pairs.
 * @g2d: G2D state object
 * @node: cmdlist node whose last @nr command pairs are checked
 * @nr: number of command pairs to validate (walked backwards from
 *      cmdlist->last)
 * @for_addr: true when the pairs are buffer commands (base addresses),
 *            false for plain SFR commands
 *
 * Commands come from userspace, so every register offset must lie inside
 * the valid window and match the expected command class. Buffer geometry
 * (stride, format, corners) is recorded in the node's buffer descriptors
 * for the later size checks during mapping.
 *
 * Returns 0 on success, -EINVAL on any invalid register offset.
 */
static int g2d_check_reg_offset(struct g2d_data *g2d,
                struct g2d_cmdlist_node *node,
                int nr, bool for_addr)
{
    struct g2d_cmdlist *cmdlist = node->cmdlist;
    int reg_offset;
    int index;
    int i;

    for (i = 0; i < nr; i++) {
        struct g2d_buf_info *buf_info = &node->buf_info;
        struct g2d_buf_desc *buf_desc;
        enum g2d_reg_type reg_type;
        unsigned long value;

        /* Pairs are laid out as (reg, value); walk them back to front. */
        index = cmdlist->last - 2 * (i + 1);

        /* Strip flag bits, keep the register offset; enforce range/alignment. */
        reg_offset = cmdlist->data[index] & ~0xfffff000;
        if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
            goto err;
        if (reg_offset % 4)
            goto err;

        switch (reg_offset) {
        case G2D_SRC_BASE_ADDR:
        case G2D_SRC_PLANE2_BASE_ADDR:
        case G2D_DST_BASE_ADDR:
        case G2D_DST_PLANE2_BASE_ADDR:
        case G2D_PAT_BASE_ADDR:
        case G2D_MSK_BASE_ADDR:
            /* Base addresses are only allowed in buffer commands. */
            if (!for_addr)
                goto err;

            reg_type = g2d_get_reg_type(g2d, reg_offset);

            /* check userptr buffer type. */
            if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
                buf_info->types[reg_type] = BUF_TYPE_USERPTR;
                cmdlist->data[index] &= ~G2D_BUF_USERPTR;
            } else
                buf_info->types[reg_type] = BUF_TYPE_GEM;
            break;
        case G2D_SRC_STRIDE:
        case G2D_DST_STRIDE:
            if (for_addr)
                goto err;

            reg_type = g2d_get_reg_type(g2d, reg_offset);

            /* Remember the stride for the later buffer size check. */
            buf_desc = &buf_info->descs[reg_type];
            buf_desc->stride = cmdlist->data[index + 1];
            break;
        case G2D_SRC_COLOR_MODE:
        case G2D_DST_COLOR_MODE:
            if (for_addr)
                goto err;

            reg_type = g2d_get_reg_type(g2d, reg_offset);

            buf_desc = &buf_info->descs[reg_type];
            value = cmdlist->data[index + 1];

            /* Low nibble of the color mode register holds the format. */
            buf_desc->format = value & 0xf;
            break;
        case G2D_SRC_LEFT_TOP:
        case G2D_DST_LEFT_TOP:
            if (for_addr)
                goto err;

            reg_type = g2d_get_reg_type(g2d, reg_offset);

            buf_desc = &buf_info->descs[reg_type];
            value = cmdlist->data[index + 1];

            /* 13-bit X in the low half, 13-bit Y in the high half. */
            buf_desc->left_x = value & 0x1fff;
            buf_desc->top_y = (value & 0x1fff0000) >> 16;
            break;
        case G2D_SRC_RIGHT_BOTTOM:
        case G2D_DST_RIGHT_BOTTOM:
            if (for_addr)
                goto err;

            reg_type = g2d_get_reg_type(g2d, reg_offset);

            buf_desc = &buf_info->descs[reg_type];
            value = cmdlist->data[index + 1];

            buf_desc->right_x = value & 0x1fff;
            buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
            break;
        default:
            /* Any other register is fine as an SFR, never as an address. */
            if (for_addr)
                goto err;
            break;
        }
    }

    return 0;

err:
    dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
    return -EINVAL;
}
1126 
1127 /* ioctl functions */
1128 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1129                  struct drm_file *file)
1130 {
1131     struct drm_exynos_g2d_get_ver *ver = data;
1132 
1133     ver->major = G2D_HW_MAJOR_VER;
1134     ver->minor = G2D_HW_MINOR_VER;
1135 
1136     return 0;
1137 }
1138 
/*
 * exynos_g2d_set_cmdlist_ioctl - build a G2D command list from userspace input.
 *
 * Takes the SFR commands and buffer commands submitted by the client,
 * validates them, wraps them with the driver-generated prologue (soft
 * reset, interrupt setup) and epilogue (BITBLT start), maps any GEM
 * buffers referenced, and queues the finished cmdlist on the client's
 * in-use list. An optional completion event is reserved when requested.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources (event, mappings, cmdlist node) are released.
 */
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                 struct drm_file *file)
{
    struct drm_exynos_file_private *file_priv = file->driver_priv;
    struct exynos_drm_private *priv = drm_dev->dev_private;
    struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
    struct drm_exynos_g2d_set_cmdlist *req = data;
    struct drm_exynos_g2d_cmd *cmd;
    struct drm_exynos_pending_g2d_event *e;
    struct g2d_cmdlist_node *node;
    struct g2d_cmdlist *cmdlist;
    int size;
    int ret;

    node = g2d_get_cmdlist(g2d);
    if (!node)
        return -ENOMEM;

    /*
     * To avoid an integer overflow for the later size computations, we
     * enforce a maximum number of submitted commands here. This limit is
     * sufficient for all conceivable usage cases of the G2D.
     */
    if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
        req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
        dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
        return -EINVAL;
    }

    node->event = NULL;

    /* Reserve a DRM event if the client asked to be notified on completion. */
    if (req->event_type != G2D_EVENT_NOT) {
        e = kzalloc(sizeof(*node->event), GFP_KERNEL);
        if (!e) {
            ret = -ENOMEM;
            goto err;
        }

        e->event.base.type = DRM_EXYNOS_G2D_EVENT;
        e->event.base.length = sizeof(e->event);
        e->event.user_data = req->user_data;

        ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
        if (ret) {
            kfree(e);
            goto err;
        }

        node->event = e;
    }

    cmdlist = node->cmdlist;

    cmdlist->last = 0;

    /*
     * If don't clear SFR registers, the cmdlist is affected by register
     * values of previous cmdlist. G2D hw executes SFR clear command and
     * a next command at the same time then the next command is ignored and
     * is executed rightly from next next command, so needs a dummy command
     * to next command of SFR clear command.
     */
    cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
    cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
    cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
    cmdlist->data[cmdlist->last++] = 0;

    /*
     * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
     * and GCF bit should be set to INTEN register if user wants
     * G2D interrupt event once current command list execution is
     * finished.
     * Otherwise only ACF bit should be set to INTEN register so
     * that one interrupt is occurred after all command lists
     * have been completed.
     */
    if (node->event) {
        cmdlist->data[cmdlist->last++] = G2D_INTEN;
        cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
        cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
        cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
    } else {
        cmdlist->data[cmdlist->last++] = G2D_INTEN;
        cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
    }

    /*
     * Check the size of cmdlist. The 2 that is added last comes from
     * the implicit G2D_BITBLT_START that is appended once we have
     * checked all the submitted commands.
     */
    size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
    if (size > G2D_CMDLIST_DATA_NUM) {
        dev_err(g2d->dev, "cmdlist size is too big\n");
        ret = -EINVAL;
        goto err_free_event;
    }

    /* Copy and validate the plain SFR commands. */
    cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;

    if (copy_from_user(cmdlist->data + cmdlist->last,
                (void __user *)cmd,
                sizeof(*cmd) * req->cmd_nr)) {
        ret = -EFAULT;
        goto err_free_event;
    }
    cmdlist->last += req->cmd_nr * 2;

    ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
    if (ret < 0)
        goto err_free_event;

    /* Copy, validate and map the buffer (base address) commands. */
    node->buf_info.map_nr = req->cmd_buf_nr;
    if (req->cmd_buf_nr) {
        struct drm_exynos_g2d_cmd *cmd_buf;

        cmd_buf = (struct drm_exynos_g2d_cmd *)
                (unsigned long)req->cmd_buf;

        if (copy_from_user(cmdlist->data + cmdlist->last,
                    (void __user *)cmd_buf,
                    sizeof(*cmd_buf) * req->cmd_buf_nr)) {
            ret = -EFAULT;
            goto err_free_event;
        }
        cmdlist->last += req->cmd_buf_nr * 2;

        ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
        if (ret < 0)
            goto err_free_event;

        ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
        if (ret < 0)
            goto err_unmap;
    }

    /* Epilogue: kick off the blit once the list is executed. */
    cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
    cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;

    /* head */
    cmdlist->head = cmdlist->last / 2;

    /* tail */
    cmdlist->data[cmdlist->last] = 0;

    g2d_add_cmdlist_to_inuse(file_priv, node);

    return 0;

err_unmap:
    g2d_unmap_cmdlist_gem(g2d, node, file);
err_free_event:
    if (node->event)
        drm_event_cancel_free(drm_dev, &node->event->base);
err:
    g2d_put_cmdlist(g2d, node);
    return ret;
}
1297 
/*
 * exynos_g2d_exec_ioctl - submit the client's queued cmdlists for execution.
 *
 * Moves every cmdlist the client has set up (via the set_cmdlist ioctl)
 * into a new runqueue node, appends the node to the global runqueue and
 * wakes the runqueue worker. For synchronous requests (req->async == 0)
 * the call blocks until the engine has processed the node.
 *
 * Returns 0 on success, -ENOMEM if the node cannot be allocated, or
 * -EPERM when the client has no cmdlists queued.
 */
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
              struct drm_file *file)
{
    struct drm_exynos_file_private *file_priv = file->driver_priv;
    struct exynos_drm_private *priv = drm_dev->dev_private;
    struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
    struct drm_exynos_g2d_exec *req = data;
    struct g2d_runqueue_node *runqueue_node;
    struct list_head *run_cmdlist;
    struct list_head *event_list;

    runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
    if (!runqueue_node)
        return -ENOMEM;

    run_cmdlist = &runqueue_node->run_cmdlist;
    event_list = &runqueue_node->event_list;
    INIT_LIST_HEAD(run_cmdlist);
    INIT_LIST_HEAD(event_list);
    init_completion(&runqueue_node->complete);
    runqueue_node->async = req->async;

    /* Take ownership of everything the client queued so far. */
    list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
    list_splice_init(&file_priv->event_list, event_list);

    if (list_empty(run_cmdlist)) {
        dev_err(g2d->dev, "there is no inuse cmdlist\n");
        kmem_cache_free(g2d->runqueue_slab, runqueue_node);
        return -EPERM;
    }

    mutex_lock(&g2d->runqueue_mutex);
    runqueue_node->pid = current->pid;
    runqueue_node->filp = file;
    list_add_tail(&runqueue_node->list, &g2d->runqueue);
    mutex_unlock(&g2d->runqueue_mutex);

    /* Let the runqueue know that there is work to do. */
    queue_work(g2d->g2d_workq, &g2d->runqueue_work);

    if (runqueue_node->async)
        goto out;

    /* Synchronous mode: block until the worker has run this node. */
    wait_for_completion(&runqueue_node->complete);
    g2d_free_runqueue_node(g2d, runqueue_node);

out:
    return 0;
}
1347 
1348 int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
1349 {
1350     struct drm_exynos_file_private *file_priv = file->driver_priv;
1351 
1352     INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
1353     INIT_LIST_HEAD(&file_priv->event_list);
1354     INIT_LIST_HEAD(&file_priv->userptr_list);
1355 
1356     return 0;
1357 }
1358 
/*
 * g2d_close - per-file teardown for the G2D subsystem.
 *
 * Drops any work the closing client still has queued, waits until the
 * engine no longer touches the client's memory, and then releases all
 * cmdlist/GEM/userptr resources that still belong to it.
 */
void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
    struct drm_exynos_file_private *file_priv = file->driver_priv;
    struct exynos_drm_private *priv = drm_dev->dev_private;
    struct g2d_data *g2d;
    struct g2d_cmdlist_node *node, *n;

    /* The G2D component may never have been bound. */
    if (!priv->g2d_dev)
        return;

    g2d = dev_get_drvdata(priv->g2d_dev);

    /* Remove the runqueue nodes that belong to us. */
    mutex_lock(&g2d->runqueue_mutex);
    g2d_remove_runqueue_nodes(g2d, file);
    mutex_unlock(&g2d->runqueue_mutex);

    /*
     * Wait for the runqueue worker to finish its current node.
     * After this the engine should no longer be accessing any
     * memory belonging to us.
     */
    g2d_wait_finish(g2d, file);

    /*
     * Even after the engine is idle, there might still be stale cmdlists
     * (i.e. cmdlists which we submitted but never executed) around, with
     * their corresponding GEM/userptr buffers.
     * Properly unmap these buffers here.
     */
    mutex_lock(&g2d->cmdlist_mutex);
    list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
        g2d_unmap_cmdlist_gem(g2d, node, file);
        list_move_tail(&node->list, &g2d->free_cmdlist);
    }
    mutex_unlock(&g2d->cmdlist_mutex);

    /* release all g2d_userptr in pool. */
    g2d_userptr_free_all(g2d, file);
}
1399 
1400 static int g2d_bind(struct device *dev, struct device *master, void *data)
1401 {
1402     struct g2d_data *g2d = dev_get_drvdata(dev);
1403     struct drm_device *drm_dev = data;
1404     struct exynos_drm_private *priv = drm_dev->dev_private;
1405     int ret;
1406 
1407     g2d->drm_dev = drm_dev;
1408 
1409     /* allocate dma-aware cmdlist buffer. */
1410     ret = g2d_init_cmdlist(g2d);
1411     if (ret < 0) {
1412         dev_err(dev, "cmdlist init failed\n");
1413         return ret;
1414     }
1415 
1416     ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
1417     if (ret < 0) {
1418         dev_err(dev, "failed to enable iommu.\n");
1419         g2d_fini_cmdlist(g2d);
1420         return ret;
1421     }
1422     priv->g2d_dev = dev;
1423 
1424     dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
1425             G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
1426     return 0;
1427 }
1428 
/*
 * g2d_unbind - component unbind callback: detach the G2D from the DRM device.
 *
 * Stops the runqueue, waits for the engine to go idle, then tears down
 * the worker and the DMA/IOMMU registration (reverse order of g2d_bind).
 */
static void g2d_unbind(struct device *dev, struct device *master, void *data)
{
    struct g2d_data *g2d = dev_get_drvdata(dev);
    struct drm_device *drm_dev = data;
    struct exynos_drm_private *priv = drm_dev->dev_private;

    /* Suspend operation and wait for engine idle. */
    set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
    g2d_wait_finish(g2d, NULL);
    priv->g2d_dev = NULL;

    cancel_work_sync(&g2d->runqueue_work);
    exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
1443 
/* Component callbacks invoked when the Exynos DRM master assembles. */
static const struct component_ops g2d_component_ops = {
    .bind   = g2d_bind,
    .unbind = g2d_unbind,
};
1448 
/*
 * g2d_probe - platform driver probe.
 *
 * Allocates driver state, creates the runqueue slab cache and worker
 * thread, acquires clock/register/IRQ resources, enables runtime PM
 * with autosuspend, and finally registers the G2D component. Error
 * paths fall through the labels at the bottom in reverse acquisition
 * order (devm-managed resources clean up automatically).
 */
static int g2d_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct g2d_data *g2d;
    int ret;

    g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
    if (!g2d)
        return -ENOMEM;

    g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
            sizeof(struct g2d_runqueue_node), 0, 0, NULL);
    if (!g2d->runqueue_slab)
        return -ENOMEM;

    g2d->dev = dev;

    g2d->g2d_workq = create_singlethread_workqueue("g2d");
    if (!g2d->g2d_workq) {
        dev_err(dev, "failed to create workqueue\n");
        ret = -EINVAL;
        goto err_destroy_slab;
    }

    INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
    INIT_LIST_HEAD(&g2d->free_cmdlist);
    INIT_LIST_HEAD(&g2d->runqueue);

    mutex_init(&g2d->cmdlist_mutex);
    mutex_init(&g2d->runqueue_mutex);

    g2d->gate_clk = devm_clk_get(dev, "fimg2d");
    if (IS_ERR(g2d->gate_clk)) {
        dev_err(dev, "failed to get gate clock\n");
        ret = PTR_ERR(g2d->gate_clk);
        goto err_destroy_workqueue;
    }

    /* Let the device autosuspend 2s after the last use. */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_autosuspend_delay(dev, 2000);
    pm_runtime_enable(dev);
    clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
    clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);

    g2d->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(g2d->regs)) {
        ret = PTR_ERR(g2d->regs);
        goto err_put_clk;
    }

    g2d->irq = platform_get_irq(pdev, 0);
    if (g2d->irq < 0) {
        ret = g2d->irq;
        goto err_put_clk;
    }

    ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
                                "drm_g2d", g2d);
    if (ret < 0) {
        dev_err(dev, "irq request failed\n");
        goto err_put_clk;
    }

    g2d->max_pool = MAX_POOL;

    platform_set_drvdata(pdev, g2d);

    ret = component_add(dev, &g2d_component_ops);
    if (ret < 0) {
        dev_err(dev, "failed to register drm g2d device\n");
        goto err_put_clk;
    }

    return 0;

err_put_clk:
    pm_runtime_disable(dev);
err_destroy_workqueue:
    destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
    kmem_cache_destroy(g2d->runqueue_slab);
    return ret;
}
1532 
/*
 * g2d_remove - platform driver remove; undoes g2d_probe in reverse order.
 *
 * The component is deleted first, which triggers g2d_unbind and thereby
 * quiesces the engine before the remaining resources are torn down.
 */
static int g2d_remove(struct platform_device *pdev)
{
    struct g2d_data *g2d = platform_get_drvdata(pdev);

    component_del(&pdev->dev, &g2d_component_ops);

    /* There should be no locking needed here. */
    g2d_remove_runqueue_nodes(g2d, NULL);

    pm_runtime_dont_use_autosuspend(&pdev->dev);
    pm_runtime_disable(&pdev->dev);

    g2d_fini_cmdlist(g2d);
    destroy_workqueue(g2d->g2d_workq);
    kmem_cache_destroy(g2d->runqueue_slab);

    return 0;
}
1551 
1552 #ifdef CONFIG_PM_SLEEP
/*
 * g2d_suspend - system sleep callback: quiesce the engine before suspend.
 */
static int g2d_suspend(struct device *dev)
{
    struct g2d_data *g2d = dev_get_drvdata(dev);

    /*
     * Suspend the runqueue worker operation and wait until the G2D
     * engine is idle.
     */
    set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
    g2d_wait_finish(g2d, NULL);
    flush_work(&g2d->runqueue_work);

    return 0;
}
1567 
/*
 * g2d_resume - system resume callback: restart the runqueue worker so
 * any nodes queued while suspended get processed.
 */
static int g2d_resume(struct device *dev)
{
    struct g2d_data *g2d = dev_get_drvdata(dev);

    clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
    queue_work(g2d->g2d_workq, &g2d->runqueue_work);

    return 0;
}
1577 #endif
1578 
1579 #ifdef CONFIG_PM
/* Runtime PM: gate the G2D clock while the device is idle. */
static int g2d_runtime_suspend(struct device *dev)
{
    struct g2d_data *g2d = dev_get_drvdata(dev);

    clk_disable_unprepare(g2d->gate_clk);

    return 0;
}
1588 
1589 static int g2d_runtime_resume(struct device *dev)
1590 {
1591     struct g2d_data *g2d = dev_get_drvdata(dev);
1592     int ret;
1593 
1594     ret = clk_prepare_enable(g2d->gate_clk);
1595     if (ret < 0)
1596         dev_warn(dev, "failed to enable clock.\n");
1597 
1598     return ret;
1599 }
1600 #endif
1601 
/* System-sleep and runtime PM callbacks for the G2D device. */
static const struct dev_pm_ops g2d_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
    SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
1606 
/* Devicetree compatibles served by this driver. */
static const struct of_device_id exynos_g2d_match[] = {
    { .compatible = "samsung,exynos5250-g2d" },
    { .compatible = "samsung,exynos4212-g2d" },
    {},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1613 
/*
 * Platform driver registered by the Exynos DRM core as part of its
 * component aggregate.
 */
struct platform_driver g2d_driver = {
    .probe      = g2d_probe,
    .remove     = g2d_remove,
    .driver     = {
        .name   = "exynos-drm-g2d",
        .owner  = THIS_MODULE,
        .pm = &g2d_pm_ops,
        .of_match_table = exynos_g2d_match,
    },
};