0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
0004  * Author: Rob Clark <rob.clark@linaro.org>
0005  */
0006 
0007 #include <linux/dma-mapping.h>
0008 #include <linux/seq_file.h>
0009 #include <linux/shmem_fs.h>
0010 #include <linux/spinlock.h>
0011 #include <linux/pfn_t.h>
0012 
0013 #include <drm/drm_prime.h>
0014 #include <drm/drm_vma_manager.h>
0015 
0016 #include "omap_drv.h"
0017 #include "omap_dmm_tiler.h"
0018 
0019 /*
0020  * GEM buffer object implementation.
0021  */
0022 
0023 /* note: we use upper 8 bits of flags for driver-internal flags: */
0024 #define OMAP_BO_MEM_DMA_API 0x01000000  /* memory allocated with the dma_alloc_* API */
0025 #define OMAP_BO_MEM_SHMEM   0x02000000  /* memory allocated through shmem backing */
0026 #define OMAP_BO_MEM_DMABUF  0x08000000  /* memory imported from a dmabuf */
0027 
0028 struct omap_gem_object {
0029     struct drm_gem_object base;
0030 
0031     struct list_head mm_list;
0032 
0033     u32 flags;
0034 
0035     /** width/height for tiled formats (rounded up to slot boundaries) */
0036     u16 width, height;
0037 
0038     /** roll applied when mapping to DMM */
0039     u32 roll;
0040 
0041     /** protects pin_cnt, block, pages, dma_addrs and vaddr */
0042     struct mutex lock;
0043 
0044     /**
0045      * dma_addr contains the buffer DMA address. It is valid for
0046      *
0047      * - buffers allocated through the DMA mapping API (with the
0048      *   OMAP_BO_MEM_DMA_API flag set)
0049      *
0050      * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
0051      *   if they are physically contiguous (when sgt->orig_nents == 1)
0052      *
0053      * - buffers mapped through the TILER when pin_cnt is not zero, in which
0054      *   case the DMA address points to the TILER aperture
0055      *
0056      * Physically contiguous buffers have their DMA address equal to the
0057      * physical address as we don't remap those buffers through the TILER.
0058      *
0059      * Buffers mapped to the TILER have their DMA address pointing to the
0060      * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
0061      * the DMA address must be accessed through omap_gem_pin() to ensure
0062      * that the mapping won't disappear unexpectedly. References must be
0063      * released with omap_gem_unpin().
0064      */
0065     dma_addr_t dma_addr;
0066 
0067     /**
0068      * Number of pin users (see omap_gem_pin() and omap_gem_unpin())
0069      */
0070     refcount_t pin_cnt;
0071 
0072     /**
0073      * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF flag
0074      * is set and the sgt field is valid.
0075      */
0076     struct sg_table *sgt;
0077 
0078     /**
0079      * tiler block used when buffer is remapped in DMM/TILER.
0080      */
0081     struct tiler_block *block;
0082 
0083     /**
0084      * Array of backing pages, if allocated.  Note that pages are never
0085      * allocated for buffers originally allocated from contiguous memory
0086      */
0087     struct page **pages;
0088 
0089     /** addresses corresponding to pages in above array */
0090     dma_addr_t *dma_addrs;
0091 
0092     /**
0093      * Virtual address, if mapped.
0094      */
0095     void *vaddr;
0096 };
0097 
0098 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
0099 
0100 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
0101  * not necessarily pinned in TILER all the time, and (b) when they are
0102  * they are not necessarily page aligned, we reserve one or more small
0103  * regions in each of the 2d containers to use as a user-GART where we
0104  * can create a second page-aligned mapping of parts of the buffer
0105  * being accessed from userspace.
0106  *
0107  * Note that we could optimize slightly when we know that multiple
0108  * tiler containers are backed by the same PAT.. but I'll leave that
0109  * for later..
0110  */
0111 #define NUM_USERGART_ENTRIES 2
0112 struct omap_drm_usergart_entry {
0113     struct tiler_block *block;  /* the reserved tiler block */
0114     dma_addr_t dma_addr;
0115     struct drm_gem_object *obj; /* the current pinned obj */
0116     pgoff_t obj_pgoff;      /* page offset of obj currently
0117                        mapped in */
0118 };
0119 
0120 struct omap_drm_usergart {
0121     struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
0122     int height;             /* height in rows */
0123     int height_shift;       /* ilog2(height in rows) */
0124     int slot_shift;         /* ilog2(width per slot) */
0125     int stride_pfn;         /* stride in pages */
0126     int last;               /* index of last used entry */
0127 };
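
/*
 * Illustrative instance (numbers assumed, matching the "slot height is 64"
 * example used in omap_gem_fault_2d() below): with height == 64 the
 * height_shift is 6, and each of the NUM_USERGART_ENTRIES entries covers one
 * page-aligned slot-row that userspace faults on 2d tiled buffers are
 * bounced through.
 */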
0128 
0129 /* -----------------------------------------------------------------------------
0130  * Helpers
0131  */
0132 
0133 /** get mmap offset */
0134 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
0135 {
0136     struct drm_device *dev = obj->dev;
0137     int ret;
0138     size_t size;
0139 
0140     /* Make it mmapable */
0141     size = omap_gem_mmap_size(obj);
0142     ret = drm_gem_create_mmap_offset_size(obj, size);
0143     if (ret) {
0144         dev_err(dev->dev, "could not allocate mmap offset\n");
0145         return 0;
0146     }
0147 
0148     return drm_vma_node_offset_addr(&obj->vma_node);
0149 }
0150 
0151 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
0152 {
0153     if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
0154         return true;
0155 
0156     if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
0157         return true;
0158 
0159     return false;
0160 }
0161 
0162 /* -----------------------------------------------------------------------------
0163  * Eviction
0164  */
0165 
0166 static void omap_gem_evict_entry(struct drm_gem_object *obj,
0167         enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
0168 {
0169     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0170     struct omap_drm_private *priv = obj->dev->dev_private;
0171     int n = priv->usergart[fmt].height;
0172     size_t size = PAGE_SIZE * n;
0173     loff_t off = omap_gem_mmap_offset(obj) +
0174             (entry->obj_pgoff << PAGE_SHIFT);
0175     const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
0176 
0177     if (m > 1) {
0178         int i;
0179         /* if stride is greater than PAGE_SIZE then sparse mapping: */
0180         for (i = n; i > 0; i--) {
0181             unmap_mapping_range(obj->dev->anon_inode->i_mapping,
0182                         off, PAGE_SIZE, 1);
0183             off += PAGE_SIZE * m;
0184         }
0185     } else {
0186         unmap_mapping_range(obj->dev->anon_inode->i_mapping,
0187                     off, size, 1);
0188     }
0189 
0190     entry->obj = NULL;
0191 }
0192 
0193 /* Evict a buffer from usergart, if it is mapped there */
0194 static void omap_gem_evict(struct drm_gem_object *obj)
0195 {
0196     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0197     struct omap_drm_private *priv = obj->dev->dev_private;
0198 
0199     if (omap_obj->flags & OMAP_BO_TILED_MASK) {
0200         enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
0201         int i;
0202 
0203         for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
0204             struct omap_drm_usergart_entry *entry =
0205                 &priv->usergart[fmt].entry[i];
0206 
0207             if (entry->obj == obj)
0208                 omap_gem_evict_entry(obj, fmt, entry);
0209         }
0210     }
0211 }
0212 
0213 /* -----------------------------------------------------------------------------
0214  * Page Management
0215  */
0216 
0217 /*
0218  * Ensure backing pages are allocated. Must be called with the omap_obj.lock
0219  * held.
0220  */
0221 static int omap_gem_attach_pages(struct drm_gem_object *obj)
0222 {
0223     struct drm_device *dev = obj->dev;
0224     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0225     struct page **pages;
0226     int npages = obj->size >> PAGE_SHIFT;
0227     int i, ret;
0228     dma_addr_t *addrs;
0229 
0230     lockdep_assert_held(&omap_obj->lock);
0231 
0232     /*
0233      * If not using shmem (in which case backing pages don't need to be
0234      * allocated) or if pages are already allocated we're done.
0235      */
0236     if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
0237         return 0;
0238 
0239     pages = drm_gem_get_pages(obj);
0240     if (IS_ERR(pages)) {
0241         dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
0242         return PTR_ERR(pages);
0243     }
0244 
0245     /* for non-cached buffers, ensure the new pages are clean because
0246      * DSS, GPU, etc. are not cache coherent:
0247      */
0248     if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
0249         addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
0250         if (!addrs) {
0251             ret = -ENOMEM;
0252             goto free_pages;
0253         }
0254 
0255         for (i = 0; i < npages; i++) {
0256             addrs[i] = dma_map_page(dev->dev, pages[i],
0257                     0, PAGE_SIZE, DMA_TO_DEVICE);
0258 
0259             if (dma_mapping_error(dev->dev, addrs[i])) {
0260                 dev_warn(dev->dev,
0261                     "%s: failed to map page\n", __func__);
0262 
0263                 for (i = i - 1; i >= 0; --i) {
0264                     dma_unmap_page(dev->dev, addrs[i],
0265                         PAGE_SIZE, DMA_TO_DEVICE);
0266                 }
0267 
0268                 ret = -ENOMEM;
0269                 goto free_addrs;
0270             }
0271         }
0272     } else {
0273         addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
0274         if (!addrs) {
0275             ret = -ENOMEM;
0276             goto free_pages;
0277         }
0278     }
0279 
0280     omap_obj->dma_addrs = addrs;
0281     omap_obj->pages = pages;
0282 
0283     return 0;
0284 
0285 free_addrs:
0286     kfree(addrs);
0287 free_pages:
0288     drm_gem_put_pages(obj, pages, true, false);
0289 
0290     return ret;
0291 }
0292 
0293 /* Release backing pages. Must be called with the omap_obj.lock held. */
0294 static void omap_gem_detach_pages(struct drm_gem_object *obj)
0295 {
0296     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0297     unsigned int npages = obj->size >> PAGE_SHIFT;
0298     unsigned int i;
0299 
0300     lockdep_assert_held(&omap_obj->lock);
0301 
0302     for (i = 0; i < npages; i++) {
0303         if (omap_obj->dma_addrs[i])
0304             dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
0305                        PAGE_SIZE, DMA_TO_DEVICE);
0306     }
0307 
0308     kfree(omap_obj->dma_addrs);
0309     omap_obj->dma_addrs = NULL;
0310 
0311     drm_gem_put_pages(obj, omap_obj->pages, true, false);
0312     omap_obj->pages = NULL;
0313 }
0314 
0315 /* get buffer flags */
0316 u32 omap_gem_flags(struct drm_gem_object *obj)
0317 {
0318     return to_omap_bo(obj)->flags;
0319 }
0320 
0321 /** get mmap size */
0322 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
0323 {
0324     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0325     size_t size = obj->size;
0326 
0327     if (omap_obj->flags & OMAP_BO_TILED_MASK) {
0328         /* for tiled buffers, the virtual size has stride rounded up
0329          * to 4kb.. (to hide the fact that row n+1 might start 16kb or
0330          * 32kb later!).  But we don't back the entire buffer with
0331          * pages, only the valid picture part.. so need to adjust for
0332          * this in the size used to mmap and generate mmap offset
0333          */
0334         size = tiler_vsize(gem2fmt(omap_obj->flags),
0335                 omap_obj->width, omap_obj->height);
0336     }
0337 
0338     return size;
0339 }
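
/*
 * Usage sketch (illustrative, not part of the original file): the mmap size
 * and the fake offset created by omap_gem_mmap_offset() above go together;
 * for untiled buffers the size is simply obj->size, for tiled buffers it is
 * the stride-padded virtual size reported by tiler_vsize():
 *
 *	u64 off = omap_gem_mmap_offset(obj);
 *	size_t len = omap_gem_mmap_size(obj);
 *	// userspace mmap()s 'len' bytes at offset 'off' on the DRM fd
 */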
0340 
0341 /* -----------------------------------------------------------------------------
0342  * Fault Handling
0343  */
0344 
0345 /* Normal handling for the case of faulting in non-tiled buffers */
0346 static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
0347         struct vm_area_struct *vma, struct vm_fault *vmf)
0348 {
0349     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0350     unsigned long pfn;
0351     pgoff_t pgoff;
0352 
0353     /* We don't use vmf->pgoff since that has the fake offset: */
0354     pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
0355 
0356     if (omap_obj->pages) {
0357         omap_gem_cpu_sync_page(obj, pgoff);
0358         pfn = page_to_pfn(omap_obj->pages[pgoff]);
0359     } else {
0360         BUG_ON(!omap_gem_is_contiguous(omap_obj));
0361         pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
0362     }
0363 
0364     VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
0365             pfn, pfn << PAGE_SHIFT);
0366 
0367     return vmf_insert_mixed(vma, vmf->address,
0368             __pfn_to_pfn_t(pfn, PFN_DEV));
0369 }
0370 
0371 /* Special handling for the case of faulting in 2d tiled buffers */
0372 static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
0373         struct vm_area_struct *vma, struct vm_fault *vmf)
0374 {
0375     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0376     struct omap_drm_private *priv = obj->dev->dev_private;
0377     struct omap_drm_usergart_entry *entry;
0378     enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
0379     struct page *pages[64];  /* XXX is this too much to have on stack? */
0380     unsigned long pfn;
0381     pgoff_t pgoff, base_pgoff;
0382     unsigned long vaddr;
0383     int i, err, slots;
0384     vm_fault_t ret = VM_FAULT_NOPAGE;
0385 
0386     /*
0387      * Note the height of the slot is also equal to the number of pages
0388      * that need to be mapped in to fill a 4kb-wide CPU page.  If the slot
0389      * height is 64, then 64 pages fill a 4kb wide by 64 row region.
0390      */
0391     const int n = priv->usergart[fmt].height;
0392     const int n_shift = priv->usergart[fmt].height_shift;
0393 
0394     /*
0395      * If buffer width in bytes > PAGE_SIZE then the virtual stride is
0396      * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
0397      * into account in some of the math, so figure out virtual stride
0398      * in pages
0399      */
0400     const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
0401 
0402     /* We don't use vmf->pgoff since that has the fake offset: */
0403     pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
0404 
0405     /*
0406      * Actual address we start mapping at is rounded down to previous slot
0407      * boundary in the y direction:
0408      */
0409     base_pgoff = round_down(pgoff, m << n_shift);
0410 
0411     /* figure out buffer width in slots */
0412     slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
0413 
0414     vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
0415 
0416     entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
0417 
0418     /* evict previous buffer using this usergart entry, if any: */
0419     if (entry->obj)
0420         omap_gem_evict_entry(entry->obj, fmt, entry);
0421 
0422     entry->obj = obj;
0423     entry->obj_pgoff = base_pgoff;
0424 
0425     /* now convert base_pgoff to phys offset from virt offset: */
0426     base_pgoff = (base_pgoff >> n_shift) * slots;
0427 
0428     /* for wider-than 4k.. figure out which part of the slot-row we want: */
0429     if (m > 1) {
0430         int off = pgoff % m;
0431         entry->obj_pgoff += off;
0432         base_pgoff /= m;
0433         slots = min(slots - (off << n_shift), n);
0434         base_pgoff += off << n_shift;
0435         vaddr += off << PAGE_SHIFT;
0436     }
0437 
0438     /*
0439      * Map in pages. Beyond the valid pixel part of the buffer, we set
0440      * pages[i] to NULL to get a dummy page mapped in.. if someone
0441      * reads/writes it they will get random/undefined content, but at
0442      * least it won't be corrupting whatever other random page used to
0443      * be mapped in, or other undefined behavior.
0444      */
0445     memcpy(pages, &omap_obj->pages[base_pgoff],
0446             sizeof(struct page *) * slots);
0447     memset(pages + slots, 0,
0448             sizeof(struct page *) * (n - slots));
0449 
0450     err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
0451     if (err) {
0452         ret = vmf_error(err);
0453         dev_err(obj->dev->dev, "failed to pin: %d\n", err);
0454         return ret;
0455     }
0456 
0457     pfn = entry->dma_addr >> PAGE_SHIFT;
0458 
0459     VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
0460             pfn, pfn << PAGE_SHIFT);
0461 
0462     for (i = n; i > 0; i--) {
0463         ret = vmf_insert_mixed(vma,
0464             vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
0465         if (ret & VM_FAULT_ERROR)
0466             break;
0467         pfn += priv->usergart[fmt].stride_pfn;
0468         vaddr += PAGE_SIZE * m;
0469     }
0470 
0471     /* simple round-robin: */
0472     priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
0473                  % NUM_USERGART_ENTRIES;
0474 
0475     return ret;
0476 }
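
/*
 * Worked example (illustrative, numbers assumed): for a tiled buffer whose
 * stride fits in one CPU page (m == 1) and a slot height of n == 64, a fault
 * at page offset 100 rounds down to base_pgoff == 64, the selected usergart
 * entry is re-pinned to that slot-row, and the loop above inserts 64 PTEs
 * covering pages 64..127 of the mapping, so neighbouring accesses within the
 * same slot-row are served without further faults until the entry is evicted.
 */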
0477 
0478 /**
0479  * omap_gem_fault       -   pagefault handler for GEM objects
0480  * @vmf: fault detail
0481  *
0482  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
0483  * does most of the work for us including the actual map/unmap calls
0484  * but we need to do the actual page work.
0485  *
0486  * The VMA was set up by GEM. In doing so it also ensured that the
0487  * vma->vm_private_data points to the GEM object that is backing this
0488  * mapping.
0489  */
0490 static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
0491 {
0492     struct vm_area_struct *vma = vmf->vma;
0493     struct drm_gem_object *obj = vma->vm_private_data;
0494     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0495     int err;
0496     vm_fault_t ret;
0497 
0498     /* Make sure we don't parallel update on a fault, nor move or remove
0499      * something from beneath our feet
0500      */
0501     mutex_lock(&omap_obj->lock);
0502 
0503     /* if a shmem backed object, make sure we have pages attached now */
0504     err = omap_gem_attach_pages(obj);
0505     if (err) {
0506         ret = vmf_error(err);
0507         goto fail;
0508     }
0509 
0510     /* where should we do corresponding put_pages().. we are mapping
0511      * the original page, rather than thru a GART, so we can't rely
0512      * on eviction to trigger this.  But munmap() of all mappings should
0513      * probably trigger put_pages()?
0514      */
0515 
0516     if (omap_obj->flags & OMAP_BO_TILED_MASK)
0517         ret = omap_gem_fault_2d(obj, vma, vmf);
0518     else
0519         ret = omap_gem_fault_1d(obj, vma, vmf);
0520 
0521 
0522 fail:
0523     mutex_unlock(&omap_obj->lock);
0524     return ret;
0525 }
0526 
0527 /** We override mainly to fix up some of the vm mapping flags.. */
0528 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
0529 {
0530     int ret;
0531 
0532     ret = drm_gem_mmap(filp, vma);
0533     if (ret) {
0534         DBG("mmap failed: %d", ret);
0535         return ret;
0536     }
0537 
0538     return omap_gem_mmap_obj(vma->vm_private_data, vma);
0539 }
0540 
0541 int omap_gem_mmap_obj(struct drm_gem_object *obj,
0542         struct vm_area_struct *vma)
0543 {
0544     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0545 
0546     vma->vm_flags &= ~VM_PFNMAP;
0547     vma->vm_flags |= VM_MIXEDMAP;
0548 
0549     if (omap_obj->flags & OMAP_BO_WC) {
0550         vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
0551     } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
0552         vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
0553     } else {
0554         /*
0555          * We do have some private objects, at least for scanout buffers
0556          * on hardware without DMM/TILER.  But these are allocated write-
0557          * combine
0558          */
0559         if (WARN_ON(!obj->filp))
0560             return -EINVAL;
0561 
0562         /*
0563          * Shunt off cached objs to shmem file so they have their own
0564          * address_space (so unmap_mapping_range does what we want,
0565          * in particular in the case of mmap'd dmabufs)
0566          */
0567         vma->vm_pgoff = 0;
0568         vma_set_file(vma, obj->filp);
0569 
0570         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
0571     }
0572 
0573     return 0;
0574 }
0575 
0576 /* -----------------------------------------------------------------------------
0577  * Dumb Buffers
0578  */
0579 
0580 /**
0581  * omap_gem_dumb_create -   create a dumb buffer
0582  * @file: our client file
0583  * @dev: our device
0584  * @args: the requested arguments copied from userspace
0585  *
0586  * Allocate a buffer suitable for use for a frame buffer of the
0587  * form described by user space. Give userspace a handle by which
0588  * to reference it.
0589  */
0590 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
0591         struct drm_mode_create_dumb *args)
0592 {
0593     union omap_gem_size gsize;
0594 
0595     args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
0596 
0597     args->size = PAGE_ALIGN(args->pitch * args->height);
0598 
0599     gsize = (union omap_gem_size){
0600         .bytes = args->size,
0601     };
0602 
0603     return omap_gem_new_handle(dev, file, gsize,
0604             OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
0605 }
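
/*
 * Userspace sketch (illustrative; assumes an open DRM fd on omapdrm, not part
 * of the original file). The generic DRM dumb-buffer paths reach
 * omap_gem_dumb_create() above and omap_gem_dumb_map_offset() below:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 640, .height = 480, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *fb;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  fd, map.offset);
 */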
0606 
0607 /**
0608  * omap_gem_dumb_map    -   buffer mapping for dumb interface
0609  * @file: our drm client file
0610  * @dev: drm device
0611  * @handle: GEM handle to the object (from dumb_create)
0612  * @offset: memory map offset placeholder
0613  *
0614  * Do the necessary setup to allow the mapping of the frame buffer
0615  * into user memory. We don't have to do much here at the moment.
0616  */
0617 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
0618         u32 handle, u64 *offset)
0619 {
0620     struct drm_gem_object *obj;
0621     int ret = 0;
0622 
0623     /* GEM does all our handle to object mapping */
0624     obj = drm_gem_object_lookup(file, handle);
0625     if (obj == NULL) {
0626         ret = -ENOENT;
0627         goto fail;
0628     }
0629 
0630     *offset = omap_gem_mmap_offset(obj);
0631 
0632     drm_gem_object_put(obj);
0633 
0634 fail:
0635     return ret;
0636 }
0637 
0638 #ifdef CONFIG_DRM_FBDEV_EMULATION
0639 /* Set scrolling position.  This allows us to implement fast scrolling
0640  * for console.
0641  *
0642  * Call only from non-atomic contexts.
0643  */
0644 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
0645 {
0646     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0647     u32 npages = obj->size >> PAGE_SHIFT;
0648     int ret = 0;
0649 
0650     if (roll > npages) {
0651         dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
0652         return -EINVAL;
0653     }
0654 
0655     omap_obj->roll = roll;
0656 
0657     mutex_lock(&omap_obj->lock);
0658 
0659     /* if we aren't mapped yet, we don't need to do anything */
0660     if (omap_obj->block) {
0661         ret = omap_gem_attach_pages(obj);
0662         if (ret)
0663             goto fail;
0664 
0665         ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
0666                 roll, true);
0667         if (ret)
0668             dev_err(obj->dev->dev, "could not repin: %d\n", ret);
0669     }
0670 
0671 fail:
0672     mutex_unlock(&omap_obj->lock);
0673 
0674     return ret;
0675 }
0676 #endif
0677 
0678 /* -----------------------------------------------------------------------------
0679  * Memory Management & DMA Sync
0680  */
0681 
0682 /*
0683  * shmem buffers that are mapped cached are not coherent.
0684  *
0685  * We keep track of dirty pages using page faulting to perform cache management.
0686  * When a page is mapped to the CPU in read/write mode the device can't access
0687  * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
0688  * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
0689  * unmapped from the CPU.
0690  */
0691 static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
0692 {
0693     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0694 
0695     return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
0696         ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
0697 }
0698 
0699 /* Sync the buffer for CPU access.. note pages should already be
0700  * attached, ie. omap_gem_get_pages()
0701  */
0702 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
0703 {
0704     struct drm_device *dev = obj->dev;
0705     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0706 
0707     if (omap_gem_is_cached_coherent(obj))
0708         return;
0709 
0710     if (omap_obj->dma_addrs[pgoff]) {
0711         dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
0712                 PAGE_SIZE, DMA_TO_DEVICE);
0713         omap_obj->dma_addrs[pgoff] = 0;
0714     }
0715 }
0716 
0717 /* sync the buffer for DMA access */
0718 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
0719         enum dma_data_direction dir)
0720 {
0721     struct drm_device *dev = obj->dev;
0722     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0723     int i, npages = obj->size >> PAGE_SHIFT;
0724     struct page **pages = omap_obj->pages;
0725     bool dirty = false;
0726 
0727     if (omap_gem_is_cached_coherent(obj))
0728         return;
0729 
0730     for (i = 0; i < npages; i++) {
0731         if (!omap_obj->dma_addrs[i]) {
0732             dma_addr_t addr;
0733 
0734             addr = dma_map_page(dev->dev, pages[i], 0,
0735                         PAGE_SIZE, dir);
0736             if (dma_mapping_error(dev->dev, addr)) {
0737                 dev_warn(dev->dev, "%s: failed to map page\n",
0738                     __func__);
0739                 break;
0740             }
0741 
0742             dirty = true;
0743             omap_obj->dma_addrs[i] = addr;
0744         }
0745     }
0746 
0747     if (dirty) {
0748         unmap_mapping_range(obj->filp->f_mapping, 0,
0749                     omap_gem_mmap_size(obj), 1);
0750     }
0751 }
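
/*
 * Usage sketch (illustrative): before handing a cached, shmem-backed buffer
 * to the device, sync the whole buffer for DMA; pages must already be
 * attached (see omap_gem_get_pages() below). Per-page CPU-side syncing is
 * done from the fault path via omap_gem_cpu_sync_page() above.
 *
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 */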
0752 
0753 static int omap_gem_pin_tiler(struct drm_gem_object *obj)
0754 {
0755     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0756     u32 npages = obj->size >> PAGE_SHIFT;
0757     enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
0758     struct tiler_block *block;
0759     int ret;
0760 
0761     BUG_ON(omap_obj->block);
0762 
0763     if (omap_obj->flags & OMAP_BO_TILED_MASK) {
0764         block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
0765                      PAGE_SIZE);
0766     } else {
0767         block = tiler_reserve_1d(obj->size);
0768     }
0769 
0770     if (IS_ERR(block)) {
0771         ret = PTR_ERR(block);
0772         dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
0773         goto fail;
0774     }
0775 
0776     /* TODO: enable async refill.. */
0777     ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
0778     if (ret) {
0779         tiler_release(block);
0780         dev_err(obj->dev->dev, "could not pin: %d\n", ret);
0781         goto fail;
0782     }
0783 
0784     omap_obj->dma_addr = tiler_ssptr(block);
0785     omap_obj->block = block;
0786 
0787     DBG("got dma address: %pad", &omap_obj->dma_addr);
0788 
0789 fail:
0790     return ret;
0791 }
0792 
0793 /**
0794  * omap_gem_pin() - Pin a GEM object in memory
0795  * @obj: the GEM object
0796  * @dma_addr: the DMA address
0797  *
0798  * Pin the given GEM object in memory and fill the dma_addr pointer with the
0799  * object's DMA address. If the buffer is not physically contiguous it will be
0800  * remapped through the TILER to provide a contiguous view.
0801  *
0802  * Pins are reference-counted, calling this function multiple times is allowed
0803  * as long the corresponding omap_gem_unpin() calls are balanced.
0804  *
0805  * Return 0 on success or a negative error code otherwise.
0806  */
0807 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
0808 {
0809     struct omap_drm_private *priv = obj->dev->dev_private;
0810     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0811     int ret = 0;
0812 
0813     mutex_lock(&omap_obj->lock);
0814 
0815     if (!omap_gem_is_contiguous(omap_obj)) {
0816         if (refcount_read(&omap_obj->pin_cnt) == 0) {
0817 
0818             refcount_set(&omap_obj->pin_cnt, 1);
0819 
0820             ret = omap_gem_attach_pages(obj);
0821             if (ret)
0822                 goto fail;
0823 
0824             if (omap_obj->flags & OMAP_BO_SCANOUT) {
0825                 if (priv->has_dmm) {
0826                     ret = omap_gem_pin_tiler(obj);
0827                     if (ret)
0828                         goto fail;
0829                 }
0830             }
0831         } else {
0832             refcount_inc(&omap_obj->pin_cnt);
0833         }
0834     }
0835 
0836     if (dma_addr)
0837         *dma_addr = omap_obj->dma_addr;
0838 
0839 fail:
0840     mutex_unlock(&omap_obj->lock);
0841 
0842     return ret;
0843 }
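
/*
 * Usage sketch (illustrative; the scanout programming is assumed, not part of
 * this file): display code pins the object to obtain a DMA address that stays
 * valid until the matching unpin:
 *
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = omap_gem_pin(obj, &paddr);
 *	if (ret)
 *		return ret;
 *	// ... program the scanout hardware with 'paddr' ...
 *	omap_gem_unpin(obj);
 */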
0844 
0845 /**
0846  * omap_gem_unpin_locked() - Unpin a GEM object from memory
0847  * @obj: the GEM object
0848  *
0849  * omap_gem_unpin() without locking.
0850  */
0851 static void omap_gem_unpin_locked(struct drm_gem_object *obj)
0852 {
0853     struct omap_drm_private *priv = obj->dev->dev_private;
0854     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0855     int ret;
0856 
0857     if (omap_gem_is_contiguous(omap_obj))
0858         return;
0859 
0860     if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
0861         if (omap_obj->sgt) {
0862             sg_free_table(omap_obj->sgt);
0863             kfree(omap_obj->sgt);
0864             omap_obj->sgt = NULL;
0865         }
0866         if (!(omap_obj->flags & OMAP_BO_SCANOUT))
0867             return;
0868         if (priv->has_dmm) {
0869             ret = tiler_unpin(omap_obj->block);
0870             if (ret) {
0871                 dev_err(obj->dev->dev,
0872                     "could not unpin pages: %d\n", ret);
0873             }
0874             ret = tiler_release(omap_obj->block);
0875             if (ret) {
0876                 dev_err(obj->dev->dev,
0877                     "could not release unmap: %d\n", ret);
0878             }
0879             omap_obj->dma_addr = 0;
0880             omap_obj->block = NULL;
0881         }
0882     }
0883 }
0884 
0885 /**
0886  * omap_gem_unpin() - Unpin a GEM object from memory
0887  * @obj: the GEM object
0888  *
0889  * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
0890  * reference-counted, the actual unpin will only be performed when the number
0891  * of calls to this function matches the number of calls to omap_gem_pin().
0892  */
0893 void omap_gem_unpin(struct drm_gem_object *obj)
0894 {
0895     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0896 
0897     mutex_lock(&omap_obj->lock);
0898     omap_gem_unpin_locked(obj);
0899     mutex_unlock(&omap_obj->lock);
0900 }
0901 
0902 /* Get rotated scanout address (only valid if already pinned), at the
0903  * specified orientation and x,y offset from top-left corner of buffer
0904  * (only valid for tiled 2d buffers)
0905  */
0906 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
0907         int x, int y, dma_addr_t *dma_addr)
0908 {
0909     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0910     int ret = -EINVAL;
0911 
0912     mutex_lock(&omap_obj->lock);
0913 
0914     if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
0915             (omap_obj->flags & OMAP_BO_TILED_MASK)) {
0916         *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
0917         ret = 0;
0918     }
0919 
0920     mutex_unlock(&omap_obj->lock);
0921 
0922     return ret;
0923 }
0924 
0925 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
0926 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
0927 {
0928     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0929     int ret = -EINVAL;
0930     if (omap_obj->flags & OMAP_BO_TILED_MASK)
0931         ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
0932     return ret;
0933 }
0934 
0935 /* if !remap, and we don't have pages backing, then fail, rather than
0936  * increasing the pin count (which we don't really do yet anyways,
0937  * because we don't support swapping pages back out).  And 'remap'
0938  * might not be quite the right name, but I wanted to keep it working
0939  * similarly to omap_gem_pin().  Note though that mutex is not
0940  * aquired if !remap (because this can be called in atomic ctxt),
0941  * acquired if !remap (because this can be called in atomic ctxt),
0942  * same way.  If !remap, a matching omap_gem_put_pages() call is not
0943  * required (and should not be made).
0944  */
0945 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
0946         bool remap)
0947 {
0948     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0949     int ret = 0;
0950 
0951     mutex_lock(&omap_obj->lock);
0952 
0953     if (remap) {
0954         ret = omap_gem_attach_pages(obj);
0955         if (ret)
0956             goto unlock;
0957     }
0958 
0959     if (!omap_obj->pages) {
0960         ret = -ENOMEM;
0961         goto unlock;
0962     }
0963 
0964     *pages = omap_obj->pages;
0965 
0966 unlock:
0967     mutex_unlock(&omap_obj->lock);
0968 
0969     return ret;
0970 }
0971 
0972 /* release pages when DMA no longer being performed */
0973 int omap_gem_put_pages(struct drm_gem_object *obj)
0974 {
0975     /* do something here if we dynamically attach/detach pages.. at
0976      * least they would no longer need to be pinned if everyone has
0977      * released the pages..
0978      */
0979     return 0;
0980 }
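
/*
 * Illustrative pairing (assumed usage): callers that passed remap == true to
 * omap_gem_get_pages() drop their use of the pages with omap_gem_put_pages()
 * once the DMA they set up has completed:
 *
 *	struct page **pages;
 *	int ret;
 *
 *	ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	// ... use the page array ...
 *	omap_gem_put_pages(obj);
 */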
0981 
0982 struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
0983         enum dma_data_direction dir)
0984 {
0985     struct omap_gem_object *omap_obj = to_omap_bo(obj);
0986     dma_addr_t addr;
0987     struct sg_table *sgt;
0988     struct scatterlist *sg;
0989     unsigned int count, len, stride, i;
0990     int ret;
0991 
0992     ret = omap_gem_pin(obj, &addr);
0993     if (ret)
0994         return ERR_PTR(ret);
0995 
0996     mutex_lock(&omap_obj->lock);
0997 
0998     sgt = omap_obj->sgt;
0999     if (sgt)
1000         goto out;
1001 
1002     sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1003     if (!sgt) {
1004         ret = -ENOMEM;
1005         goto err_unpin;
1006     }
1007 
1008     if (addr) {
1009         if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1010             enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
1011 
1012             len = omap_obj->width << (int)fmt;
1013             count = omap_obj->height;
1014             stride = tiler_stride(fmt, 0);
1015         } else {
1016             len = obj->size;
1017             count = 1;
1018             stride = 0;
1019         }
1020     } else {
1021         count = obj->size >> PAGE_SHIFT;
1022     }
1023 
1024     ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1025     if (ret)
1026         goto err_free;
1027 
1028     /* this must be after omap_gem_pin() to ensure we have pages attached */
1029     omap_gem_dma_sync_buffer(obj, dir);
1030 
1031     if (addr) {
1032         for_each_sg(sgt->sgl, sg, count, i) {
1033             sg_set_page(sg, phys_to_page(addr), len,
1034                 offset_in_page(addr));
1035             sg_dma_address(sg) = addr;
1036             sg_dma_len(sg) = len;
1037 
1038             addr += stride;
1039         }
1040     } else {
1041         for_each_sg(sgt->sgl, sg, count, i) {
1042             sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1043             sg_dma_address(sg) = omap_obj->dma_addrs[i];
1044             sg_dma_len(sg) =  PAGE_SIZE;
1045         }
1046     }
1047 
1048     omap_obj->sgt = sgt;
1049 out:
1050     mutex_unlock(&omap_obj->lock);
1051     return sgt;
1052 
1053 err_free:
1054     kfree(sgt);
1055 err_unpin:
1056     mutex_unlock(&omap_obj->lock);
1057     omap_gem_unpin(obj);
1058     return ERR_PTR(ret);
1059 }
1060 
1061 void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1062 {
1063     struct omap_gem_object *omap_obj = to_omap_bo(obj);
1064 
1065     if (WARN_ON(omap_obj->sgt != sgt))
1066         return;
1067 
1068     omap_gem_unpin(obj);
1069 }
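
/*
 * Exporter-side sketch (assumed usage, not from the original file): a dma-buf
 * exporter maps the object with omap_gem_get_sg() and must hand the same
 * table back to omap_gem_put_sg(), which drops the pin taken above:
 *
 *	struct sg_table *sgt = omap_gem_get_sg(obj, DMA_BIDIRECTIONAL);
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	// ... pass sgt to the importing device ...
 *	omap_gem_put_sg(obj, sgt);
 */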
1070 
1071 #ifdef CONFIG_DRM_FBDEV_EMULATION
1072 /*
1073  * Get kernel virtual address for CPU access.. this more or less only
1074  * exists for omap_fbdev.
1075  */
1076 void *omap_gem_vaddr(struct drm_gem_object *obj)
1077 {
1078     struct omap_gem_object *omap_obj = to_omap_bo(obj);
1079     void *vaddr;
1080     int ret;
1081 
1082     mutex_lock(&omap_obj->lock);
1083 
1084     if (!omap_obj->vaddr) {
1085         ret = omap_gem_attach_pages(obj);
1086         if (ret) {
1087             vaddr = ERR_PTR(ret);
1088             goto unlock;
1089         }
1090 
1091         omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1092                 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1093     }
1094 
1095     vaddr = omap_obj->vaddr;
1096 
1097 unlock:
1098     mutex_unlock(&omap_obj->lock);
1099     return vaddr;
1100 }
1101 #endif
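
/*
 * Usage sketch (illustrative; an fbdev-style caller is assumed): the kernel
 * mapping is created on first use and released only when the object is freed,
 * so callers just check for an error pointer:
 *
 *	void *vaddr = omap_gem_vaddr(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->size);
 */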
1102 
1103 /* -----------------------------------------------------------------------------
1104  * Power Management
1105  */
1106 
1107 #ifdef CONFIG_PM
1108 /* re-pin objects in DMM in resume path: */
1109 int omap_gem_resume(struct drm_device *dev)
1110 {
1111     struct omap_drm_private *priv = dev->dev_private;
1112     struct omap_gem_object *omap_obj;
1113     int ret = 0;
1114 
1115     mutex_lock(&priv->list_lock);
1116     list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1117         if (omap_obj->block) {
1118             struct drm_gem_object *obj = &omap_obj->base;
1119             u32 npages = obj->size >> PAGE_SHIFT;
1120 
1121             WARN_ON(!omap_obj->pages);  /* this can't happen */
1122             ret = tiler_pin(omap_obj->block,
1123                     omap_obj->pages, npages,
1124                     omap_obj->roll, true);
1125             if (ret) {
1126                 dev_err(dev->dev, "could not repin: %d\n", ret);
1127                 goto done;
1128             }
1129         }
1130     }
1131 
1132 done:
1133     mutex_unlock(&priv->list_lock);
1134     return ret;
1135 }
1136 #endif
1137 
1138 /* -----------------------------------------------------------------------------
1139  * DebugFS
1140  */
1141 
1142 #ifdef CONFIG_DEBUG_FS
1143 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1144 {
1145     struct omap_gem_object *omap_obj = to_omap_bo(obj);
1146     u64 off;
1147 
1148     off = drm_vma_node_start(&obj->vma_node);
1149 
1150     mutex_lock(&omap_obj->lock);
1151 
1152     seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1153             omap_obj->flags, obj->name, kref_read(&obj->refcount),
1154             off, &omap_obj->dma_addr,
1155             refcount_read(&omap_obj->pin_cnt),
1156             omap_obj->vaddr, omap_obj->roll);
1157 
1158     if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1159         seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1160         if (omap_obj->block) {
1161             struct tcm_area *area = &omap_obj->block->area;
1162             seq_printf(m, " (%dx%d, %dx%d)",
1163                     area->p0.x, area->p0.y,
1164                     area->p1.x, area->p1.y);
1165         }
1166     } else {
1167         seq_printf(m, " %zu", obj->size);
1168     }
1169 
1170     mutex_unlock(&omap_obj->lock);
1171 
1172     seq_printf(m, "\n");
1173 }
1174 
1175 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1176 {
1177     struct omap_gem_object *omap_obj;
1178     int count = 0;
1179     size_t size = 0;
1180 
1181     list_for_each_entry(omap_obj, list, mm_list) {
1182         struct drm_gem_object *obj = &omap_obj->base;
1183         seq_printf(m, "   ");
1184         omap_gem_describe(obj, m);
1185         count++;
1186         size += obj->size;
1187     }
1188 
1189     seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1190 }
1191 #endif
1192 
1193 /* -----------------------------------------------------------------------------
1194  * Constructor & Destructor
1195  */
1196 
1197 static void omap_gem_free_object(struct drm_gem_object *obj)
1198 {
1199     struct drm_device *dev = obj->dev;
1200     struct omap_drm_private *priv = dev->dev_private;
1201     struct omap_gem_object *omap_obj = to_omap_bo(obj);
1202 
1203     omap_gem_evict(obj);
1204 
1205     mutex_lock(&priv->list_lock);
1206     list_del(&omap_obj->mm_list);
1207     mutex_unlock(&priv->list_lock);
1208 
1209     /*
1210      * We own the sole reference to the object at this point, but to keep
1211      * lockdep happy, we must still take the omap_obj_lock to call
1212      * omap_gem_detach_pages(). This should hardly make any difference as
1213      * there can't be any lock contention.
1214      */
1215     mutex_lock(&omap_obj->lock);
1216 
1217     /* The object should not be pinned. */
1218     WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1219 
1220     if (omap_obj->pages) {
1221         if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1222             kfree(omap_obj->pages);
1223         else
1224             omap_gem_detach_pages(obj);
1225     }
1226 
1227     if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1228         dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1229                 omap_obj->dma_addr);
1230     } else if (omap_obj->vaddr) {
1231         vunmap(omap_obj->vaddr);
1232     } else if (obj->import_attach) {
1233         drm_prime_gem_destroy(obj, omap_obj->sgt);
1234     }
1235 
1236     mutex_unlock(&omap_obj->lock);
1237 
1238     drm_gem_object_release(obj);
1239 
1240     mutex_destroy(&omap_obj->lock);
1241 
1242     kfree(omap_obj);
1243 }
1244 
1245 static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1246 {
1247     struct omap_drm_private *priv = dev->dev_private;
1248 
1249     switch (flags & OMAP_BO_CACHE_MASK) {
1250     case OMAP_BO_CACHED:
1251     case OMAP_BO_WC:
1252     case OMAP_BO_CACHE_MASK:
1253         break;
1254 
1255     default:
1256         return false;
1257     }
1258 
1259     if (flags & OMAP_BO_TILED_MASK) {
1260         if (!priv->usergart)
1261             return false;
1262 
1263         switch (flags & OMAP_BO_TILED_MASK) {
1264         case OMAP_BO_TILED_8:
1265         case OMAP_BO_TILED_16:
1266         case OMAP_BO_TILED_32:
1267             break;
1268 
1269         default:
1270             return false;
1271         }
1272     }
1273 
1274     return true;
1275 }
1276 
1277 static const struct vm_operations_struct omap_gem_vm_ops = {
1278     .fault = omap_gem_fault,
1279     .open = drm_gem_vm_open,
1280     .close = drm_gem_vm_close,
1281 };
1282 
1283 static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1284     .free = omap_gem_free_object,
1285     .export = omap_gem_prime_export,
1286     .vm_ops = &omap_gem_vm_ops,
1287 };
1288 
1289 /* GEM buffer object constructor */
1290 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1291         union omap_gem_size gsize, u32 flags)
1292 {
1293     struct omap_drm_private *priv = dev->dev_private;
1294     struct omap_gem_object *omap_obj;
1295     struct drm_gem_object *obj;
1296     struct address_space *mapping;
1297     size_t size;
1298     int ret;
1299 
1300     if (!omap_gem_validate_flags(dev, flags))
1301         return NULL;
1302 
1303     /* Validate the flags and compute the memory and cache flags. */
1304     if (flags & OMAP_BO_TILED_MASK) {
1305         /*
1306          * Tiled buffers are always backed by shmem pages. When they are
1307          * scanned out, they are remapped into DMM/TILER.
1308          */
1309         flags |= OMAP_BO_MEM_SHMEM;
1310 
1311         /*
1312          * Currently don't allow cached buffers. There is some caching
1313          * stuff that needs to be handled better.
1314          */
1315         flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1316         flags |= tiler_get_cpu_cache_flags();
1317     } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1318         /*
1319          * If we don't have DMM, we must allocate scanout buffers
1320          * from contiguous DMA memory.
1321          */
1322         flags |= OMAP_BO_MEM_DMA_API;
1323     } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1324         /*
1325          * All other buffers not backed by dma_buf are shmem-backed.
1326          */
1327         flags |= OMAP_BO_MEM_SHMEM;
1328     }
1329 
1330     /* Allocate and initialize the OMAP GEM object. */
1331     omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1332     if (!omap_obj)
1333         return NULL;
1334 
1335     obj = &omap_obj->base;
1336     omap_obj->flags = flags;
1337     mutex_init(&omap_obj->lock);
1338 
1339     if (flags & OMAP_BO_TILED_MASK) {
1340         /*
1341          * For tiled buffers align dimensions to slot boundaries and
1342          * calculate size based on aligned dimensions.
1343          */
1344         tiler_align(gem2fmt(flags), &gsize.tiled.width,
1345                 &gsize.tiled.height);
1346 
1347         size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1348                   gsize.tiled.height);
1349 
1350         omap_obj->width = gsize.tiled.width;
1351         omap_obj->height = gsize.tiled.height;
1352     } else {
1353         size = PAGE_ALIGN(gsize.bytes);
1354     }
1355 
1356     obj->funcs = &omap_gem_object_funcs;
1357 
1358     /* Initialize the GEM object. */
1359     if (!(flags & OMAP_BO_MEM_SHMEM)) {
1360         drm_gem_private_object_init(dev, obj, size);
1361     } else {
1362         ret = drm_gem_object_init(dev, obj, size);
1363         if (ret)
1364             goto err_free;
1365 
1366         mapping = obj->filp->f_mapping;
1367         mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1368     }
1369 
1370     /* Allocate memory if needed. */
1371     if (flags & OMAP_BO_MEM_DMA_API) {
1372         omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1373                            &omap_obj->dma_addr,
1374                            GFP_KERNEL);
1375         if (!omap_obj->vaddr)
1376             goto err_release;
1377     }
1378 
1379     mutex_lock(&priv->list_lock);
1380     list_add(&omap_obj->mm_list, &priv->obj_list);
1381     mutex_unlock(&priv->list_lock);
1382 
1383     return obj;
1384 
1385 err_release:
1386     drm_gem_object_release(obj);
1387 err_free:
1388     kfree(omap_obj);
1389     return NULL;
1390 }
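
/*
 * Allocation sketch (illustrative): callers describe the buffer either in
 * bytes or, for tiled formats, in pixels through union omap_gem_size, as
 * omap_gem_dumb_create() above does for the byte-sized case:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 */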
1391 
1392 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1393                        struct sg_table *sgt)
1394 {
1395     struct omap_drm_private *priv = dev->dev_private;
1396     struct omap_gem_object *omap_obj;
1397     struct drm_gem_object *obj;
1398     union omap_gem_size gsize;
1399 
1400     /* Without a DMM only physically contiguous buffers can be supported. */
1401     if (sgt->orig_nents != 1 && !priv->has_dmm)
1402         return ERR_PTR(-EINVAL);
1403 
1404     gsize.bytes = PAGE_ALIGN(size);
1405     obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1406     if (!obj)
1407         return ERR_PTR(-ENOMEM);
1408 
1409     omap_obj = to_omap_bo(obj);
1410 
1411     mutex_lock(&omap_obj->lock);
1412 
1413     omap_obj->sgt = sgt;
1414 
1415     if (sgt->orig_nents == 1) {
1416         omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1417     } else {
1418         /* Create pages list from sgt */
1419         struct page **pages;
1420         unsigned int npages;
1421         unsigned int ret;
1422 
1423         npages = DIV_ROUND_UP(size, PAGE_SIZE);
1424         pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1425         if (!pages) {
1426             omap_gem_free_object(obj);
1427             obj = ERR_PTR(-ENOMEM);
1428             goto done;
1429         }
1430 
1431         omap_obj->pages = pages;
1432         ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1433         if (ret) {
1434             omap_gem_free_object(obj);
1435             obj = ERR_PTR(-ENOMEM);
1436             goto done;
1437         }
1438     }
1439 
1440 done:
1441     mutex_unlock(&omap_obj->lock);
1442     return obj;
1443 }
1444 
1445 /* convenience method to construct a GEM buffer object, and userspace handle */
1446 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1447         union omap_gem_size gsize, u32 flags, u32 *handle)
1448 {
1449     struct drm_gem_object *obj;
1450     int ret;
1451 
1452     obj = omap_gem_new(dev, gsize, flags);
1453     if (!obj)
1454         return -ENOMEM;
1455 
1456     ret = drm_gem_handle_create(file, obj, handle);
1457     if (ret) {
1458         omap_gem_free_object(obj);
1459         return ret;
1460     }
1461 
1462     /* drop reference from allocate - handle holds it now */
1463     drm_gem_object_put(obj);
1464 
1465     return 0;
1466 }
1467 
1468 /* -----------------------------------------------------------------------------
1469  * Init & Cleanup
1470  */
1471 
1472 /* If DMM is used, we need to set some stuff up.. */
1473 void omap_gem_init(struct drm_device *dev)
1474 {
1475     struct omap_drm_private *priv = dev->dev_private;
1476     struct omap_drm_usergart *usergart;
1477     const enum tiler_fmt fmts[] = {
1478             TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1479     };
1480     int i, j;
1481 
1482     if (!dmm_is_available()) {
1483         /* DMM only supported on OMAP4 and later, so this isn't fatal */
1484         dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1485         return;
1486     }
1487 
1488     usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1489     if (!usergart)
1490         return;
1491 
1492     /* reserve 4k aligned/wide regions for userspace mappings: */
1493     for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1494         u16 h = 1, w = PAGE_SIZE >> i;
1495 
1496         tiler_align(fmts[i], &w, &h);
1497         /* note: since each region is one 4kb page wide and uses the
1498          * minimum number of rows, the height ends up being the same as
1499          * the number of pages in the region
1500          */
1501         usergart[i].height = h;
1502         usergart[i].height_shift = ilog2(h);
1503         usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1504         usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1505         for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1506             struct omap_drm_usergart_entry *entry;
1507             struct tiler_block *block;
1508 
1509             entry = &usergart[i].entry[j];
1510             block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1511             if (IS_ERR(block)) {
1512                 dev_err(dev->dev,
1513                         "reserve failed: %d, %d, %ld\n",
1514                         i, j, PTR_ERR(block));
1515                 return;
1516             }
1517             entry->dma_addr = tiler_ssptr(block);
1518             entry->block = block;
1519 
1520             DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1521                     &entry->dma_addr,
1522                     usergart[i].stride_pfn << PAGE_SHIFT);
1523         }
1524     }
1525 
1526     priv->usergart = usergart;
1527     priv->has_dmm = true;
1528 }
1529 
1530 void omap_gem_deinit(struct drm_device *dev)
1531 {
1532     struct omap_drm_private *priv = dev->dev_private;
1533 
1534     /* I believe we can rely on there being no more outstanding GEM
1535      * objects which could depend on usergart/dmm at this point.
1536      */
1537     kfree(priv->usergart);
1538 }