// SPDX-License-Identifier: GPL-2.0-only
/*
 *  psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 *  -   we need to work out if the MMU is relevant (e.g. for
 *      accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <asm/set_memory.h>

#include <drm/drm.h>
#include <drm/drm_vma_manager.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * PSB GEM object
 */

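/**
 * psb_gem_pin - pin a GEM object's pages into the GTT and GPU MMU
 * @pobj: the GEM object to pin
 *
 * Pins are counted: each successful call must be balanced by a call
 * to psb_gem_unpin(). Stolen objects are permanently resident and
 * only have their pin count raised.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */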
int psb_gem_pin(struct psb_gem_object *pobj)
{
    struct drm_gem_object *obj = &pobj->base;
    struct drm_device *dev = obj->dev;
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
    u32 gpu_base = dev_priv->gtt.gatt_start;
    struct page **pages;
    unsigned int npages;
    int ret;

    ret = dma_resv_lock(obj->resv, NULL);
    if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
        return ret;

    if (pobj->in_gart || pobj->stolen)
        goto out; /* already mapped */

    pages = drm_gem_get_pages(obj);
    if (IS_ERR(pages)) {
        ret = PTR_ERR(pages);
        goto err_dma_resv_unlock;
    }

    npages = obj->size / PAGE_SIZE;

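    /*
     * The GPU side is not cache-coherent, so switch the backing
     * pages to write-combining before mapping them into the GTT.
     */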
    set_pages_array_wc(pages, npages);

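    /*
     * Insert the pages into both the GTT and the GPU's MMU (see the
     * TODO at the top of this file about whether the MMU mapping is
     * actually needed).
     */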
    psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
    psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
                 (gpu_base + pobj->offset), npages, 0, 0,
                 PSB_MMU_CACHED_MEMORY);

    pobj->pages = pages;

out:
    ++pobj->in_gart;
    dma_resv_unlock(obj->resv);

    return 0;

err_dma_resv_unlock:
    dma_resv_unlock(obj->resv);
    return ret;
}

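/**
 * psb_gem_unpin - drop one pin on a GEM object
 * @pobj: the GEM object to unpin
 *
 * When the last pin on a non-stolen object is dropped, its pages are
 * removed from the GPU MMU and the GTT, switched back to write-back
 * caching and released.
 */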
void psb_gem_unpin(struct psb_gem_object *pobj)
{
    struct drm_gem_object *obj = &pobj->base;
    struct drm_device *dev = obj->dev;
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
    u32 gpu_base = dev_priv->gtt.gatt_start;
    unsigned long npages;
    int ret;

    ret = dma_resv_lock(obj->resv, NULL);
    if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
        return;

    WARN_ON(!pobj->in_gart);

    --pobj->in_gart;

    if (pobj->in_gart || pobj->stolen)
        goto out;

    npages = obj->size / PAGE_SIZE;

    psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
                 (gpu_base + pobj->offset), npages, 0, 0);
    psb_gtt_remove_pages(dev_priv, &pobj->resource);

    /* Reset caching flags */
    set_pages_array_wb(pobj->pages, npages);

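    /* Return the pages to shmem, marked dirty but not accessed. */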
    drm_gem_put_pages(obj, pobj->pages, true, false);
    pobj->pages = NULL;

out:
    dma_resv_unlock(obj->resv);
}

static vm_fault_t psb_gem_fault(struct vm_fault *vmf);

static void psb_gem_free_object(struct drm_gem_object *obj)
{
    struct psb_gem_object *pobj = to_psb_gem_object(obj);

    /* Undo the mmap pin if we are destroying the object */
    if (pobj->mmapping)
        psb_gem_unpin(pobj);

    drm_gem_object_release(obj);

    WARN_ON(pobj->in_gart && !pobj->stolen);

    release_resource(&pobj->resource);
    kfree(pobj);
}

static const struct vm_operations_struct psb_gem_vm_ops = {
    .fault = psb_gem_fault,
    .open = drm_gem_vm_open,
    .close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs psb_gem_object_funcs = {
    .free = psb_gem_free_object,
    .vm_ops = &psb_gem_vm_ops,
};

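/**
 * psb_gem_create - create a GEM object backed by shmem or stolen memory
 * @dev: our DRM device
 * @size: requested size in bytes; rounded up to a whole number of pages
 * @name: name for the allocated GTT resource
 * @stolen: back the object with stolen memory rather than shmem
 * @align: alignment of the GTT resource
 *
 * Stolen objects have no backing pages of their own and are created
 * as private GEM objects; shmem-backed objects are limited to
 * 32-bit-addressable pages.
 *
 * Returns: the new object, or an ERR_PTR()-encoded errno on failure.
 */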
struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
    struct psb_gem_object *pobj;
    struct drm_gem_object *obj;
    int ret;

    size = roundup(size, PAGE_SIZE);

    pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
    if (!pobj)
        return ERR_PTR(-ENOMEM);
    obj = &pobj->base;

    /* GTT resource */

    ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
                    &pobj->offset);
    if (ret)
        goto err_kfree;

    if (stolen) {
        pobj->stolen = true;
        pobj->in_gart = 1;
    }

    /* GEM object */

    obj->funcs = &psb_gem_object_funcs;

    if (stolen) {
        drm_gem_private_object_init(dev, obj, size);
    } else {
        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
            goto err_release_resource;

        /* Limit the object to 32-bit mappings */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
    }

    return pobj;

err_release_resource:
    release_resource(&pobj->resource);
err_kfree:
    kfree(pobj);
    return ERR_PTR(ret);
}

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use as a frame buffer of the
 * form described by user space, and give userspace a handle by
 * which to reference it.
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
            struct drm_mode_create_dumb *args)
{
    size_t pitch, size;
    struct psb_gem_object *pobj;
    struct drm_gem_object *obj;
    u32 handle;
    int ret;

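    /*
     * Pitch is the byte width of one scanline, aligned to 64 bytes;
     * e.g. a 1366-pixel-wide, 32-bpp buffer needs 1366 * 4 = 5464
     * bytes per line, which aligns up to 5504.
     */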
    pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
    pitch = ALIGN(pitch, 64);

    size = pitch * args->height;
    size = roundup(size, PAGE_SIZE);
    if (!size)
        return -EINVAL;

    pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
    if (IS_ERR(pobj))
        return PTR_ERR(pobj);
    obj = &pobj->base;

    ret = drm_gem_handle_create(file, obj, &handle);
    if (ret)
        goto err_drm_gem_object_put;

    drm_gem_object_put(obj);

    args->pitch = pitch;
    args->size = size;
    args->handle = handle;

    return 0;

err_drm_gem_object_put:
    drm_gem_object_put(obj);
    return ret;
}

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us, including the actual map/unmap calls,
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking the GTT when we run out of space. We can
 * put that off for now; it is not needed for our simple uses.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct drm_gem_object *obj;
    struct psb_gem_object *pobj;
    int err;
    vm_fault_t ret;
    unsigned long pfn;
    pgoff_t page_offset;
    struct drm_device *dev;
    struct drm_psb_private *dev_priv;

    obj = vma->vm_private_data; /* GEM object */
    dev = obj->dev;
    dev_priv = to_drm_psb_private(dev);

    pobj = to_psb_gem_object(obj);

    /* Make sure we don't update the object in parallel on a fault,
       nor move or remove something from beneath our feet */
    mutex_lock(&dev_priv->mmap_mutex);

    /* For now the mmap pins the object and it stays pinned. As things
       stand that will do us no harm */
    if (pobj->mmapping == 0) {
        err = psb_gem_pin(pobj);
        if (err < 0) {
            dev_err(dev->dev, "gma500: pin failed: %d\n", err);
            ret = vmf_error(err);
            goto fail;
        }
        pobj->mmapping = 1;
    }

    /* Page relative to the VMA start - we must calculate this ourselves
       because vmf->pgoff is the fake GEM offset */
    page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

    /* CPU view of the page, don't go via the GART for CPU writes */
    if (pobj->stolen)
        pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
    else
        pfn = page_to_pfn(pobj->pages[page_offset]);
    ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
    mutex_unlock(&dev_priv->mmap_mutex);

    return ret;
}

/*
 * Memory management
 */

/* Insert stolen VRAM pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
    struct drm_device *dev = &pdev->dev;
    unsigned int pfn_base;
    unsigned int i, num_pages;
    uint32_t pte;

    pfn_base = pdev->stolen_base >> PAGE_SHIFT;
    num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;

    drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
        num_pages, pfn_base << PAGE_SHIFT, 0);

    for (i = 0; i < num_pages; ++i) {
        pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
        iowrite32(pte, pdev->gtt_map + i);
    }

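    /* Read back the last PTE written to flush the posted writes to the GTT. */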
    (void)ioread32(pdev->gtt_map + i - 1);
}

int psb_gem_mm_init(struct drm_device *dev)
{
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
    struct pci_dev *pdev = to_pci_dev(dev->dev);
    unsigned long stolen_size, vram_stolen_size;
    struct psb_gtt *pg;
    int ret;

    mutex_init(&dev_priv->mmap_mutex);

    pg = &dev_priv->gtt;

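    /*
     * PSB_BSM is the PCI config register holding the base of stolen
     * memory; the usable stolen VRAM runs from there up to the start
     * of the GTT.
     */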
    pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
    vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

    stolen_size = vram_stolen_size;

    dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
        dev_priv->stolen_base, vram_stolen_size / 1024);

    pg->stolen_size = stolen_size;
    dev_priv->vram_stolen_size = vram_stolen_size;

    dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
    if (!dev_priv->vram_addr) {
        dev_err(dev->dev, "Failure to map stolen base.\n");
        ret = -ENOMEM;
        goto err_mutex_destroy;
    }

    psb_gem_mm_populate_stolen(dev_priv);

    return 0;

err_mutex_destroy:
    mutex_destroy(&dev_priv->mmap_mutex);
    return ret;
}

void psb_gem_mm_fini(struct drm_device *dev)
{
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

    iounmap(dev_priv->vram_addr);

    mutex_destroy(&dev_priv->mmap_mutex);
}

/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
    unsigned int restored = 0, total = 0, size = 0;
    struct resource *r = pdev->gtt_mem->child;
    struct drm_device *dev = &pdev->dev;
    struct psb_gem_object *pobj;

    while (r) {
        /*
         * TODO: GTT restoration needs a refactoring, so that we don't have to touch
         *       struct psb_gem_object here. The type represents a GEM object and is
         *       not related to the GTT itself.
         */
        pobj = container_of(r, struct psb_gem_object, resource);
        if (pobj->pages) {
            psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
            size += resource_size(&pobj->resource);
            ++restored;
        }
        r = r->sibling;
        ++total;
    }

    drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)\n", restored, total, (size / 1024));
}

int psb_gem_mm_resume(struct drm_device *dev)
{
    struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
    struct pci_dev *pdev = to_pci_dev(dev->dev);
    unsigned long stolen_size, vram_stolen_size;
    struct psb_gtt *pg;

    pg = &dev_priv->gtt;

    pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
    vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

    stolen_size = vram_stolen_size;

    dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
        vram_stolen_size / 1024);

    if (stolen_size != pg->stolen_size) {
        dev_err(dev->dev, "GTT resume error.\n");
        return -EINVAL;
    }

    psb_gem_mm_populate_stolen(dev_priv);
    psb_gem_mm_populate_resources(dev_priv);

    return 0;
}