/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
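
/*
 * "Phys" objects swap an object's shmem backing for a single physically
 * contiguous, DMA-coherent allocation. A few legacy paths (e.g. cursor
 * planes on very old platforms that can only be fed a physical address)
 * need physically contiguous memory, which ordinary page-at-a-time shmem
 * backing cannot provide.
 */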

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

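/*
 * Replace the object's shmem pages with one contiguous DMA-coherent buffer:
 * allocate the buffer (rounded up to a power of two so the allocation's
 * natural alignment covers every caller), copy each shmem page into it, and
 * publish it as a single-entry sg_table. Note that the scatterlist "page"
 * slot is reused to store the CPU address of the buffer, so from here on
 * the object is no longer struct page backed.
 */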
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
    struct address_space *mapping = obj->base.filp->f_mapping;
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    struct scatterlist *sg;
    struct sg_table *st;
    dma_addr_t dma;
    void *vaddr;
    void *dst;
    int i;

    if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
        return -EINVAL;

    /*
     * Always aligning to the object size allows a single allocation
     * to handle all possible callers, and given typical object sizes,
     * the alignment of the buddy allocation will naturally match.
     */
    vaddr = dma_alloc_coherent(obj->base.dev->dev,
                   roundup_pow_of_two(obj->base.size),
                   &dma, GFP_KERNEL);
    if (!vaddr)
        return -ENOMEM;

    st = kmalloc(sizeof(*st), GFP_KERNEL);
    if (!st)
        goto err_pci;

    if (sg_alloc_table(st, 1, GFP_KERNEL))
        goto err_st;

    sg = st->sgl;
    sg->offset = 0;
    sg->length = obj->base.size;

    sg_assign_page(sg, (struct page *)vaddr);
    sg_dma_address(sg) = dma;
    sg_dma_len(sg) = obj->base.size;

    dst = vaddr;
    for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
        struct page *page;
        void *src;

        page = shmem_read_mapping_page(mapping, i);
        if (IS_ERR(page))
            goto err_sg;

        src = kmap_atomic(page);
        memcpy(dst, src, PAGE_SIZE);
        drm_clflush_virt_range(dst, PAGE_SIZE);
        kunmap_atomic(src);

        put_page(page);
        dst += PAGE_SIZE;
    }

    intel_gt_chipset_flush(to_gt(i915));

    /* We're no longer struct page backed */
    obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
    __i915_gem_object_set_pages(obj, st, sg->length);

    return 0;

err_sg:
    /* sg_alloc_table() succeeded, so release its entries as well */
    sg_free_table(st);
err_st:
    kfree(st);
err_pci:
    dma_free_coherent(obj->base.dev->dev,
              roundup_pow_of_two(obj->base.size),
              vaddr, dma);
    return -ENOMEM;
}

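/*
 * Tear down the contiguous buffer. If the object is dirty, its contents are
 * first copied back out to freshly looked-up shmem pages, flushing each
 * source cacheline beforehand in case the device wrote the buffer behind
 * the CPU caches; the pages are marked dirty, and also accessed if the
 * object is still WILLNEED. The sg_table and the coherent allocation are
 * then released.
 */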
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                   struct sg_table *pages)
{
    dma_addr_t dma = sg_dma_address(pages->sgl);
    void *vaddr = sg_page(pages->sgl);

    __i915_gem_object_release_shmem(obj, pages, false);

    if (obj->mm.dirty) {
        struct address_space *mapping = obj->base.filp->f_mapping;
        void *src = vaddr;
        int i;

        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
            struct page *page;
            char *dst;

            page = shmem_read_mapping_page(mapping, i);
            if (IS_ERR(page))
                continue;

            dst = kmap_atomic(page);
            drm_clflush_virt_range(src, PAGE_SIZE);
            memcpy(dst, src, PAGE_SIZE);
            kunmap_atomic(dst);

            set_page_dirty(page);
            if (obj->mm.madv == I915_MADV_WILLNEED)
                mark_page_accessed(page);
            put_page(page);

            src += PAGE_SIZE;
        }
        obj->mm.dirty = false;
    }

    sg_free_table(pages);
    kfree(pages);

    dma_free_coherent(obj->base.dev->dev,
              roundup_pow_of_two(obj->base.size),
              vaddr, dma);
}

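/*
 * pwrite to a phys object is a plain memcpy into the coherent buffer,
 * bracketed by frontbuffer invalidate/flush notifications and followed by
 * a clflush plus chipset flush so the write is visible to the device. All
 * outstanding rendering is awaited first (reads and writes, since this is
 * a write from the CPU's point of view).
 */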
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
                const struct drm_i915_gem_pwrite *args)
{
    /*
     * The scatterlist "page" actually stores the CPU address of the
     * coherent buffer, so cast before applying the byte offset.
     */
    void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
    char __user *user_data = u64_to_user_ptr(args->data_ptr);
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    int err;

    err = i915_gem_object_wait(obj,
                   I915_WAIT_INTERRUPTIBLE |
                   I915_WAIT_ALL,
                   MAX_SCHEDULE_TIMEOUT);
    if (err)
        return err;

    /*
     * We manually control the domain here and pretend that it
     * remains coherent i.e. in the GTT domain, like shmem_pwrite.
     */
    i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

    if (copy_from_user(vaddr, user_data, args->size))
        return -EFAULT;

    drm_clflush_virt_range(vaddr, args->size);
    intel_gt_chipset_flush(to_gt(i915));

    i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
    return 0;
}

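/*
 * pread is the mirror image: wait for writers only (no I915_WAIT_ALL, as
 * this is just a read), clflush the range so any device writes are
 * observed, then copy out to userspace.
 */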
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
    /* as in pwrite above: the "page" is really the buffer's CPU address */
    void *vaddr = (void *)sg_page(obj->mm.pages->sgl) + args->offset;
    char __user *user_data = u64_to_user_ptr(args->data_ptr);
    int err;

    err = i915_gem_object_wait(obj,
                   I915_WAIT_INTERRUPTIBLE,
                   MAX_SCHEDULE_TIMEOUT);
    if (err)
        return err;

    drm_clflush_virt_range(vaddr, args->size);
    if (copy_to_user(user_data, vaddr, args->size))
        return -EFAULT;

    return 0;
}

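/*
 * Swap the backing store from shmem to a contiguous phys allocation. The
 * old pages are unset first but kept aside, so they can be reinstated if
 * the phys allocation fails; on success they are returned to shmem, the
 * new pages are perma-pinned, and the object leaves its memory region.
 */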
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
    struct sg_table *pages;
    int err;

    pages = __i915_gem_object_unset_pages(obj);

    err = i915_gem_object_get_pages_phys(obj);
    if (err)
        goto err_xfer;

    /* Perma-pin (until release) the physical set of pages */
    __i915_gem_object_pin_pages(obj);

    if (!IS_ERR_OR_NULL(pages))
        i915_gem_object_put_pages_shmem(obj, pages);

    i915_gem_object_release_memory_region(obj);
    return 0;

err_xfer:
    if (!IS_ERR_OR_NULL(pages)) {
        unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
    }
    return err;
}

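/*
 * i915_gem_object_attach_phys - convert a shmem object to a phys object
 * @obj: the object to convert; the caller must hold the object lock
 * @align: required alignment, which may not exceed the object size
 *
 * Fails if the object is not shmem backed, is purgeable (or already
 * purged), carries a tiling quirk, or has its pages mapped or pinned. An
 * object that has already lost its struct page backing is assumed to be
 * phys backed and returns success immediately.
 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *
 *     err = i915_gem_object_lock_interruptible(obj, NULL);
 *     if (err == 0) {
 *         err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
 *         i915_gem_object_unlock(obj);
 *     }
 */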
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
    int err;

    assert_object_held(obj);

    if (align > obj->base.size)
        return -EINVAL;

    if (!i915_gem_object_is_shmem(obj))
        return -EINVAL;

    if (!i915_gem_object_has_struct_page(obj))
        return 0;

    err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
    if (err)
        return err;

    if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
        drm_dbg(obj->base.dev,
            "Attempting to obtain a purgeable object\n");
        return -EFAULT;
    }

    if (i915_gem_object_has_tiling_quirk(obj))
        return -EFAULT;

    if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
        return -EBUSY;

    return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif