/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

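/*
 * High-order attempts use QUIET (__GFP_NORETRY) so that a failure quietly
 * falls back to the next smaller order; only the final order-0 attempt
 * uses MAYFAIL (__GFP_RETRY_MAYFAIL) to try harder before we report
 * -ENOMEM.
 */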
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

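/* Free every page referenced by the scatterlist, then the table itself. */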
static void internal_free_pages(struct sg_table *st)
{
    struct scatterlist *sg;

    for (sg = st->sgl; sg; sg = __sg_next(sg)) {
        if (sg_page(sg))
            __free_pages(sg_page(sg), get_order(sg->length));
    }

    sg_free_table(st);
    kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
    struct drm_i915_private *i915 = to_i915(obj->base.dev);
    struct sg_table *st;
    struct scatterlist *sg;
    unsigned int sg_page_sizes;
    unsigned int npages;
    int max_order;
    gfp_t gfp;

    max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
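    /*
     * With swiotlb active, a segment larger than the bounce buffer cannot
     * be dma-mapped, so clamp the allocation order to the largest segment
     * swiotlb reports it can handle.
     */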
    if (is_swiotlb_active(obj->base.dev->dev)) {
        unsigned int max_segment;

        max_segment = swiotlb_max_segment();
        if (max_segment) {
            max_segment = max_t(unsigned int, max_segment,
                        PAGE_SIZE) >> PAGE_SHIFT;
            max_order = min(max_order, ilog2(max_segment));
        }
    }
#endif

    gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
    if (IS_I965GM(i915) || IS_I965G(i915)) {
        /* 965gm cannot relocate objects above 4GiB. */
        gfp &= ~__GFP_HIGHMEM;
        gfp |= __GFP_DMA32;
    }

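/*
 * We may return here with max_order forced to zero if the segments we
 * allocated turn out to be unmappable for dma.
 */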
create_st:
    st = kmalloc(sizeof(*st), GFP_KERNEL);
    if (!st)
        return -ENOMEM;

    npages = obj->base.size / PAGE_SIZE;
    if (sg_alloc_table(st, npages, GFP_KERNEL)) {
        kfree(st);
        return -ENOMEM;
    }

    sg = st->sgl;
    st->nents = 0;
    sg_page_sizes = 0;

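    /*
     * Fill the table front to back: each iteration grabs the largest
     * order that still fits the remaining pages, stepping down on
     * failure, and a failed order also caps all later allocations.
     */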
    do {
        int order = min(fls(npages) - 1, max_order);
        struct page *page;

        do {
            page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
                       order);
            if (page)
                break;
            if (!order--)
                goto err;

            /* Limit subsequent allocations as well */
            max_order = order;
        } while (1);

        sg_set_page(sg, page, PAGE_SIZE << order, 0);
        sg_page_sizes |= PAGE_SIZE << order;
        st->nents++;

        npages -= 1 << order;
        if (!npages) {
            sg_mark_end(sg);
            break;
        }

        sg = __sg_next(sg);
    } while (1);

    if (i915_gem_gtt_prepare_pages(obj, st)) {
        /* Failed to dma-map; try again with single-page sg segments. */
        if (get_order(st->sgl->length)) {
            internal_free_pages(st);
            max_order = 0;
            goto create_st;
        }
        goto err;
    }

    __i915_gem_object_set_pages(obj, st, sg_page_sizes);

    return 0;

err:
    sg_set_page(sg, NULL, 0, 0);
    sg_mark_end(sg);
    internal_free_pages(st);

    return -ENOMEM;
}

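/*
 * Undo get_pages: unmap the sg table from the GTT, release the backing
 * pages, and return the object to the CPU write domain.
 */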
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
                           struct sg_table *pages)
{
    i915_gem_gtt_finish_pages(obj, pages);
    internal_free_pages(pages);

    obj->mm.dirty = false;

    __start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
    .name = "i915_gem_object_internal",
    .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
    .get_pages = i915_gem_object_get_pages_internal,
    .put_pages = i915_gem_object_put_pages_internal,
};

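/*
 * Common constructor: the caller supplies the ops table so that variants
 * with different page-provider hooks can reuse this setup.
 */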
struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
                  const struct drm_i915_gem_object_ops *ops,
                  phys_addr_t size)
{
    static struct lock_class_key lock_class;
    struct drm_i915_gem_object *obj;
    unsigned int cache_level;

    GEM_BUG_ON(!size);
    GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

    if (overflows_type(size, obj->base.size))
        return ERR_PTR(-E2BIG);

    obj = i915_gem_object_alloc();
    if (!obj)
        return ERR_PTR(-ENOMEM);

    drm_gem_private_object_init(&i915->drm, &obj->base, size);
    i915_gem_object_init(obj, ops, &lock_class, 0);
    obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

    /*
     * Mark the object as volatile, such that the pages are marked as
     * dontneed whilst they are still pinned. As soon as they are unpinned
     * they are allowed to be reaped by the shrinker, and the caller is
     * expected to repopulate - the contents of this object are only valid
     * whilst active and pinned.
     */
    i915_gem_object_set_volatile(obj);

    obj->read_domains = I915_GEM_DOMAIN_CPU;
    obj->write_domain = I915_GEM_DOMAIN_CPU;

    cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
    i915_gem_object_set_cache_coherency(obj, cache_level);

    return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
                phys_addr_t size)
{
    return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}
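
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * such as a ring-buffer setup path would allocate an internal object and
 * keep it pinned for as long as the hardware may access it. The pinning
 * helper shown, i915_gem_object_ggtt_pin(), is an assumption here and
 * varies by kernel version.
 *
 *    obj = i915_gem_object_create_internal(i915, SZ_4M);
 *    if (IS_ERR(obj))
 *        return PTR_ERR(obj);
 *
 *    vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_HIGH);
 *    if (IS_ERR(vma)) {
 *        i915_gem_object_put(obj);
 *        return PTR_ERR(vma);
 *    }
 */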