// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}

void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}
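
/*
 * As far as the surrounding i915 code suggests (an assumption, not
 * something stated in this file), these two helpers pair up across a
 * region backend: i915_gem_object_init_memory_region() is called from
 * the backend's ->init_object() hook, and
 * i915_gem_object_release_memory_region() when the object is destroyed,
 * so that mem->objects.list always reflects the objects placed in the
 * region.
 */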

static struct drm_i915_gem_object *
__i915_gem_object_create_region(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size,
				resource_size_t page_size,
				unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
			 (flags & I915_BO_ALLOC_CPU_CLEAR ||
			  flags & I915_BO_ALLOC_PM_EARLY)))
		return ERR_PTR(-EINVAL);

	if (!mem)
		return ERR_PTR(-ENODEV);

	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	/* We should be able to fit a page within an sg entry */
	GEM_BUG_ON(overflows_type(default_page_size, u32));
	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	if (default_page_size == size)
		flags |= I915_BO_ALLOC_CONTIGUOUS;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);
	/*
	 * Anything smaller than the min_page_size can't be freely inserted
	 * into the GTT, due to alignment restrictions. For such special
	 * objects, make sure we force memcpy based suspend-resume. In the
	 * future we can revisit this, either by allowing special mis-aligned
	 * objects in the migration path, or by mapping all of LMEM upfront
	 * using cheap 1G GTT entries.
	 */
	if (default_page_size < mem->min_page_size)
		flags |= I915_BO_ALLOC_PM_EARLY;
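
	/*
	 * Illustrative example (hypothetical values, not from the original
	 * source): on a region with mem->min_page_size == SZ_64K, a caller
	 * passing page_size == SZ_4K makes default_page_size (4K) smaller
	 * than min_page_size, so the object is marked I915_BO_ALLOC_PM_EARLY
	 * here and falls back to memcpy based suspend-resume as described
	 * above.
	 */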

	err = mem->ops->init_object(mem, obj, offset, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}

struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET,
					       size, page_size, flags);
}

struct drm_i915_gem_object *
i915_gem_object_create_region_at(struct intel_memory_region *mem,
				 resource_size_t offset,
				 resource_size_t size,
				 unsigned int flags)
{
	GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET);

	if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
	    GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size)))
		return ERR_PTR(-EINVAL);

	if (range_overflows(offset, size, resource_size(&mem->region)))
		return ERR_PTR(-EINVAL);

	if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
	    offset + size > mem->io_size &&
	    !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
		return ERR_PTR(-ENOSPC);

	return __i915_gem_object_create_region(mem, offset, size, 0,
					       flags | I915_BO_ALLOC_CONTIGUOUS);
}
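
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical caller allocating a 2 MiB buffer from device-local memory.
 * intel_memory_region_by_type() and INTEL_MEMORY_LOCAL are assumed to come
 * from intel_memory_region.h; a NULL region is handled by the -ENODEV
 * check in __i915_gem_object_create_region().
 */
#if 0
static struct drm_i915_gem_object *
example_create_lmem_buffer(struct drm_i915_private *i915)
{
	struct intel_memory_region *mem =
		intel_memory_region_by_type(i915, INTEL_MEMORY_LOCAL);

	/* page_size == 0 selects the region's min_page_size */
	return i915_gem_object_create_region(mem, SZ_2M, 0, 0);
}
#endif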

/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the region's object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so that, if run
 * concurrently with itself, it may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}
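
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical caller counting the objects in a region. The caller embeds
 * struct i915_gem_apply_to_region in its own context and recovers it with
 * container_of() from the ->process_obj() callback, which runs with the
 * object locked. All example_* names are invented for illustration.
 */
#if 0
struct example_count {
	struct i915_gem_apply_to_region base;
	unsigned long count;
};

static int example_count_obj(struct i915_gem_apply_to_region *apply,
			     struct drm_i915_gem_object *obj)
{
	struct example_count *ec = container_of(apply, typeof(*ec), base);

	ec->count++;
	return 0;
}

static const struct i915_gem_apply_to_region_ops example_count_ops = {
	.process_obj = example_count_obj,
};

static unsigned long example_count_region(struct intel_memory_region *mr)
{
	struct example_count ec = {
		.base.ops = &example_count_ops,
		.base.interruptible = true,
	};

	if (i915_gem_process_region(mr, &ec.base))
		return 0;	/* interrupted or failed */

	return ec.count;
}
#endif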