Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: MIT
0002 /*
0003  * Copyright © 2019 Intel Corporation
0004  */
0005 
0006 #include <uapi/drm/i915_drm.h>
0007 
0008 #include "intel_memory_region.h"
0009 #include "gem/i915_gem_region.h"
0010 #include "gem/i915_gem_lmem.h"
0011 #include "i915_drv.h"
0012 
0013 void __iomem *
0014 i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
0015                 unsigned long n,
0016                 unsigned long size)
0017 {
0018     resource_size_t offset;
0019 
0020     GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
0021 
0022     offset = i915_gem_object_get_dma_address(obj, n);
0023     offset -= obj->mm.region->region.start;
0024 
0025     return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
0026 }
0027 
0028 /**
0029  * i915_gem_object_is_lmem - Whether the object is resident in
0030  * lmem
0031  * @obj: The object to check.
0032  *
0033  * Even if an object is allowed to migrate and change memory region,
0034  * this function checks whether it will always be present in lmem when
0035  * valid *or* if that's not the case, whether it's currently resident in lmem.
0036  * For migratable and evictable objects, the latter only makes sense when
0037  * the object is locked.
0038  *
 * Return: Whether the object is migratable but resident in lmem, or not
 * migratable and will be present in lmem when valid.
0041  */
0042 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
0043 {
0044     struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
0045 
0046 #ifdef CONFIG_LOCKDEP
0047     if (i915_gem_object_migratable(obj) &&
0048         i915_gem_object_evictable(obj))
0049         assert_object_held(obj);
0050 #endif
0051     return mr && (mr->type == INTEL_MEMORY_LOCAL ||
0052               mr->type == INTEL_MEMORY_STOLEN_LOCAL);
0053 }
0054 
0055 /**
0056  * __i915_gem_object_is_lmem - Whether the object is resident in
0057  * lmem while in the fence signaling critical path.
0058  * @obj: The object to check.
0059  *
0060  * This function is intended to be called from within the fence signaling
0061  * path where the fence, or a pin, keeps the object from being migrated. For
0062  * example during gpu reset or similar.
0063  *
0064  * Return: Whether the object is resident in lmem.
0065  */
0066 bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
0067 {
0068     struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
0069 
0070 #ifdef CONFIG_LOCKDEP
0071     GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
0072             i915_gem_object_evictable(obj));
0073 #endif
0074     return mr && (mr->type == INTEL_MEMORY_LOCAL ||
0075               mr->type == INTEL_MEMORY_STOLEN_LOCAL);
0076 }
0077 
0078 /**
0079  * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
0080  * minimum page size for the backing pages.
0081  * @i915: The i915 instance.
0082  * @size: The size in bytes for the object. Note that we need to round the size
0083  * up depending on the @page_size. The final object size can be fished out from
0084  * the drm GEM object.
0085  * @page_size: The requested minimum page size in bytes for this object. This is
0086  * useful if we need something bigger than the regions min_page_size due to some
0087  * hw restriction, or in some very specialised cases where it needs to be
0088  * smaller, where the internal fragmentation cost is too great when rounding up
0089  * the object size.
0090  * @flags: The optional BO allocation flags.
0091  *
0092  * Note that this interface assumes you know what you are doing when forcing the
0093  * @page_size. If this is smaller than the regions min_page_size then it can
0094  * never be inserted into any GTT, otherwise it might lead to undefined
0095  * behaviour.
0096  *
0097  * Return: The object pointer, which might be an ERR_PTR in the case of failure.
0098  */
0099 struct drm_i915_gem_object *
0100 __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
0101                       resource_size_t size,
0102                       resource_size_t page_size,
0103                       unsigned int flags)
0104 {
0105     return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
0106                          size, page_size, flags);
0107 }
0108 
0109 struct drm_i915_gem_object *
0110 i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
0111                       const void *data, size_t size)
0112 {
0113     struct drm_i915_gem_object *obj;
0114     void *map;
0115 
0116     obj = i915_gem_object_create_lmem(i915,
0117                       round_up(size, PAGE_SIZE),
0118                       I915_BO_ALLOC_CONTIGUOUS);
0119     if (IS_ERR(obj))
0120         return obj;
0121 
0122     map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
0123     if (IS_ERR(map)) {
0124         i915_gem_object_put(obj);
0125         return map;
0126     }
0127 
0128     memcpy(map, data, size);
0129 
0130     i915_gem_object_unpin_map(obj);
0131 
0132     return obj;
0133 }
0134 
0135 struct drm_i915_gem_object *
0136 i915_gem_object_create_lmem(struct drm_i915_private *i915,
0137                 resource_size_t size,
0138                 unsigned int flags)
0139 {
0140     return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
0141                          size, 0, flags);
0142 }