/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
    /**
     * The sg mask of the pages sg_table, i.e. the mask of
     * the lengths for each sg entry.
     */
    unsigned int phys;

    /**
     * The gtt page sizes we are allowed to use given the
     * sg mask and the supported page sizes. This will
     * express the smallest unit we can use for the whole
     * object, as well as the larger sizes we may be able
     * to use opportunistically.
     */
    unsigned int sg;
};

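/*
 * Illustrative example (not taken from the driver): for an sg_table
 * whose entries are all exactly 64K long, @phys would be SZ_64K. On a
 * platform whose GTT supports 4K and 64K pages, @sg would then be
 * SZ_4K | SZ_64K: 4K is the smallest unit usable for the whole object,
 * and 64K pages can be used opportunistically.
 */
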
/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before the scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself keeps a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: The struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @wakeref: The wakeref taken when the unbind was scheduled; see
 * @needs_wakeref.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @vma_size: Bind size.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: Whether pte rewriting needs to be skipped for unbind,
 * as it does during ggtt suspend and vm takedown.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async, otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource extends from a binding
 * request until the possibly asynchronous unbind has completed.
 */
struct i915_vma_resource {
    struct dma_fence unbind_fence;
    /* See above for description of the lock. */
    spinlock_t lock;
    refcount_t hold_count;
    struct work_struct work;
    struct i915_sw_fence chain;
    struct rb_node rb;
    u64 __subtree_last;
    struct i915_address_space *vm;
    intel_wakeref_t wakeref;

    /**
     * struct i915_vma_bindinfo - Information needed for async bind
     * only, but that can be dropped after the bind has taken place.
     * Consider making this a separate argument to the bind_vma
     * op, coalescing with other arguments like vm, stash, cache_level
     * and flags.
     * @pages: The pages sg-table.
     * @page_sizes: Page sizes of the pages.
     * @pages_rsgt: Refcounted sg-table when delayed object destruction
     * is supported. May be NULL.
     * @readonly: Whether the vma should be bound read-only.
     * @lmem: Whether the vma points to lmem.
     */
    struct i915_vma_bindinfo {
        struct sg_table *pages;
        struct i915_page_sizes page_sizes;
        struct i915_refct_sgt *pages_rsgt;
        bool readonly:1;
        bool lmem:1;
    } bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
    struct intel_memory_region *mr;
#endif
    const struct i915_vma_ops *ops;
    void *private;
    u64 start;
    u64 node_size;
    u64 vma_size;
    u32 page_sizes_gtt;

    u32 bound_flags;
    bool allocated:1;
    bool immediate_unbind:1;
    bool needs_wakeref:1;
    bool skip_pte_rewrite:1;

    u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
                            bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
                              bool lockdep_cookie);
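
/*
 * Typical hold pattern (a sketch, not a verbatim driver snippet):
 * holding blocks the unbind fence from signalling while the resource
 * is inspected:
 *
 *    bool lockdep_cookie;
 *
 *    if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *        ... access vma_res fields ...
 *        i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *    }
 */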

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
                                           u32 *tlb);
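
/*
 * Unbind sketch (assumes the returned fence is refcounted and follows
 * normal dma-fence rules, and that @tlb may be NULL when no TLB
 * invalidation tracking is wanted; error handling elided):
 *
 *    struct dma_fence *fence = i915_vma_resource_unbind(vma_res, NULL);
 *
 *    dma_fence_wait(fence, false);
 *    dma_fence_put(fence);
 */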

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
    dma_fence_get(&vma_res->unbind_fence);
    return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
    dma_fence_put(&vma_res->unbind_fence);
}
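
/*
 * Since both helpers simply forward to the embedded @unbind_fence, a
 * reference keeps the resource (and its fence) alive independently of
 * the vma, as in this sketch:
 *
 *    struct i915_vma_resource *vma_res = i915_vma_resource_get(res);
 *
 *    ... use vma_res after the vma itself may be gone ...
 *    i915_vma_resource_put(vma_res);
 */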

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @size: Bind size.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocation and initialization functions
 * is that initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
                                          struct i915_address_space *vm,
                                          struct sg_table *pages,
                                          const struct i915_page_sizes *page_sizes,
                                          struct i915_refct_sgt *pages_rsgt,
                                          bool readonly,
                                          bool lmem,
                                          struct intel_memory_region *mr,
                                          const struct i915_vma_ops *ops,
                                          void *private,
                                          u64 start,
                                          u64 node_size,
                                          u64 size)
{
    __i915_vma_resource_init(vma_res);
    vma_res->vm = vm;
    vma_res->bi.pages = pages;
    vma_res->bi.page_sizes = *page_sizes;
    if (pages_rsgt)
        vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
    vma_res->bi.readonly = readonly;
    vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
    vma_res->mr = mr;
#endif
    vma_res->ops = ops;
    vma_res->private = private;
    vma_res->start = start;
    vma_res->node_size = node_size;
    vma_res->vma_size = size;
}
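
/*
 * The alloc / init split allows a pattern like the following sketch,
 * where the vm mutex stands in for whatever lock forbids allocation
 * and ERR_PTR-style error reporting from the allocator is assumed:
 *
 *    vma_res = i915_vma_resource_alloc();
 *    if (IS_ERR(vma_res))
 *        return PTR_ERR(vma_res);
 *
 *    mutex_lock(&vm->mutex);
 *    i915_vma_resource_init(vma_res, vm, pages, &page_sizes, pages_rsgt,
 *                           readonly, lmem, mr, ops, private, start,
 *                           node_size, size);
 *    mutex_unlock(&vm->mutex);
 */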

/**
 * i915_vma_resource_fini - Finalize a vma resource.
 * @vma_res: The resource to finalize.
 *
 * Drops the refcounted sg-table, if any, and finalizes the dependency
 * fence. Only the vma's own hold on the unbind fence may remain.
 */
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
    GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
    if (vma_res->bi.pages_rsgt)
        i915_refct_sgt_put(vma_res->bi.pages_rsgt);
    i915_sw_fence_fini(&vma_res->chain);
}

/*
 * The _bind_dep_ functions below operate on the vm's pending unbind
 * interval tree (see @rb above): they sync against, or await, the
 * unbind fences of vma resources overlapping the range [first, last].
 */
int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
                                    u64 first,
                                    u64 last,
                                    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
                                     struct i915_sw_fence *sw_fence,
                                     u64 first,
                                     u64 last,
                                     bool intr,
                                     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif