/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

/**
 * DOC: I915_PARAM_VM_BIND_VERSION
 *
 * VM_BIND feature version supported.
 * See typedef drm_i915_getparam_t param.
 *
 * Specifies the VM_BIND feature version supported.
 * The following versions of VM_BIND have been defined:
 *
 * 0: No VM_BIND support.
 *
 * 1: In VM_UNBIND calls, the UMD must specify the exact mappings created
 *    previously with VM_BIND; the ioctl will not support unbinding multiple
 *    mappings or splitting them. Similarly, VM_BIND calls will not replace
 *    any existing mappings.
 *
 * 2: The restrictions on unbinding partial or multiple mappings are
 *    lifted. Similarly, binding will replace any mappings in the given range.
 *
 * See struct drm_i915_gem_vm_bind and struct drm_i915_gem_vm_unbind.
 */
#define I915_PARAM_VM_BIND_VERSION	57

/**
 * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
 *
 * Flag to opt in to the VM_BIND mode of binding during VM creation.
 * See struct drm_i915_gem_vm_control flags.
 *
 * The older execbuf2 ioctl will not support the VM_BIND mode of operation.
 * For VM_BIND mode, there is a new execbuf3 ioctl which will not accept any
 * execlist (see struct drm_i915_gem_execbuffer3 for more details).
 */
#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)

/* VM_BIND related ioctls */
#define DRM_I915_GEM_VM_BIND		0x3d
#define DRM_I915_GEM_VM_UNBIND		0x3e
#define DRM_I915_GEM_EXECBUFFER3	0x3f

#define DRM_IOCTL_I915_GEM_VM_BIND	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
#define DRM_IOCTL_I915_GEM_VM_UNBIND	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
#define DRM_IOCTL_I915_GEM_EXECBUFFER3	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
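A minimal userspace sketch (not part of the header itself) of how the version query and the VM_BIND opt-in above might be used, assuming "fd" is an open i915 DRM file descriptor, that the definitions above are available, and with error handling omitted:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative only: query the supported VM_BIND version (0 == no support). */
static int query_vm_bind_version(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_VM_BIND_VERSION,
		.value = &value,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* param unknown: no VM_BIND support */
	return value;
}

/* Illustrative only: create a VM that opts in to VM_BIND mode. */
static __u32 create_vm_bind_vm(int fd)
{
	struct drm_i915_gem_vm_control ctl = {
		.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl);
	return ctl.vm_id;
}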
/**
 * struct drm_i915_gem_timeline_fence - An input or output timeline fence.
 *
 * The operation will wait for the input fence to signal.
 *
 * The returned output fence will be signaled after the completion of the
 * operation.
 */
struct drm_i915_gem_timeline_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_TIMELINE_FENCE_WAIT:
	 * Wait for the input fence before the operation.
	 *
	 * I915_TIMELINE_FENCE_SIGNAL:
	 * Return operation completion fence as output.
	 */
	__u32 flags;
#define I915_TIMELINE_FENCE_WAIT		(1 << 0)
#define I915_TIMELINE_FENCE_SIGNAL		(1 << 1)
#define __I915_TIMELINE_FENCE_UNKNOWN_FLAGS	(-(I915_TIMELINE_FENCE_SIGNAL << 1))

	/**
	 * @value: A point in the timeline.
	 * Value must be 0 for a binary drm_syncobj. A value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 value;
};

/**
 * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
 *
 * This structure is passed to the VM_BIND ioctl and specifies the mapping of
 * a GPU virtual address (VA) range to the section of an object that should be
 * bound in the device page table of the specified address space (VM).
 * The VA range specified must be unique (i.e., not currently bound) and can
 * be mapped to the whole object or to a section of the object (partial
 * binding). Multiple VA mappings can be created to the same section of the
 * object (aliasing).
 *
 * The @start, @offset and @length must be 4K page aligned. However, DG2 and
 * XEHPSDV have a 64K page size for device local memory and a compact page
 * table. On those platforms, for binding device local-memory objects, the
 * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
 * the local memory 64K page and the system memory 4K page bindings in the
 * same 2M range.
 *
 * Error code -EINVAL will be returned if @start, @offset and @length are not
 * properly aligned. In version 1 (see I915_PARAM_VM_BIND_VERSION), error code
 * -ENOSPC will be returned if the VA range specified can't be reserved.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_BIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_bind {
	/** @vm_id: VM (address space) id to bind */
	__u32 vm_id;

	/** @handle: Object handle */
	__u32 handle;

	/** @start: Virtual Address start to bind */
	__u64 start;

	/** @offset: Offset in object to bind */
	__u64 offset;

	/** @length: Length of mapping to bind */
	__u64 length;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_GEM_VM_BIND_CAPTURE:
	 * Capture this mapping in the dump upon GPU error.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;
#define I915_GEM_VM_BIND_CAPTURE	(1 << 0)

	/**
	 * @fence: Timeline fence for bind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out
	 * fence is not requested and binding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};
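A minimal userspace sketch (not part of the header itself) of a VM_BIND call using the structure above. It binds one 4K page of a GEM object at a caller-chosen VA and requests an out fence on an existing binary drm_syncobj; the vm_id, handle, VA and syncobj values are placeholders, the definitions above are assumed to be available, and error handling is omitted:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative only: bind [va, va + 4096) to the first page of bo_handle. */
static int bind_one_page(int fd, __u32 vm_id, __u32 bo_handle,
			 __u64 va, __u32 syncobj)
{
	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,
		.handle = bo_handle,
		.start = va,
		.offset = 0,
		.length = 4096,
		.fence = {
			.handle = syncobj,
			.flags = I915_TIMELINE_FENCE_SIGNAL,
			.value = 0,	/* 0: binary drm_syncobj */
		},
	};

	/* The syncobj signals once the binding is complete. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
}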
/**
 * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
 *
 * This structure is passed to the VM_UNBIND ioctl and specifies the GPU
 * virtual address (VA) range that should be unbound from the device page
 * table of the specified address space (VM). VM_UNBIND will force unbind the
 * specified range from the device page table without waiting for any GPU job
 * to complete. It is the UMD's responsibility to ensure the mapping is no
 * longer in use before calling VM_UNBIND.
 *
 * If the specified mapping is not found, the ioctl will simply return without
 * any error.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_unbind {
	/** @vm_id: VM (address space) id to unbind from */
	__u32 vm_id;

	/** @rsvd: Reserved, MBZ */
	__u32 rsvd;

	/** @start: Virtual Address start to unbind */
	__u64 start;

	/** @length: Length of mapping to unbind */
	__u64 length;

	/**
	 * @flags: Currently reserved, MBZ.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;

	/**
	 * @fence: Timeline fence for unbind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out
	 * fence is not requested and unbinding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};

/**
 * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
 * ioctl.
 *
 * The DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode, and VM_BIND
 * mode only works with this ioctl for submission.
 * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
 */
struct drm_i915_gem_execbuffer3 {
	/**
	 * @ctx_id: Context id
	 *
	 * Only contexts with a user engine map are allowed.
	 */
	__u32 ctx_id;

	/**
	 * @engine_idx: Engine index
	 *
	 * An index in the user engine map of the context specified by @ctx_id.
	 */
	__u32 engine_idx;

	/**
	 * @batch_address: Batch gpu virtual address/es.
	 *
	 * For normal submission, it is the gpu virtual address of the batch
	 * buffer. For parallel submission, it is a pointer to an array of
	 * batch buffer gpu virtual addresses with array size equal to the
	 * number of (parallel) engines involved in that submission (see
	 * struct i915_context_engines_parallel_submit).
	 */
	__u64 batch_address;

	/** @flags: Currently reserved, MBZ */
	__u64 flags;

	/** @rsvd1: Reserved, MBZ */
	__u32 rsvd1;

	/** @fence_count: Number of fences in the @timeline_fences array. */
	__u32 fence_count;

	/**
	 * @timeline_fences: Pointer to an array of timeline fences.
	 *
	 * Timeline fences are of format struct drm_i915_gem_timeline_fence.
	 */
	__u64 timeline_fences;

	/** @rsvd2: Reserved, MBZ */
	__u64 rsvd2;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};

/**
 * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
 * private to the specified VM.
 *
 * See struct drm_i915_gem_create_ext.
 */
struct drm_i915_gem_create_ext_vm_private {
#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @vm_id: Id of the VM to which the object is private */
	__u32 vm_id;
};
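A minimal userspace sketch (not part of the header itself) tying the pieces together: submit a batch buffer that was previously bound at "batch_va" through the execbuf3 ioctl, then unbind that range. The context, engine index and VA are placeholders, the definitions above are assumed to be available, and GPU-completion synchronization (e.g. via timeline fences) and error handling are largely omitted:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Illustrative only: submit a bound batch on engine 0 and unbind it after. */
static int submit_and_unbind(int fd, __u32 ctx_id, __u32 vm_id,
			     __u64 batch_va, __u64 length)
{
	struct drm_i915_gem_execbuffer3 exec = {
		.ctx_id = ctx_id,		/* context with a user engine map */
		.engine_idx = 0,		/* index into that engine map */
		.batch_address = batch_va,	/* VA bound earlier with VM_BIND */
	};
	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.start = batch_va,
		.length = length,
	};
	int ret;

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &exec);
	if (ret)
		return ret;

	/* The UMD must ensure the mapping is idle before unbinding. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
}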