0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037 #include "i915_drv.h"
0038 #include "i915_reg.h"
0039 #include "gt/intel_ggtt_fencing.h"
0040 #include "gvt.h"
0041
0042 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
0043 {
0044 struct intel_gvt *gvt = vgpu->gvt;
0045 struct intel_gt *gt = gvt->gt;
0046 unsigned int flags;
0047 u64 start, end, size;
0048 struct drm_mm_node *node;
0049 int ret;
0050
0051 if (high_gm) {
0052 node = &vgpu->gm.high_gm_node;
0053 size = vgpu_hidden_sz(vgpu);
0054 start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
0055 end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
0056 flags = PIN_HIGH;
0057 } else {
0058 node = &vgpu->gm.low_gm_node;
0059 size = vgpu_aperture_sz(vgpu);
0060 start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
0061 end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
0062 flags = PIN_MAPPABLE;
0063 }
0064
0065 mutex_lock(>->ggtt->vm.mutex);
0066 mmio_hw_access_pre(gt);
0067 ret = i915_gem_gtt_insert(>->ggtt->vm, NULL, node,
0068 size, I915_GTT_PAGE_SIZE,
0069 I915_COLOR_UNEVICTABLE,
0070 start, end, flags);
0071 mmio_hw_access_post(gt);
0072 mutex_unlock(>->ggtt->vm.mutex);
0073 if (ret)
0074 gvt_err("fail to alloc %s gm space from host\n",
0075 high_gm ? "high" : "low");
0076
0077 return ret;
0078 }
0079
0080 static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
0081 {
0082 struct intel_gvt *gvt = vgpu->gvt;
0083 struct intel_gt *gt = gvt->gt;
0084 int ret;
0085
0086 ret = alloc_gm(vgpu, false);
0087 if (ret)
0088 return ret;
0089
0090 ret = alloc_gm(vgpu, true);
0091 if (ret)
0092 goto out_free_aperture;
0093
0094 gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id,
0095 vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu));
0096
0097 gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id,
0098 vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu));
0099
0100 return 0;
0101 out_free_aperture:
0102 mutex_lock(>->ggtt->vm.mutex);
0103 drm_mm_remove_node(&vgpu->gm.low_gm_node);
0104 mutex_unlock(>->ggtt->vm.mutex);
0105 return ret;
0106 }
0107
0108 static void free_vgpu_gm(struct intel_vgpu *vgpu)
0109 {
0110 struct intel_gvt *gvt = vgpu->gvt;
0111 struct intel_gt *gt = gvt->gt;
0112
0113 mutex_lock(>->ggtt->vm.mutex);
0114 drm_mm_remove_node(&vgpu->gm.low_gm_node);
0115 drm_mm_remove_node(&vgpu->gm.high_gm_node);
0116 mutex_unlock(>->ggtt->vm.mutex);
0117 }
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
/**
 * intel_vgpu_write_fence - program one of a vGPU's host fence registers
 * @vgpu: the vGPU that owns the fence slot
 * @fence: index into the vGPU's fence register array (must be < vgpu_fence_sz)
 * @value: full 64-bit fence register value to program
 *
 * Caller must hold a runtime-PM wakelock (asserted below). Silently returns
 * after a WARN if @fence is out of range or the slot was never reserved.
 *
 * The write sequence is order-sensitive: the low dword is zeroed first (with
 * a posting read) before the new hi/lo pair is written, presumably so the
 * hardware never samples a half-updated 64-bit fence — preserve this order.
 */
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
		u32 fence, u64 value)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;
	struct intel_uncore *uncore = gvt->gt->uncore;
	struct i915_fence_reg *reg;
	i915_reg_t fence_reg_lo, fence_reg_hi;

	assert_rpm_wakelock_held(uncore->rpm);

	if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
		return;

	/* Slot must have been populated by alloc_vgpu_fence(). */
	reg = vgpu->fence.regs[fence];
	if (drm_WARN_ON(&i915->drm, !reg))
		return;

	fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
	fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);

	/* Disable the fence (clear lo) and flush before the 64-bit update. */
	intel_uncore_write(uncore, fence_reg_lo, 0);
	intel_uncore_posting_read(uncore, fence_reg_lo);

	/* hi first, then lo; the final posting read flushes the write. */
	intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
	intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
	intel_uncore_posting_read(uncore, fence_reg_lo);
}
0157
0158 static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
0159 {
0160 int i;
0161
0162 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
0163 intel_vgpu_write_fence(vgpu, i, 0);
0164 }
0165
0166 static void free_vgpu_fence(struct intel_vgpu *vgpu)
0167 {
0168 struct intel_gvt *gvt = vgpu->gvt;
0169 struct intel_uncore *uncore = gvt->gt->uncore;
0170 struct i915_fence_reg *reg;
0171 intel_wakeref_t wakeref;
0172 u32 i;
0173
0174 if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
0175 return;
0176
0177 wakeref = intel_runtime_pm_get(uncore->rpm);
0178
0179 mutex_lock(&gvt->gt->ggtt->vm.mutex);
0180 _clear_vgpu_fence(vgpu);
0181 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
0182 reg = vgpu->fence.regs[i];
0183 i915_unreserve_fence(reg);
0184 vgpu->fence.regs[i] = NULL;
0185 }
0186 mutex_unlock(&gvt->gt->ggtt->vm.mutex);
0187
0188 intel_runtime_pm_put(uncore->rpm, wakeref);
0189 }
0190
0191 static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
0192 {
0193 struct intel_gvt *gvt = vgpu->gvt;
0194 struct intel_uncore *uncore = gvt->gt->uncore;
0195 struct i915_fence_reg *reg;
0196 intel_wakeref_t wakeref;
0197 int i;
0198
0199 wakeref = intel_runtime_pm_get(uncore->rpm);
0200
0201
0202 mutex_lock(&gvt->gt->ggtt->vm.mutex);
0203
0204 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
0205 reg = i915_reserve_fence(gvt->gt->ggtt);
0206 if (IS_ERR(reg))
0207 goto out_free_fence;
0208
0209 vgpu->fence.regs[i] = reg;
0210 }
0211
0212 _clear_vgpu_fence(vgpu);
0213
0214 mutex_unlock(&gvt->gt->ggtt->vm.mutex);
0215 intel_runtime_pm_put(uncore->rpm, wakeref);
0216 return 0;
0217
0218 out_free_fence:
0219 gvt_vgpu_err("Failed to alloc fences\n");
0220
0221 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
0222 reg = vgpu->fence.regs[i];
0223 if (!reg)
0224 continue;
0225 i915_unreserve_fence(reg);
0226 vgpu->fence.regs[i] = NULL;
0227 }
0228 mutex_unlock(&gvt->gt->ggtt->vm.mutex);
0229 intel_runtime_pm_put_unchecked(uncore->rpm);
0230 return -ENOSPC;
0231 }
0232
0233 static void free_resource(struct intel_vgpu *vgpu)
0234 {
0235 struct intel_gvt *gvt = vgpu->gvt;
0236
0237 gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu);
0238 gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu);
0239 gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu);
0240 }
0241
/*
 * alloc_resource - account a new vGPU's GM and fence quotas against the host
 * @vgpu: the vGPU being created
 * @param: creation parameters (low/high GM sizes in MB, fence count)
 *
 * Checks each request against what remains of the host pool after the
 * host's own reservation (HOST_LOW_GM_SIZE / HOST_HIGH_GM_SIZE / HOST_FENCE)
 * and what earlier vGPUs have taken, records the granted sizes on @vgpu
 * (the vgpu_*_sz() macros are lvalues here), and updates the global
 * accounting. Returns 0, -EINVAL for zero-sized requests, or -ENOSPC.
 *
 * 'item' names the resource currently being checked so the shared
 * no_enough_resource error path can report which one ran out.
 */
static int alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned long request, avail, max, taken;
	const char *item;

	/* All three resources are mandatory for a functional vGPU. */
	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
		gvt_vgpu_err("Invalid vGPU creation params\n");
		return -EINVAL;
	}

	item = "low GM space";
	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_low_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->low_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	/* Granted size is recorded page-aligned on the vGPU. */
	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "high GM space";
	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
	taken = gvt->gm.vgpu_allocated_high_gm_size;
	avail = max - taken;
	request = MB_TO_BYTES(param->high_gm_sz);

	if (request > avail)
		goto no_enough_resource;

	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);

	item = "fence";
	max = gvt_fence_sz(gvt) - HOST_FENCE;
	taken = gvt->fence.vgpu_allocated_fence_num;
	avail = max - taken;
	request = param->fence_sz;

	if (request > avail)
		goto no_enough_resource;

	vgpu_fence_sz(vgpu) = request;

	/* All checks passed: commit the grants to the global accounting. */
	gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
	gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
	gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
	return 0;

no_enough_resource:
	gvt_err("fail to allocate resource %s\n", item);
	gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
		BYTES_TO_MB(request), BYTES_TO_MB(avail),
		BYTES_TO_MB(max), BYTES_TO_MB(taken));
	return -ENOSPC;
}
0299
0300
0301
0302
0303
0304
0305
0306
/**
 * intel_vgpu_free_resource - release all HW resources owned by a vGPU
 * @vgpu: the vGPU being destroyed
 *
 * Tears down in reverse order of intel_vgpu_alloc_resource(): GM nodes,
 * then fence registers, then the global accounting — keep this order.
 */
void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
{
	free_vgpu_gm(vgpu);
	free_vgpu_fence(vgpu);
	free_resource(vgpu);
}
0313
0314
0315
0316
0317
0318
0319
0320
0321 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
0322 {
0323 struct intel_gvt *gvt = vgpu->gvt;
0324 intel_wakeref_t wakeref;
0325
0326 with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
0327 _clear_vgpu_fence(vgpu);
0328 }
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
/**
 * intel_vgpu_alloc_resource - allocate all HW resources for a new vGPU
 * @vgpu: the vGPU being created
 * @param: creation parameters (GM sizes, fence count)
 *
 * Runs the three allocation stages in order — quota accounting, GM nodes,
 * fence registers — unwinding completed stages via the goto ladder if a
 * later stage fails. Returns 0 on success or a negative errno.
 */
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
		struct intel_vgpu_creation_params *param)
{
	int ret;

	ret = alloc_resource(vgpu, param);
	if (ret)
		return ret;

	ret = alloc_vgpu_gm(vgpu);
	if (ret)
		goto out_free_resource;

	ret = alloc_vgpu_fence(vgpu);
	if (ret)
		goto out_free_vgpu_gm;

	return 0;

out_free_vgpu_gm:
	free_vgpu_gm(vgpu);
out_free_resource:
	free_resource(vgpu);
	return ret;
}