0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036 #include "i915_drv.h"
0037 #include "i915_reg.h"
0038 #include "gvt.h"
0039
0040 #include "gt/intel_gt_regs.h"
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
0051 {
0052 u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
0053 return gpa - gttmmio_gpa;
0054 }
0055
/*
 * Classify a BAR0 offset: MMIO register space occupies the start of the
 * BAR, the virtual GGTT aperture begins at gtt_start_offset.
 *
 * All arguments are parenthesized for macro hygiene; @reg is evaluated
 * more than once in reg_is_gtt(), so callers must not pass expressions
 * with side effects.  The (reg) >= 0 test is vacuous for the unsigned
 * offsets used in this file but is kept for callers with signed types.
 */
#define reg_is_mmio(gvt, reg) \
	((reg) >= 0 && (reg) < (gvt)->device_info.mmio_size)

#define reg_is_gtt(gvt, reg) \
	((reg) >= (gvt)->device_info.gtt_start_offset \
	 && (reg) < (gvt)->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
0062
0063 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
0064 void *p_data, unsigned int bytes, bool read)
0065 {
0066 struct intel_gvt *gvt = NULL;
0067 void *pt = NULL;
0068 unsigned int offset = 0;
0069
0070 if (!vgpu || !p_data)
0071 return;
0072
0073 gvt = vgpu->gvt;
0074 mutex_lock(&vgpu->vgpu_lock);
0075 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
0076 if (reg_is_mmio(gvt, offset)) {
0077 if (read)
0078 intel_vgpu_default_mmio_read(vgpu, offset, p_data,
0079 bytes);
0080 else
0081 intel_vgpu_default_mmio_write(vgpu, offset, p_data,
0082 bytes);
0083 } else if (reg_is_gtt(gvt, offset)) {
0084 offset -= gvt->device_info.gtt_start_offset;
0085 pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
0086 if (read)
0087 memcpy(p_data, pt, bytes);
0088 else
0089 memcpy(pt, p_data, bytes);
0090
0091 }
0092 mutex_unlock(&vgpu->vgpu_lock);
0093 }
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
/*
 * intel_vgpu_emulate_mmio_read - emulate a guest MMIO read.
 * @vgpu: the vGPU performing the access
 * @pa: guest physical address of the access
 * @p_data: buffer receiving @bytes of read data
 * @bytes: access width (at most 8)
 *
 * Returns 0 on success, -EINVAL on a malformed access or handler failure.
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *i915 = gvt->gt->i915;
	unsigned int offset = 0;
	int ret = -EINVAL;

	/* Failsafe mode bypasses handler dispatch entirely (and takes
	 * vgpu_lock itself), so return before locking here. */
	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}
	mutex_lock(&vgpu->vgpu_lock);

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (drm_WARN_ON(&i915->drm, bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		/* GGTT accesses must be 4- or 8-byte sized and aligned,
		 * and must not run past the end of the GTT range. */
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
			    !IS_ALIGNED(offset, 8)))
			goto err;
		if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
			goto err;
		if (drm_WARN_ON(&i915->drm,
				!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		goto out;
	}

	/* Outside the MMIO register range: warn once and satisfy the
	 * access from guest memory instead of failing it. */
	if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
		goto out;
	}

	/* Reject accesses straddling the end of the MMIO region. */
	if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	/* Registers not flagged as unalign-tolerant require natural
	 * alignment for the access width. */
	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
			goto err;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	ret = 0;
	goto out;

err:
	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
			offset, bytes);
out:
	mutex_unlock(&vgpu->vgpu_lock);
	return ret;
}
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180 int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
0181 void *p_data, unsigned int bytes)
0182 {
0183 struct intel_gvt *gvt = vgpu->gvt;
0184 struct drm_i915_private *i915 = gvt->gt->i915;
0185 unsigned int offset = 0;
0186 int ret = -EINVAL;
0187
0188 if (vgpu->failsafe) {
0189 failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
0190 return 0;
0191 }
0192
0193 mutex_lock(&vgpu->vgpu_lock);
0194
0195 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
0196
0197 if (drm_WARN_ON(&i915->drm, bytes > 8))
0198 goto err;
0199
0200 if (reg_is_gtt(gvt, offset)) {
0201 if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
0202 !IS_ALIGNED(offset, 8)))
0203 goto err;
0204 if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
0205 goto err;
0206 if (drm_WARN_ON(&i915->drm,
0207 !reg_is_gtt(gvt, offset + bytes - 1)))
0208 goto err;
0209
0210 ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
0211 p_data, bytes);
0212 if (ret)
0213 goto err;
0214 goto out;
0215 }
0216
0217 if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
0218 ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
0219 goto out;
0220 }
0221
0222 ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
0223 if (ret < 0)
0224 goto err;
0225
0226 intel_gvt_mmio_set_accessed(gvt, offset);
0227 ret = 0;
0228 goto out;
0229 err:
0230 gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
0231 bytes);
0232 out:
0233 mutex_unlock(&vgpu->vgpu_lock);
0234 return ret;
0235 }
0236
0237
0238
0239
0240
0241
0242
/*
 * intel_vgpu_reset_mmio - reset the vGPU's virtual MMIO register file.
 * @vgpu: the vGPU being reset
 * @dmlr: true for a device-model-level reset (full vreg reload from the
 *        firmware snapshot), false for a partial reset that only restores
 *        registers below GVT_GEN8_MMIO_RESET_OFFSET.
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

	if (dmlr) {
		/* Full reset: reload the entire vreg space from the
		 * firmware-captured register snapshot ... */
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);

		/* ... then patch in post-reset hardware state the snapshot
		 * does not reflect: idle GT thread/core status, ... */
		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;


		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;

		/* ... GuC microcontroller held in reset, ... */
		vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;

		/* ... and, on Broxton, display PHY/power-well registers in
		 * their powered-down state (presumably so the guest's
		 * display init sequence starts from scratch — the exact
		 * bit meanings follow the BXT PHY register layout). */
		if (IS_BROXTON(vgpu->gvt->gt->i915)) {
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
				    ~(BIT(0) | BIT(1));
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
				SKL_FUSE_DOWNLOAD_STATUS |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
		}
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET	(0x44200)
		/* Partial reset: restore only the low register range from
		 * the snapshot; registers at or above the cutoff keep
		 * their current vreg values. */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}

}
0302
0303
0304
0305
0306
0307
0308
0309
0310 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
0311 {
0312 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
0313
0314 vgpu->mmio.vreg = vzalloc(info->mmio_size);
0315 if (!vgpu->mmio.vreg)
0316 return -ENOMEM;
0317
0318 intel_vgpu_reset_mmio(vgpu, true);
0319
0320 return 0;
0321 }
0322
0323
0324
0325
0326
0327
0328 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
0329 {
0330 vfree(vgpu->mmio.vreg);
0331 vgpu->mmio.vreg = NULL;
0332 }