#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

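/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev:	amdgpu device.
 * Return: true if MMIO registers are inaccessible (reads return all 1s).
 */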
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* MMIO is considered blocked when a normally readable register
	 * (0xc040) reads back as all 1s.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

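/**
 * amdgpu_virt_init_setting() - apply VF-specific driver settings
 * @adev:	amdgpu device.
 * Enable virtual display where needed and disable atomic modesetting,
 * clockgating and powergating, which are not handled by the guest under
 * SR-IOV.
 */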
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

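/**
 * amdgpu_virt_kiq_reg_write_reg_wait() - write one register and poll another
 * @adev:	amdgpu device.
 * @reg0:	register to write.
 * @reg1:	register to poll.
 * @ref:	reference value to wait for.
 * @mask:	mask applied to @reg1 before comparing against @ref.
 * Issues the write/wait pair through the MES ring when available, otherwise
 * through the KIQ ring, retrying while the fence has not signalled.
 */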
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
					      ref, mask);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't retry in interrupt context: msleep() below is not allowed
	 * there, and waiting longer could block the GPU reset path
	 */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * Before touching the hardware, the VF requests full GPU access from the
 * host; until then, access to some registers may be blocked.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * Once init (or teardown) no longer needs exclusive access, full GPU access
 * is handed back to the host and the VF returns to runtime mode.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Ask the host to reset the GPU assigned to this VF.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

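/**
 * amdgpu_virt_request_init_data() - request init data from the host
 * @adev:	amdgpu device.
 * Ask the host for the GPU_INIT_DATA block and log whether the host supports
 * the REQ_INIT_DATA handshake.
 */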
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for a gpu reset to complete
 * @adev:	amdgpu device.
 * Return: Zero when the host reports the reset finished, otherwise an error
 * code (-EINVAL if the ASIC provides no wait_reset hook).
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization under SR-IOV.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table, error = %d\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table buffer allocated by amdgpu_virt_alloc_mm_table().
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

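/**
 * amd_sriov_msg_checksum() - byte-wise checksum of a PF/VF message
 * @obj:	message to checksum.
 * @obj_size:	message size in bytes.
 * @key:	checksum seed provided by the host (0 when unused).
 * @checksum:	checksum value already stored inside @obj; its bytes are
 *		subtracted so that the checksum field itself is excluded.
 * Return: the resulting checksum. A received message is treated as valid
 * when this value matches the checksum field carried in the message.
 */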
unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

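/*
 * Allocate the bookkeeping used to track and reserve the RAS bad pages that
 * the host reports to this VF through the pf2vf data block.
 */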
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* the GPU is marked bad on the host side well before this many bad
	 * pages accumulate, so an array of 512 entries is enough
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
				    struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* Two kinds of reserve error can be ignored:
		 * 1) the bad page is already allocated (in use by someone);
		 * 2) the bad page is already reserved (duplicate report of
		 *    the same page).
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
					   uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
				     uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;
	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp too large or too small interval values */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

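/* Record the loaded firmware versions in the vf2pf message so the host can
 * inspect what the guest driver is running.
 */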
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

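/* Fill the vf2pf message block (driver version, framebuffer usage, firmware
 * versions) and finish it with a checksum for the host to verify.
 */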
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0;
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic usage info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

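/**
 * amdgpu_virt_fini_data_exchange() - stop the periodic vf2pf update
 * @adev:	amdgpu device.
 */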
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

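/**
 * amdgpu_virt_init_data_exchange() - set up the PF/VF data exchange
 * @adev:	amdgpu device.
 * Locate the pf2vf/vf2pf message blocks and start the periodic vf2pf update
 * work once the reserved VRAM region is mapped.
 */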
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va != NULL) {
		/* the reserved VRAM is mapped: do a full exchange and start
		 * the periodic vf2pf update work
		 */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* early init: the reserved VRAM is not mapped yet, read the
		 * pf2vf block from the VBIOS image instead
		 */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

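/**
 * amdgpu_virt_exchange_data() - exchange pf2vf/vf2pf data with the host
 * @adev:	amdgpu device.
 * Map the message blocks in reserved VRAM, read the pf2vf data, write the
 * vf2pf data, and handle any bad-page information the host provides.
 */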
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
		adev->virt.fw_reserve.p_vf2pf =
			(struct amd_sriov_msg_vf2pf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

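/**
 * amdgpu_detect_virtualization() - detect SR-IOV/passthrough and set virt ops
 * @adev:	amdgpu device.
 * Read the IOV identification register to determine whether the device is an
 * SR-IOV VF or has IOV enabled, detect passthrough mode, and install the
 * ASIC-specific virtualization callbacks for VFs.
 */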
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default:
		/* other chips do not support SR-IOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* no SR-IOV identification: a device seen inside a VM is
		 * assumed to be passed through to the guest
		 */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	/* the VF status is known now, install the virtualization ops */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* do not send GPU_INIT_DATA when running on MS Hyper-V */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to the host */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
			nv_set_virt_ops(adev);
			/* try to send the GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default:
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev);
}

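/*
 * Enable debugfs register access: in normal VF mode access goes through KIQ,
 * in debug mode direct MMIO access is allowed by clearing the runtime flag,
 * otherwise access is denied.
 */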
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

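/**
 * amdgpu_virt_get_sriov_vf_mode() - report the SR-IOV operating mode
 * @adev:	amdgpu device.
 * Return: SRIOV_VF_MODE_ONE_VF, SRIOV_VF_MODE_MULTI_VF or
 * SRIOV_VF_MODE_BARE_METAL.
 */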
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

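/**
 * amdgpu_virt_update_sriov_video_codec() - apply host video codec limits
 * @adev:	amdgpu device.
 * @encode:	encode codec capability array to update, may be NULL.
 * @encode_array_size:	number of entries in @encode.
 * @decode:	decode codec capability array to update, may be NULL.
 * @decode_array_size:	number of entries in @decode.
 * Clamp the advertised codec capabilities to the per-VF bandwidth limits the
 * host reported in the pf2vf message.
 */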
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
					  struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
					  struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* legacy case: AMDGPU_REGS_RLC writes that are not flagged
		 * AMDGPU_REGS_NO_KIQ also go through the RLCG path
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
			   !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

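/*
 * Perform an indirect register read or write through the RLCG interface:
 * the value goes into scratch_reg0, the address and command flag into
 * scratch_reg1, and the spare interrupt (when present) notifies the RLC
 * firmware. Completion is polled on scratch_reg1 and the result (for reads)
 * is returned from scratch_reg0. grbm_cntl/grbm_idx are mirrored into
 * scratch_reg2/scratch_reg3 instead.
 */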
static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect registers access through rlcg is not available\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

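/**
 * amdgpu_sriov_wreg() - register write helper for SR-IOV VFs
 * @adev:	amdgpu device.
 * @offset:	register offset in dwords.
 * @value:	value to write.
 * @acc_flags:	access flags (AMDGPU_REGS_*).
 * @hwip:	hardware IP block the register belongs to.
 * Route the write through RLCG indirect access when required, otherwise fall
 * back to a regular MMIO/KIQ write.
 */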
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

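/**
 * amdgpu_sriov_rreg() - register read helper for SR-IOV VFs
 * @adev:	amdgpu device.
 * @offset:	register offset in dwords.
 * @acc_flags:	access flags (AMDGPU_REGS_*).
 * @hwip:	hardware IP block the register belongs to.
 * Return: the register value, read through RLCG indirect access when
 * required, otherwise via a regular MMIO/KIQ read.
 */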
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
	    amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}