/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu_reset.h"

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

static bool kfd_initialized;

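/* One-time module init: estimate the system memory available for KFD
 * allocations, initialize the GPUVM memory limits and bring up the
 * kgd2kfd interface.
 */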
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
	kfd_initialized = !ret;

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kfd_initialized) {
		kgd2kfd_exit();
		kfd_initialized = false;
	}
}

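/* Probe KFD support for @adev. On success the device's VRAM is added to
 * the total memory size used to size page table reservations.
 */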
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	if (!kfd_initialized)
		return;

	adev->kfd.dev = kgd2kfd_probe(adev, vf);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->enable_mes) {
		/*
		 * With MES enabled, we only need to initialize
		 * the base address. The size and offset are
		 * not initialized as AMDGPU manages the whole
		 * doorbell space.
		 */
		*aperture_base = adev->doorbell.base;
		*aperture_size = 0;
		*start_offset = 0;
	} else if (adev->doorbell.size > adev->doorbell.num_doorbells *
		   sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
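/* Worker scheduled from amdgpu_amdkfd_gpu_reset(); performs the actual
 * GPU recovery on the reset domain's workqueue.
 */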
static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  kfd.reset_work);

	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

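/* Hand the compute resources (VMIDs, MEC queues, doorbells) that amdgpu
 * reserves for KFD over to the KFD device and finish its initialization.
 */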
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
			.enable_mes = adev->enable_mes,
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, BIF starts to statically use the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc..
		 * In order to route a doorbell to CP engine, the lower
		 * 12 bits of its address has to be outside the range
		 * set for SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
						adev_to_drm(adev), &gpu_resources);

		INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
	}
}

void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume_iommu(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

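/* Request a GPU reset on behalf of KFD. The recovery itself runs from
 * amdgpu_amdkfd_reset_work() on the reset domain's workqueue.
 */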
void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_reset_domain_schedule(adev->reset_domain,
					     &adev->kfd.reset_work);
}

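/* Allocate a pinned, GART-bound and kernel-mapped GTT BO for KFD kernel
 * buffers (cp_mqd_gfx9 marks it as a CP MQD on GFX9) and return the BO
 * along with its GPU address and CPU pointer.
 */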
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

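/* Allocate a buffer object in the GWS (global wave sync) domain for KFD.
 * GWS is an on-chip resource, so the BO is created without CPU access.
 */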
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj)
{
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	bo = &ubo->bo;
	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type)
{
	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}
}

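/* Report the CPU-visible (public) and remaining (private) VRAM sizes,
 * VRAM width and maximum memory clock in MHz for KFD topology.
 */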
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info)
{
	memset(mem_info, 0, sizeof(*mem_info));

	mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
	mem_info->local_mem_size_private = adev->gmc.real_vram_size -
					   adev->gmc.visible_vram_size;

	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
		 &adev->gmc.aper_base,
		 mem_info->local_mem_size_public,
		 mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
	/* the sclk is in quanta of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
{
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

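/* Look up a dma-buf fd exported by this driver and report the backing
 * BO's device, size, metadata and KFD allocation flags. Returns -EINVAL
 * for buffers that did not come from amdgpu.
 */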
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dmabuf_adev)
		*dmabuf_adev = adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src)
{
	struct amdgpu_device *peer_adev = src;
	struct amdgpu_device *adev = dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

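/* Report the xGMI bandwidth in MB/s between two devices, either the
 * single-link minimum (is_min) or scaled by the number of links. Only
 * Aldebaran reports a non-zero value.
 */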
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min)
{
	struct amdgpu_device *adev = dst, *peer_adev;
	int num_links;

	if (adev->asic_type != CHIP_ALDEBARAN)
		return 0;

	if (src)
		peer_adev = src;

	/* num links returns 0 for indirect peers since indirect route is unknown. */
	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
	if (num_links < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
			  adev->gmc.xgmi.physical_node_id,
			  peer_adev->gmc.xgmi.physical_node_id, num_links);
		num_links = 0;
	}

	/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
	return (num_links * 16 * 25000)/BITS_PER_BYTE;
}

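/* Estimate PCIe bandwidth in MB/s from the supported link width and gen
 * speed masks, using either the minimum or maximum supported settings.
 */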
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
{
	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
				fls(adev->pm.pcie_mlw_mask)) - 1;
	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
				CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
				fls(adev->pm.pcie_gen_mask &
				CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
	uint32_t num_lanes_mask = 1 << num_lanes_shift;
	uint32_t gen_speed_mask = 1 << gen_speed_shift;
	int num_lanes_factor = 0, gen_speed_mbits_factor = 0;

	switch (num_lanes_mask) {
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
		num_lanes_factor = 1;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
		num_lanes_factor = 2;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
		num_lanes_factor = 4;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
		num_lanes_factor = 8;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
		num_lanes_factor = 12;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
		num_lanes_factor = 16;
		break;
	case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
		num_lanes_factor = 32;
		break;
	}

	switch (gen_speed_mask) {
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
		gen_speed_mbits_factor = 2500;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
		gen_speed_mbits_factor = 5000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
		gen_speed_mbits_factor = 8000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
		gen_speed_mbits_factor = 16000;
		break;
	case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
		gen_speed_mbits_factor = 32000;
		break;
	}

	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}

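/* Submit an IB from KFD directly to a compute or SDMA kernel ring,
 * bypassing the GPU scheduler, and wait for the resulting fence.
 */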
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);

	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	/* Drop the initial kref_init count (see drm_sched_main as example) */
	dma_fence_put(f);
	ret = dma_fence_wait(f, false);

err_ib_sched:
	amdgpu_job_free(job);
err:
	return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				     uint16_t vmid)
{
	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

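/* Flush TLB entries for the given PASID. On Vega (AI) and Raven (RV)
 * families the flush covers all VM hubs.
 */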
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				      uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
{
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
{
	return adev->have_atomics_support;
}

void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
	struct ras_err_data err_data = {0, 0, 0, NULL};

	/* CPU MCA will handle page retirement if connected_to_cpu is 1 */
	if (!adev->gmc.xgmi.connected_to_cpu)
		amdgpu_umc_poison_handler(adev, &err_data, reset);
	else if (reset)
		amdgpu_amdkfd_gpu_reset(adev);
}

bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
{
	if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
		return adev->gfx.ras->query_utcl2_poison_status(adev);
	else
		return false;
}