/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "sdma_v6_0_0_pkt_open.h"
#include "nbio_v4_3.h"
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"

MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);

static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
	u32 base;

	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
		base = adev->reg_offset[GC_HWIP][0][1];
		if (instance != 0)
			internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
	} else {
		base = adev->reg_offset[GC_HWIP][0][0];
		if (instance == 1)
			internal_offset += SDMA1_REG_OFFSET;
	}

	return base + internal_offset;
}

static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
	int err = 0;
	const struct sdma_firmware_header_v2_0 *hdr;

	err = amdgpu_ucode_validate(sdma_inst->fw);
	if (err)
		return err;

	hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
	sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
	sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);

	if (sdma_inst->feature_version >= 20)
		sdma_inst->burst_nop = true;

	return 0;
}

static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
{
	release_firmware(adev->sdma.instance[0].fw);

	memset((void *)adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}
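
/**
 * sdma_v6_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */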
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[30];
	char ucode_prefix[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]);
	if (err)
		goto out;

	/* all instances share one firmware image */
	for (i = 1; i < adev->sdma.num_instances; i++) {
		memcpy((void *)&adev->sdma.instance[i],
		       (void *)&adev->sdma.instance[0],
		       sizeof(struct amdgpu_sdma_instance));
	}

	DRM_DEBUG("psp_load == '%s'\n",
		  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
		info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
		info->fw = adev->sdma.instance[0].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
		info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
		info->fw = adev->sdma.instance[0].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name);
		sdma_v6_0_destroy_inst_ctx(adev);
	}
	return err;
}

static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 1);
	/* this is the offset we need patch */
	ret = ring->wptr & ring->buf_mask;
	/* insert dummy here and patch it later */
	amdgpu_ring_write(ring, 0x55aa55aa);

	return ret;
}

static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
					   unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (cur > offset)
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
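
/**
 * sdma_v6_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware.
 */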
static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = (u64 *)ring->rptr_cpu_addr;

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}
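
/**
 * sdma_v6_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */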
static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	u64 wptr = 0;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
	}

	return wptr >> 2;
}
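
/**
 * sdma_v6_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */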
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;

	DRM_DEBUG("Setting write pointer\n");

	if (ring->is_mes_queue) {
		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
		aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);

		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr << 2);
		*wptr_saved = ring->wptr << 2;
		if (*is_queue_unmap) {
			WDOORBELL64(aggregated_db_index, ring->wptr << 2);
			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				  ring->doorbell_index, ring->wptr << 2);
			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		} else {
			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				  ring->doorbell_index, ring->wptr << 2);
			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);

			if (*is_queue_unmap)
				WDOORBELL64(aggregated_db_index,
					    ring->wptr << 2);
		}
	} else {
		if (ring->use_doorbell) {
			DRM_DEBUG("Using doorbell -- "
				  "wptr_offs == 0x%08x "
				  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
				  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
				  ring->wptr_offs,
				  lower_32_bits(ring->wptr << 2),
				  upper_32_bits(ring->wptr << 2));
			/* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr << 2);
			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				  ring->doorbell_index, ring->wptr << 2);
			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		} else {
			DRM_DEBUG("Not using doorbell -- "
				  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
				  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
				  ring->me,
				  lower_32_bits(ring->wptr << 2),
				  ring->me,
				  upper_32_bits(ring->wptr << 2));
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
					ring->me, regSDMA0_QUEUE0_RB_WPTR),
					lower_32_bits(ring->wptr << 2));
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
					ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
					upper_32_bits(ring->wptr << 2));
		}
	}
}

static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
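
/**
 * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */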
static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
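
	/* An IB packet must end on a 8 DW boundary--the next dword
	 * must be on a 8-dword boundary. Our IB packet below is 6
	 * dwords long, thus add x number of NOPs, such that, in
	 * modular arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is,
	 * (wptr + 6 + x) % 8 = 0.
	 * The expression below, is a solution of x.
	 */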
	sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
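
/**
 * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
 *
 * @ring: amdgpu ring pointer
 *
 * Flush the IB by graphics cache rinse.
 */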
static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
	uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
			    SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
			    SDMA_GCR_GLI_INV(1);

	/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
			  SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
			  SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
			  SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}
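
/**
 * sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */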
static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
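
/**
 * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: fence seq number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */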
static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		uint32_t ctx = ring->is_mes_queue ?
			(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
		/* generate an interrupt */
		amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
	}
}
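
/**
 * sdma_v6_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */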
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
		ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
	}

	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}
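
/**
 * sdma_v6_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */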
static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
{
}
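
/**
 * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */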
static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
}
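
/**
 * sdma_v6_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */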
static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v6_0_gfx_stop(adev);
		sdma_v6_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl);
	}
}
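
/**
 * sdma_v6_0_gfx_resume - setup and start the gfx DMA engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
 * Returns 0 for success, error for failure.
 */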
static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	u64 wptr_gpu_addr;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = ring->wptr_gpu_addr;
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
				lower_32_bits(wptr_gpu_addr));
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
				upper_32_bits(wptr_gpu_addr));

		/* set the wb addr where the rptr is written back */
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
				upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
				lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);

		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programing wptr to a less value, need set minor_ptr_update first */
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
		doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
							OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
		}
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);

		if (i == 0)
			adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
					ring->doorbell_index,
					adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);

		if (amdgpu_sriov_vf(adev))
			sdma_v6_0_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr programed */
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);

		/* Set up RESP_MODE to non-copy addresses */
		temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
		temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);

		/* program default cache read and write policy */
		temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
		/* clean read policy and write policy bits */
		temp &= 0xFF0FFF;
		temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
			 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
			 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
			WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
		}

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);

		ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);

		ring->sched.ready = true;

		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need the two lines below */
			sdma_v6_0_ctx_switch_enable(adev, true);
			sdma_v6_0_enable(adev, true);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r) {
			ring->sched.ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}
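
/**
 * sdma_v6_0_rlc_resume - setup and start the compute DMA engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */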
static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev)
{
	return 0;
}
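
/**
 * sdma_v6_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */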
static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;
	bool use_broadcast;

	/* halt the MEs */
	sdma_v6_0_enable(adev, false);

	if (!adev->sdma.instance[0].fw)
		return -EINVAL;

	/* use broadcast mode to load SDMA microcode by default */
	use_broadcast = true;

	if (use_broadcast) {
		dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n");
		/* load the ctx (TH0) ucode */
		hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[0].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				msleep(1);
			WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
		}

		/* load the ctl (TH1) ucode */
		fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[0].fw->data +
			 le32_to_cpu(hdr->ctl_ucode_offset));

		WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				msleep(1);
			WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
		}
	} else {
		dev_info(adev->dev, "Use legacy method to load SDMA firmware\n");
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* load the ctx (TH0) ucode */
			hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
			amdgpu_ucode_print_sdma_hdr(&hdr->header);
			fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;

			fw_data = (const __le32 *)
				(adev->sdma.instance[0].fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0);

			for (j = 0; j < fw_size; j++) {
				if (amdgpu_emu_mode == 1 && j % 500 == 0)
					msleep(1);
				WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
			}

			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);

			/* load the ctl (TH1) ucode */
			fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;

			fw_data = (const __le32 *)
				(adev->sdma.instance[0].fw->data +
				 le32_to_cpu(hdr->ctl_ucode_offset));

			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000);

			for (j = 0; j < fw_size; j++) {
				if (amdgpu_emu_mode == 1 && j % 500 == 0)
					msleep(1);
				WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
			}

			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);
		}
	}

	return 0;
}

static int sdma_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp;
	int i;

	sdma_v6_0_gfx_stop(adev);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* freeze and halt the engine before resetting it */
		tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
		tmp |= SDMA0_FREEZE__FREEZE_MASK;
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
		tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK;
		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp);

		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);

		udelay(100);

		/* toggle GRBM soft reset for this SDMA instance */
		tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
		WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

		udelay(100);

		WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
		tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);

		udelay(100);
	}

	return sdma_v6_0_start(adev);
}

static bool sdma_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;
	long tmo = msecs_to_jiffies(1000);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	return false;
}
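
/**
 * sdma_v6_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */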
static int sdma_v6_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v6_0_ctx_switch_enable(adev, false);
		sdma_v6_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v6_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v6_0_load_microcode(adev);
		if (r)
			return r;

		/* give the firmware time to settle on the emulator */
		if (amdgpu_emu_mode == 1)
			msleep(1000);
	}

	/* unhalt the MEs */
	sdma_v6_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v6_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v6_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v6_0_rlc_resume(adev);

	return r;
}

static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
			      struct amdgpu_mqd_prop *prop)
{
	struct v11_sdma_mqd *m = mqd;
	uint64_t wb_gpu_addr;

	m->sdmax_rlcx_rb_cntl =
		order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);

	wb_gpu_addr = prop->wptr_gpu_addr;
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);

	wb_gpu_addr = prop->rptr_gpu_addr;
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);

	m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0,
							regSDMA0_QUEUE0_IB_CNTL));

	m->sdmax_rlcx_doorbell_offset =
		prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);

	m->sdmax_rlcx_skip_cntl = 0;
	m->sdmax_rlcx_context_status = 0;
	m->sdmax_rlcx_doorbell_log = 0;

	m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
	m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;

	return 0;
}

static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd);
	adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init;
}
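
/**
 * sdma_v6_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory
 * and verifying the written value.
 * Returns 0 for success, error for failure.
 */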
static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;
	volatile uint32_t *cpu_ptr = NULL;

	tmp = 0xCAFEDEAD;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;
		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_PADDING_OFFS);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		*cpu_ptr = tmp;
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
			return r;
		}

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(tmp);
	}

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->is_mes_queue)
			tmp = le32_to_cpu(*cpu_ptr);
		else
			tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);

	return r;
}
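
/**
 * sdma_v6_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */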
static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;
	volatile uint32_t *cpu_ptr = NULL;

	tmp = 0xCAFEDEAD;
	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t offset = 0;
		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_PADDING_OFFS);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		*cpu_ptr = tmp;
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r) {
			dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
			return r;
		}

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(tmp);

		r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err0;
		}
	}

	ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}

	if (ring->is_mes_queue)
		tmp = le32_to_cpu(*cpu_ptr);
	else
		tmp = le32_to_cpu(adev->wb.wb[index]);

	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}
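
/**
 * sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */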
static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
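
/**
 * sdma_v6_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */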
static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}
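
/**
 * sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */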
static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
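
/**
 * sdma_v6_0_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */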
static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP);
}
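
/**
 * sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu ring pointer
 *
 * Make sure all previous operations are completed.
 */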
static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
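
/**
 * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page table base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */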
static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	/* wait for a cycle to reset vm_inv_eng*_ack */
	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v6_0_set_ring_funcs(adev);
	sdma_v6_0_set_buffer_funcs(adev);
	sdma_v6_0_set_vm_pte_funcs(adev);
	sdma_v6_0_set_irq_funcs(adev);
	sdma_v6_0_set_mqd_funcs(adev);

	return 0;
}

static int sdma_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v6_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->me = i;

		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
			  ring->use_doorbell ? "true" : "false");

		ring->doorbell_index =
			(adev->doorbell_index.sdma_engine[i] << 1); /* get DWORD offset */

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v6_0_destroy_inst_ctx(adev);

	return 0;
}

static int sdma_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = sdma_v6_0_start(adev);

	return r;
}

static int sdma_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v6_0_ctx_switch_enable(adev, false);
	sdma_v6_0_enable(adev, false);

	return 0;
}

static int sdma_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v6_0_hw_fini(adev);
}

static int sdma_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v6_0_hw_init(adev);
}

static bool sdma_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	sdma_gfx_preempt =
		sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT);

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);

	sdma_cntl = RREG32(reg_offset);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32(reg_offset, sdma_cntl);

	return 0;
}

static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	int instances, queue;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: SDMA trap\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
		return 0;
	}

	queue = entry->ring_id & 0xf;
	instances = (entry->ring_id & 0xf0) >> 4;
	if (instances > 1) {
		DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n");
		return -EINVAL;
	}

	switch (entry->client_id) {
	case SOC21_IH_CLIENTID_GFX:
		switch (queue) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[instances].ring);
			break;
		default:
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

static int sdma_v6_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int sdma_v6_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}

const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
	.name = "sdma_v6_0",
	.early_init = sdma_v6_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v6_0_sw_init,
	.sw_fini = sdma_v6_0_sw_fini,
	.hw_init = sdma_v6_0_hw_init,
	.hw_fini = sdma_v6_0_hw_fini,
	.suspend = sdma_v6_0_suspend,
	.resume = sdma_v6_0_resume,
	.is_idle = sdma_v6_0_is_idle,
	.wait_for_idle = sdma_v6_0_wait_for_idle,
	.soft_reset = sdma_v6_0_soft_reset,
	.check_soft_reset = sdma_v6_0_check_soft_reset,
	.set_clockgating_state = sdma_v6_0_set_clockgating_state,
	.set_powergating_state = sdma_v6_0_set_powergating_state,
	.get_clockgating_state = sdma_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v6_0_ring_get_rptr,
	.get_wptr = sdma_v6_0_ring_get_wptr,
	.set_wptr = sdma_v6_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v6_0_ring_init_cond_exec */
		6 + /* sdma_v6_0_ring_emit_hdp_flush */
		6 + /* sdma_v6_0_ring_emit_pipeline_sync */
		/* sdma_v6_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */
	.emit_ib = sdma_v6_0_ring_emit_ib,
	.emit_mem_sync = sdma_v6_0_ring_emit_mem_sync,
	.emit_fence = sdma_v6_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v6_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush,
	.test_ring = sdma_v6_0_ring_test_ring,
	.test_ib = sdma_v6_0_ring_test_ib,
	.insert_nop = sdma_v6_0_ring_insert_nop,
	.pad_ib = sdma_v6_0_ring_pad_ib,
	.emit_wreg = sdma_v6_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v6_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v6_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v6_0_ring_preempt_ib,
};

static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
	.set = sdma_v6_0_set_trap_irq_state,
	.process = sdma_v6_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
	.process = sdma_v6_0_process_illegal_inst_irq,
};

static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}
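
/**
 * sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: if a secure copy should be used
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */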
static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
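
/**
 * sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */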
static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v6_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v6_0_emit_fill_buffer,
};

static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v6_0_vm_copy_pte,
	.write_pte = sdma_v6_0_vm_write_pte,
	.set_pte_pde = sdma_v6_0_vm_set_pte_pde,
};

static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v6_0_ip_funcs,
};