// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
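
/*
 * HFI (Host to Firmware Interface) support for the a6xx GMU: the host and
 * the GMU exchange packets through ring-buffer queues in shared memory
 * (see a6xx_hfi_init() below). The host writes a command packet into the
 * command queue, raises the host-to-GMU interrupt and then waits for the
 * matching acknowledgement on the response queue.
 */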

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

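	/*
	 * A packet larger than the caller's buffer means the queue memory is
	 * corrupt; trap hard here rather than read past the end of 'data'.
	 */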
	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

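	/*
	 * Non-legacy firmware keeps the write index 4-dword aligned; fill the
	 * padding slots with a marker value so they are never read as data.
	 */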
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	/* Notify the GMU that a new message is waiting in the queue */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
	u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for the GMU to raise the response-queue interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet from the response queue */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");

			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* Copy back as much of the response payload as was requested */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
	void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

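	/*
	 * The first dword of every packet is the header: message id in the low
	 * byte, then the payload size in dwords, the packet type and the
	 * sequence number used to match the eventual response.
	 */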
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

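	/* Advertise the HFI interface version the driver supports */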
	msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 13;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x0;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x50080;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x6000030c;
	msg->ddr_cmds_data[1][1] = 0x600000db;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x60000618;
	msg->ddr_cmds_data[2][1] = 0x600001b6;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x60000925;
	msg->ddr_cmds_data[3][1] = 0x60000291;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x60000dc1;
	msg->ddr_cmds_data[4][1] = 0x600003dc;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x600010ad;
	msg->ddr_cmds_data[5][1] = 0x600004ae;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x600014c3;
	msg->ddr_cmds_data[6][1] = 0x600005d4;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x6000176a;
	msg->ddr_cmds_data[7][1] = 0x60000693;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001f01;
	msg->ddr_cmds_data[8][1] = 0x600008b5;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x60002940;
	msg->ddr_cmds_data[9][1] = 0x60000b95;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002f68;
	msg->ddr_cmds_data[10][1] = 0x60000d50;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x60003700;
	msg->ddr_cmds_data[11][1] = 0x60000f71;
	msg->ddr_cmds_data[11][2] = 0x60000008;
	msg->ddr_cmds_data[12][0] = 0x60003fce;
	msg->ddr_cmds_data[12][1] = 0x600011ea;
	msg->ddr_cmds_data[12][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x0;

	msg->cnoc_cmds_addrs[0] = 0x50054;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x50088;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5006c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a619(adreno_gpu))
		a619_build_bw_table(&msg);
	else if (adreno_is_a640_family(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else if (adreno_is_7c3(adreno_gpu))
		adreno_7c3_build_bw_table(&msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1;
	msg.freq = index;
	msg.bw = 0;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	/* Exchange firmware versions; the returned value isn't needed, hence NULL */
	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * The test message is the last step of the sequence; a failure here is
	 * not fatal, so the return value is deliberately ignored.
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;

		memset(&queue->history, 0xff, sizeof(queue->history));
		queue->history_idx = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
	struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
	u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	memset(&queue->history, 0xff, sizeof(queue->history));
	queue->history_idx = 0;

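	/* Initialize the queue header that lives in memory shared with the GMU */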
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

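	/*
	 * The queue table is the table header followed immediately by one
	 * queue header per queue.
	 */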
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

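	/* Host-to-GMU command queue, one 4K page past the start of the HFI buffer */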
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

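	/* GMU-to-host response queue; legacy firmware expects a different queue id */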
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}