0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030 #include <linux/delay.h>
0031 #include <linux/stdarg.h>
0032
0033 #include "dm_services.h"
0034
0035 #include "dc.h"
0036 #include "dc_dmub_srv.h"
0037 #include "reg_helper.h"
0038
0039 static inline void submit_dmub_read_modify_write(
0040 struct dc_reg_helper_state *offload,
0041 const struct dc_context *ctx)
0042 {
0043 struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
0044 bool gather = false;
0045
0046 offload->should_burst_write =
0047 (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
0048 cmd_buf->header.payload_bytes =
0049 sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
0050
0051 gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
0052 ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
0053
0054 dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
0055
0056 ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
0057
0058 memset(cmd_buf, 0, sizeof(*cmd_buf));
0059
0060 offload->reg_seq_count = 0;
0061 offload->same_addr_count = 0;
0062 }
0063
0064 static inline void submit_dmub_burst_write(
0065 struct dc_reg_helper_state *offload,
0066 const struct dc_context *ctx)
0067 {
0068 struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
0069 bool gather = false;
0070
0071 cmd_buf->header.payload_bytes =
0072 sizeof(uint32_t) * offload->reg_seq_count;
0073
0074 gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
0075 ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
0076
0077 dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
0078
0079 ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
0080
0081 memset(cmd_buf, 0, sizeof(*cmd_buf));
0082
0083 offload->reg_seq_count = 0;
0084 }
0085
0086 static inline void submit_dmub_reg_wait(
0087 struct dc_reg_helper_state *offload,
0088 const struct dc_context *ctx)
0089 {
0090 struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
0091 bool gather = false;
0092
0093 gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
0094 ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
0095
0096 dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
0097
0098 memset(cmd_buf, 0, sizeof(*cmd_buf));
0099 offload->reg_seq_count = 0;
0100
0101 ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
0102 }
0103
/*
 * Accumulated field updates for a single register: 'mask' is the OR of all
 * field masks touched so far, 'value' holds the corresponding shifted field
 * values (see set_reg_field_value_masks()).
 */
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};
0108
/* One register address paired with its accumulated value/mask update. */
struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};
0113
0114 static inline void set_reg_field_value_masks(
0115 struct dc_reg_value_masks *field_value_mask,
0116 uint32_t value,
0117 uint32_t mask,
0118 uint8_t shift)
0119 {
0120 ASSERT(mask != 0);
0121
0122 field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
0123 field_value_mask->mask = field_value_mask->mask | mask;
0124 }
0125
/*
 * Fold @n (shift, mask, value) triples into @field_value_mask. The first
 * triple arrives as named parameters; the remaining n-1 triples are pulled
 * from the caller's va_list.
 */
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	int i;

	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
	}
}
0148
/* Submit the pending read-modify-write sequence and kick DMUB to execute it. */
static void dmub_flush_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
0156
/* Submit the pending burst-write command and kick DMUB to execute it. */
static void dmub_flush_burst_write_buffer_execute(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
0164
0165 static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
0166 uint32_t reg_val)
0167 {
0168 struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
0169 struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
0170
0171
0172 if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
0173 dmub_flush_burst_write_buffer_execute(offload, ctx);
0174
0175 if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
0176 addr != cmd_buf->addr) {
0177 dmub_flush_burst_write_buffer_execute(offload, ctx);
0178 return false;
0179 }
0180
0181 cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
0182 cmd_buf->header.sub_type = 0;
0183 cmd_buf->addr = addr;
0184 cmd_buf->write_values[offload->reg_seq_count] = reg_val;
0185 offload->reg_seq_count++;
0186
0187 return true;
0188 }
0189
/*
 * Pack one read-modify-write operation into the DMUB offload buffer and
 * return the value component that was packed.
 *
 * Flushes the buffer first when the RMW sequence array is full, and
 * attempts burst-write packing when the preceding sequence suggested
 * repeated writes to one address (offload->should_burst_write).
 */
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush a full RMW buffer, unless a burst write is being assembled */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		/* try to fold this write into a burst; on refusal (address
		 * changed) drop back to RMW packing below */
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack as a read-modify-write sequence entry */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		/* count consecutive entries hitting the same address; this
		 * drives should_burst_write in submit_dmub_read_modify_write() */
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
0228
0229 static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
0230 uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
0231 {
0232 struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
0233 struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
0234
0235 cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
0236 cmd_buf->header.sub_type = 0;
0237 cmd_buf->reg_wait.addr = addr;
0238 cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
0239 cmd_buf->reg_wait.mask = mask;
0240 cmd_buf->reg_wait.time_out_us = time_out_us;
0241 }
0242
0243 uint32_t generic_reg_update_ex(const struct dc_context *ctx,
0244 uint32_t addr, int n,
0245 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
0246 ...)
0247 {
0248 struct dc_reg_value_masks field_value_mask = {0};
0249 uint32_t reg_val;
0250 va_list ap;
0251
0252 va_start(ap, field_value1);
0253
0254 set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
0255 field_value1, ap);
0256
0257 va_end(ap);
0258
0259 if (ctx->dmub_srv &&
0260 ctx->dmub_srv->reg_helper_offload.gather_in_progress)
0261 return dmub_reg_value_pack(ctx, addr, &field_value_mask);
0262
0263
0264
0265 reg_val = dm_read_reg(ctx, addr);
0266 reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
0267 dm_write_reg(ctx, addr, reg_val);
0268 return reg_val;
0269 }
0270
0271 uint32_t generic_reg_set_ex(const struct dc_context *ctx,
0272 uint32_t addr, uint32_t reg_val, int n,
0273 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
0274 ...)
0275 {
0276 struct dc_reg_value_masks field_value_mask = {0};
0277 va_list ap;
0278
0279 va_start(ap, field_value1);
0280
0281 set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
0282 field_value1, ap);
0283
0284 va_end(ap);
0285
0286
0287
0288 reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
0289
0290 if (ctx->dmub_srv &&
0291 ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
0292 return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
0293
0294 }
0295
0296 dm_write_reg(ctx, addr, reg_val);
0297 return reg_val;
0298 }
0299
/* Read @addr once and extract a single field into *field_value;
 * returns the raw register value. */
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	const uint32_t reg_val = dm_read_reg(ctx, addr);

	*field_value = get_reg_field_value_ex(reg_val, mask, shift);

	return reg_val;
}
0307
/* Read @addr once and extract 2 fields; returns the raw register value. */
uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	const uint8_t shifts[] = { shift1, shift2 };
	const uint32_t masks[] = { mask1, mask2 };
	uint32_t * const outs[] = { field_value1, field_value2 };
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 2; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0317
/* Read @addr once and extract 3 fields; returns the raw register value. */
uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	const uint8_t shifts[] = { shift1, shift2, shift3 };
	const uint32_t masks[] = { mask1, mask2, mask3 };
	uint32_t * const outs[] = { field_value1, field_value2, field_value3 };
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 3; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0329
/* Read @addr once and extract 4 fields; returns the raw register value. */
uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	const uint8_t shifts[] = { shift1, shift2, shift3, shift4 };
	const uint32_t masks[] = { mask1, mask2, mask3, mask4 };
	uint32_t * const outs[] = {
		field_value1, field_value2, field_value3, field_value4
	};
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 4; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0343
/* Read @addr once and extract 5 fields; returns the raw register value. */
uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	const uint8_t shifts[] = { shift1, shift2, shift3, shift4, shift5 };
	const uint32_t masks[] = { mask1, mask2, mask3, mask4, mask5 };
	uint32_t * const outs[] = {
		field_value1, field_value2, field_value3, field_value4,
		field_value5
	};
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 5; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0359
/* Read @addr once and extract 6 fields; returns the raw register value. */
uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	const uint8_t shifts[] = {
		shift1, shift2, shift3, shift4, shift5, shift6
	};
	const uint32_t masks[] = {
		mask1, mask2, mask3, mask4, mask5, mask6
	};
	uint32_t * const outs[] = {
		field_value1, field_value2, field_value3, field_value4,
		field_value5, field_value6
	};
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 6; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0377
/* Read @addr once and extract 7 fields; returns the raw register value. */
uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	const uint8_t shifts[] = {
		shift1, shift2, shift3, shift4, shift5, shift6, shift7
	};
	const uint32_t masks[] = {
		mask1, mask2, mask3, mask4, mask5, mask6, mask7
	};
	uint32_t * const outs[] = {
		field_value1, field_value2, field_value3, field_value4,
		field_value5, field_value6, field_value7
	};
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 7; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0397
/* Read @addr once and extract 8 fields; returns the raw register value. */
uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	const uint8_t shifts[] = {
		shift1, shift2, shift3, shift4, shift5, shift6, shift7, shift8
	};
	const uint32_t masks[] = {
		mask1, mask2, mask3, mask4, mask5, mask6, mask7, mask8
	};
	uint32_t * const outs[] = {
		field_value1, field_value2, field_value3, field_value4,
		field_value5, field_value6, field_value7, field_value8
	};
	const uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 8; i++)
		*outs[i] = get_reg_field_value_ex(reg_val, masks[i], shifts[i]);

	return reg_val;
}
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
/*
 * Poll a register field until it equals @condition_value or the retry
 * budget is exhausted, logging a warning (and breaking to the debugger on
 * non-FPGA environments) on timeout.
 *
 * Under an active DMUB gather the wait is packed as an offload command
 * instead of polling; the packed timeout is the product of the per-poll
 * delay and the try count.
 *
 * NOTE(review): the loop bound is '<=', so up to time_out_num_tries + 1
 * reads are performed; the first read happens without a preceding delay.
 */
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/* total wait budget is capped at 3 seconds */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			/* msleep for millisecond-scale delays, udelay below that */
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			/* flag slow (>1 ms) waits to help spot regressions */
			if (i * delay_between_poll_us > 1000 &&
					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}
0503
/*
 * Write @data to an indirectly addressed register: program the index
 * register first, then the data register.
 */
void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}
0511
0512 uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
0513 uint32_t addr_index, uint32_t addr_data,
0514 uint32_t index)
0515 {
0516 uint32_t value = 0;
0517
0518
0519 if (ctx->dmub_srv &&
0520 ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
0521 ASSERT(false);
0522 }
0523
0524 dm_write_reg(ctx, addr_index, index);
0525 value = dm_read_reg(ctx, addr_data);
0526
0527 return value;
0528 }
0529
/*
 * Read an indirectly addressed register and extract @n fields. The first
 * (shift, mask, out-pointer) triple arrives as named parameters; the
 * remaining n-1 triples come from varargs. Returns the raw value.
 */
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t value = 0;
	int i;
	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t *field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
	}

	va_end(ap);

	return value;
}
0560
/*
 * Merge @n field updates into @reg_val and write the result to an
 * indirectly addressed register. Returns the value written.
 */
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	int i;
	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}
0590
0591
0592 uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
0593 uint32_t index, uint32_t reg_val, int n,
0594 uint8_t shift1, uint32_t mask1, uint32_t field_value1,
0595 ...)
0596 {
0597 uint32_t shift, mask, field_value;
0598 int i = 1;
0599
0600 va_list ap;
0601
0602 va_start(ap, field_value1);
0603
0604 reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
0605
0606 while (i < n) {
0607 shift = va_arg(ap, uint32_t);
0608 mask = va_arg(ap, uint32_t);
0609 field_value = va_arg(ap, uint32_t);
0610
0611 reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
0612 i++;
0613 }
0614
0615 dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
0616 va_end(ap);
0617
0618 return reg_val;
0619 }
0620
0621 uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
0622 uint32_t index, int n,
0623 uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
0624 ...)
0625 {
0626 uint32_t shift, mask, *field_value;
0627 uint32_t value = 0;
0628 int i = 1;
0629
0630 va_list ap;
0631
0632 va_start(ap, field_value1);
0633
0634 value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
0635 *field_value1 = get_reg_field_value_ex(value, mask1, shift1);
0636
0637 while (i < n) {
0638 shift = va_arg(ap, uint32_t);
0639 mask = va_arg(ap, uint32_t);
0640 field_value = va_arg(ap, uint32_t *);
0641
0642 *field_value = get_reg_field_value_ex(value, mask, shift);
0643 i++;
0644 }
0645
0646 va_end(ap);
0647
0648 return value;
0649 }
0650
0651 void reg_sequence_start_gather(const struct dc_context *ctx)
0652 {
0653
0654
0655
0656
0657
0658 if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
0659 struct dc_reg_helper_state *offload =
0660 &ctx->dmub_srv->reg_helper_offload;
0661
0662
0663 ASSERT(!offload->gather_in_progress);
0664
0665 offload->gather_in_progress = true;
0666 }
0667 }
0668
0669 void reg_sequence_start_execute(const struct dc_context *ctx)
0670 {
0671 struct dc_reg_helper_state *offload;
0672
0673 if (!ctx->dmub_srv)
0674 return;
0675
0676 offload = &ctx->dmub_srv->reg_helper_offload;
0677
0678 if (offload && offload->gather_in_progress) {
0679 offload->gather_in_progress = false;
0680 offload->should_burst_write = false;
0681 switch (offload->cmd_data.cmd_common.header.type) {
0682 case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
0683 submit_dmub_read_modify_write(offload, ctx);
0684 break;
0685 case DMUB_CMD__REG_REG_WAIT:
0686 submit_dmub_reg_wait(offload, ctx);
0687 break;
0688 case DMUB_CMD__REG_SEQ_BURST_WRITE:
0689 submit_dmub_burst_write(offload, ctx);
0690 break;
0691 default:
0692 return;
0693 }
0694
0695 dc_dmub_srv_cmd_execute(ctx->dmub_srv);
0696 }
0697 }
0698
0699 void reg_sequence_wait_done(const struct dc_context *ctx)
0700 {
0701
0702 struct dc_reg_helper_state *offload;
0703
0704 if (!ctx->dmub_srv)
0705 return;
0706
0707 offload = &ctx->dmub_srv->reg_helper_offload;
0708
0709 if (offload &&
0710 ctx->dc->debug.dmub_offload_enabled &&
0711 !ctx->dc->debug.dmcub_emulation) {
0712 dc_dmub_srv_wait_idle(ctx->dmub_srv);
0713 }
0714 }