0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include <linux/firmware.h>
0026
0027 #include "i915_drv.h"
0028 #include "i915_reg.h"
0029 #include "intel_de.h"
0030 #include "intel_dmc.h"
0031 #include "intel_dmc_regs.h"
0032
0033
0034
0035
0036
0037
0038
0039
0040
/* DMC firmware version: major in the high 16 bits, minor in the low 16. */
#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)

/* Builds the firmware file name: "i915/<platform>_dmc_ver<major>_<minor>.bin". */
#define DMC_PATH(platform, major, minor) \
	"i915/" \
	__stringify(platform) "_dmc_ver" \
	__stringify(major) "_" \
	__stringify(minor) ".bin"

#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000

#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

/*
 * Per-platform firmware path, required version and maximum payload size.
 * Note: DMC_PATH() stringifies its minor argument, so a zero-padded minor
 * (e.g. 06) is kept for the file name, while DMC_VERSION() uses the plain
 * decimal value (a leading zero there would be an octal literal).
 */
#define DG2_DMC_PATH		DMC_PATH(dg2, 2, 06)
#define DG2_DMC_VERSION_REQUIRED	DMC_VERSION(2, 06)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH		DMC_PATH(adlp, 2, 16)
#define ADLP_DMC_VERSION_REQUIRED	DMC_VERSION(2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);

#define ADLS_DMC_PATH		DMC_PATH(adls, 2, 01)
#define ADLS_DMC_VERSION_REQUIRED	DMC_VERSION(2, 1)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH		DMC_PATH(dg1, 2, 02)
#define DG1_DMC_VERSION_REQUIRED	DMC_VERSION(2, 2)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH		DMC_PATH(rkl, 2, 03)
#define RKL_DMC_VERSION_REQUIRED	DMC_VERSION(2, 3)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH		DMC_PATH(tgl, 2, 12)
#define TGL_DMC_VERSION_REQUIRED	DMC_VERSION(2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH		DMC_PATH(icl, 1, 09)
#define ICL_DMC_VERSION_REQUIRED	DMC_VERSION(1, 9)
#define ICL_DMC_MAX_FW_SIZE	0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH		DMC_PATH(glk, 1, 04)
#define GLK_DMC_VERSION_REQUIRED	DMC_VERSION(1, 4)
#define GLK_DMC_MAX_FW_SIZE	0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH		DMC_PATH(kbl, 1, 04)
#define KBL_DMC_VERSION_REQUIRED	DMC_VERSION(1, 4)
#define KBL_DMC_MAX_FW_SIZE	BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH		DMC_PATH(skl, 1, 27)
#define SKL_DMC_VERSION_REQUIRED	DMC_VERSION(1, 27)
#define SKL_DMC_MAX_FW_SIZE	BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH		DMC_PATH(bxt, 1, 07)
#define BXT_DMC_VERSION_REQUIRED	DMC_VERSION(1, 7)
#define BXT_DMC_MAX_FW_SIZE	0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

/* Sentinel: entry has no firmware offset assigned. */
#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
/* Max fw_info entries for package header version 1 / version 2. */
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
/* Max MMIO write pairs carried by a v1 / v3 DMC header. */
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
/* v1 headers have no start address field; this fixed value is used. */
#define DMC_V1_MMIO_START_RANGE		0x80000
0110
struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* always value would be 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor version */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;
0154
struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	/* Offset of the payload, in dwords, relative to the end of the package */
	u32 offset;
	u32 reserved2;
} __packed;
0170
struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01 or 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;
0183
struct intel_dmc_header_base {
	/* DMC firmware signature */
	u32 signature;

	/* DMC binary header length; dwords for v3, bytes for v1 (see parser) */
	u8 header_len;

	/* 0x01 or 0x03 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;
0206
struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;
0224
struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;
0245
/* Hardware stepping as a two-character code, e.g. 'A'/'0'; '*' is a wildcard. */
struct stepping_info {
	char stepping;
	char substepping;
};
0250
0251 static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
0252 {
0253 return i915->dmc.dmc_info[dmc_id].payload;
0254 }
0255
/* True when the main DMC program (the one required for DC states) is loaded. */
bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}
0260
0261 static const struct stepping_info *
0262 intel_get_stepping_info(struct drm_i915_private *i915,
0263 struct stepping_info *si)
0264 {
0265 const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);
0266
0267 si->stepping = step_name[0];
0268 si->substepping = step_name[1];
0269 return si;
0270 }
0271
/*
 * Set the cores and memory-up mask bits in DC_STATE_DEBUG; the posting
 * read flushes the write to the hardware before we return.
 */
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
0279
/*
 * Disable a single DMC flip queue event.
 *
 * Only proceeds if the event is still configured exactly as the firmware
 * programs it (enabled, recurring, edge 0->1 triggered by the CLK_MSEC
 * event, with a non-zero handler pointer); otherwise logs and bails so we
 * don't clobber an unexpected configuration.
 */
static void
disable_flip_queue_event(struct drm_i915_private *i915,
			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	u32 event_ctl;
	u32 event_htp;

	event_ctl = intel_de_read(i915, ctl_reg);
	event_htp = intel_de_read(i915, htp_reg);
	if (event_ctl != (DMC_EVT_CTL_ENABLE |
			  DMC_EVT_CTL_RECURRING |
			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
	    !event_htp) {
		drm_dbg_kms(&i915->drm,
			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
			    event_ctl, event_htp);
		return;
	}

	/* Retarget the slot at the "false" event and clear its handler. */
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}
0309
0310 static bool
0311 get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id,
0312 i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
0313 {
0314 switch (dmc_id) {
0315 case DMC_FW_MAIN:
0316 if (DISPLAY_VER(i915) == 12) {
0317 *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
0318 *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
0319
0320 return true;
0321 }
0322 break;
0323 case DMC_FW_PIPEA ... DMC_FW_PIPED:
0324 if (IS_DG2(i915)) {
0325 *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
0326 *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
0327
0328 return true;
0329 }
0330 break;
0331 }
0332
0333 return false;
0334 }
0335
/*
 * Walk every loaded DMC engine and disable its flip queue event, if it
 * has one. Only DG2 and Tigerlake are handled here; other platforms are
 * left untouched.
 */
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
	int dmc_id;

	/* TODO(review): confirm whether other display 12/13 platforms need this too. */
	if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
		return;

	for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) {
		i915_reg_t ctl_reg;
		i915_reg_t htp_reg;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
			continue;

		disable_flip_queue_event(i915, ctl_reg, htp_reg);
	}
}
0358
0359
0360
0361
0362
0363
0364
0365
0366
/*
 * intel_dmc_load_program() - write the firmware into DMC program memory.
 * @dev_priv: i915 drm device.
 *
 * Copies every parsed payload into the DMC's program RAM, then performs the
 * MMIO writes each engine's header requested. Everything must be programmed
 * before the firmware can take over DC-state handling.
 */
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->dmc;
	u32 id, i;

	if (!intel_dmc_has_payload(dev_priv))
		return;

	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	/*
	 * Payload copy uses the raw _fw register accessor in a tight loop;
	 * disable preemption so the copy isn't stretched out mid-stream.
	 */
	preempt_disable();

	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
			intel_uncore_write_fw(&dev_priv->uncore,
					      DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
					      dmc->dmc_info[id].payload[i]);
		}
	}

	preempt_enable();

	/* Apply the per-engine MMIO address/data pairs from the fw headers. */
	for (id = 0; id < DMC_FW_MAX; id++) {
		for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) {
			intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i],
				       dmc->dmc_info[id].mmiodata[i]);
		}
	}

	dev_priv->dmc.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);

	/*
	 * Flip queue events are programmed by the firmware's MMIO writes
	 * above; disable them again here on the platforms that need it.
	 */
	disable_all_flip_queue_events(dev_priv);
}
0407
/*
 * Sanity-check (via one-shot warnings) that the DMC program storage,
 * SSP base and HTP registers all read back non-zero, i.e. the firmware
 * appears to have been programmed.
 */
void assert_dmc_loaded(struct drm_i915_private *i915)
{
	drm_WARN_ONCE(&i915->drm,
		      !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}
0418
0419 static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
0420 const struct stepping_info *si)
0421 {
0422 if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
0423 (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
0424
0425
0426
0427
0428
0429 (si->stepping == '*' && si->substepping == fw_info->substepping) ||
0430 (fw_info->stepping == '*' && fw_info->substepping == '*'))
0431 return true;
0432
0433 return false;
0434 }
0435
0436
0437
0438
0439
/*
 * Record, for each DMC engine, the payload offset of the first firmware
 * table entry that matches the running stepping. Package header v1 has
 * no dmc_id field, so every entry is treated as the main engine there.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	unsigned int i, id;

	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);

	for (i = 0; i < num_entries; i++) {
		/* v1 packages carry only the main program. */
		id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (id >= DMC_FW_MAX) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id);
			continue;
		}

		/*
		 * The table may contain several entries for the same id;
		 * the first match wins, so never overwrite an entry that
		 * was already selected.
		 */
		if (dmc->dmc_info[id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[id].present = true;
			dmc->dmc_info[id].dmc_offset = fw_info[i].offset;
		}
	}
}
0471
/*
 * Verify that every MMIO address requested by the firmware header falls
 * inside the range this platform/engine is allowed to touch. Returns
 * false (and warns) for an unknown id or an out-of-range address.
 */
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
	u32 start_range, end_range;
	int i;

	if (dmc_id >= DMC_FW_MAX) {
		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
		return false;
	}

	/* Pick the allowed window for this header version / engine / platform. */
	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}
0509
/*
 * Parse one per-engine DMC header (v1 or v3) plus its payload from the
 * firmware image, validating every size and address against the
 * remaining bytes before touching them.
 *
 * Returns the number of bytes consumed (header + payload), or 0 on any
 * validation failure (nothing is cached for this engine in that case,
 * other than possibly partial mmio info written before a later check).
 */
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, u8 dmc_id)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check the base header first so header_ver can be read safely;
	 * version-specific sizes are validated again below.
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Pick the layout-specific fields for v3 vs v1 headers. */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* v3 header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		/* v1 header_len is already in bytes */
		header_len_bytes = dmc_header->header_len;
		/* v1 headers carry no start address; use the fixed one */
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* mmio_count comes from the image; cap it to the layout's array size. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
		return 0;
	}

	/* Cache the validated MMIO program for intel_dmc_load_program(). */
	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiply by 4 to convert into bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	/* Payload immediately follows the header. */
	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}
0618
/*
 * Parse the package header and its firmware info table, recording the
 * payload offset chosen for each DMC engine via dmc_set_fw_offset().
 *
 * Returns the full package size in bytes (header + max_entries table,
 * the amount the caller must skip), or 0 on failure.
 */
static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * The image always reserves space for max_entries table slots,
	 * even when fewer are used, so account for all of them.
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	/* Clamp the (untrusted) entry count to the table capacity. */
	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset values are relative to the end of this package. */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}
0673
0674
/*
 * Parse the CSS header at the start of the firmware image, enforce the
 * platform's required version (if any) and cache the version found.
 *
 * Returns the number of bytes consumed, or 0 on failure.
 */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);

	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	/* header_len is in dwords and must describe exactly this struct. */
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	/* required_version == 0 (module-param override) accepts any version. */
	if (dmc->required_version &&
	    css_header->version != dmc->required_version) {
		drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u\n",
			 DMC_VERSION_MAJOR(css_header->version),
			 DMC_VERSION_MINOR(css_header->version),
			 DMC_VERSION_MAJOR(dmc->required_version),
			 DMC_VERSION_MINOR(dmc->required_version));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}
0709
0710 static void parse_dmc_fw(struct drm_i915_private *dev_priv,
0711 const struct firmware *fw)
0712 {
0713 struct intel_css_header *css_header;
0714 struct intel_package_header *package_header;
0715 struct intel_dmc_header_base *dmc_header;
0716 struct intel_dmc *dmc = &dev_priv->dmc;
0717 struct stepping_info display_info = { '*', '*'};
0718 const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
0719 u32 readcount = 0;
0720 u32 r, offset;
0721 int id;
0722
0723 if (!fw)
0724 return;
0725
0726
0727 css_header = (struct intel_css_header *)fw->data;
0728 r = parse_dmc_fw_css(dmc, css_header, fw->size);
0729 if (!r)
0730 return;
0731
0732 readcount += r;
0733
0734
0735 package_header = (struct intel_package_header *)&fw->data[readcount];
0736 r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
0737 if (!r)
0738 return;
0739
0740 readcount += r;
0741
0742 for (id = 0; id < DMC_FW_MAX; id++) {
0743 if (!dev_priv->dmc.dmc_info[id].present)
0744 continue;
0745
0746 offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
0747 if (offset > fw->size) {
0748 drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
0749 continue;
0750 }
0751
0752 dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
0753 parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id);
0754 }
0755 }
0756
/*
 * Grab an INIT power domain reference to block runtime PM until the
 * firmware is known to be usable; warns if one is already held.
 */
static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
	dev_priv->dmc.wakeref =
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
0763
/* Drop the INIT power domain reference taken by intel_dmc_runtime_pm_get(). */
static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&dev_priv->dmc.wakeref);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
0771
/*
 * Async worker: fetch the firmware blob, parse it, and either program the
 * DMC and release the runtime-PM block, or leave runtime PM disabled.
 */
static void dmc_load_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv;
	struct intel_dmc *dmc;
	const struct firmware *fw = NULL;

	dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
	dmc = &dev_priv->dmc;

	/*
	 * request_firmware()'s return value is deliberately ignored: on
	 * failure fw stays NULL and parse_dmc_fw() bails out, which the
	 * "Failed to load" branch below reports.
	 */
	request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
	parse_dmc_fw(dev_priv, fw);

	if (intel_dmc_has_payload(dev_priv)) {
		intel_dmc_load_program(dev_priv);
		intel_dmc_runtime_pm_put(dev_priv);

		drm_info(&dev_priv->drm,
			 "Finished loading DMC firmware %s (v%u.%u)\n",
			 dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
	} else {
		drm_notice(&dev_priv->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   dmc->fw_path);
		drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
	}

	release_firmware(fw);
}
0803
0804
0805
0806
0807
0808
0809
0810
/**
 * intel_dmc_ucode_init() - select and kick off loading of the DMC firmware.
 * @dev_priv: i915 drm device.
 *
 * Picks the firmware path, required version and size limit for the running
 * platform, then schedules the async loader. Until loading succeeds, a
 * runtime-PM reference keeps the device from entering low power states.
 */
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_dmc *dmc = &dev_priv->dmc;

	/* Initialized unconditionally so flush_work() in suspend is safe. */
	INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);

	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Hold the INIT power domain until the firmware is loaded; the
	 * worker (or an explicit disable below) releases it. Note the
	 * early returns below intentionally keep it held so runtime PM
	 * stays disabled without usable firmware.
	 */
	intel_dmc_runtime_pm_get(dev_priv);

	/* Newest platforms first; the first matching entry wins. */
	if (IS_DG2(dev_priv)) {
		dmc->fw_path = DG2_DMC_PATH;
		dmc->required_version = DG2_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_P(dev_priv)) {
		dmc->fw_path = ADLP_DMC_PATH;
		dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_S(dev_priv)) {
		dmc->fw_path = ADLS_DMC_PATH;
		dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_DG1(dev_priv)) {
		dmc->fw_path = DG1_DMC_PATH;
		dmc->required_version = DG1_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dmc->fw_path = RKL_DMC_PATH;
		dmc->required_version = RKL_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_TIGERLAKE(dev_priv)) {
		dmc->fw_path = TGL_DMC_PATH;
		dmc->required_version = TGL_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (DISPLAY_VER(dev_priv) == 11) {
		dmc->fw_path = ICL_DMC_PATH;
		dmc->required_version = ICL_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		dmc->fw_path = GLK_DMC_PATH;
		dmc->required_version = GLK_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) ||
		   IS_COFFEELAKE(dev_priv) ||
		   IS_COMETLAKE(dev_priv)) {
		dmc->fw_path = KBL_DMC_PATH;
		dmc->required_version = KBL_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		dmc->fw_path = SKL_DMC_PATH;
		dmc->required_version = SKL_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		dmc->fw_path = BXT_DMC_PATH;
		dmc->required_version = BXT_DMC_VERSION_REQUIRED;
		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
	}

	/* Module parameter overrides: "" disables DMC, anything else is a path. */
	if (dev_priv->params.dmc_firmware_path) {
		if (strlen(dev_priv->params.dmc_firmware_path) == 0) {
			dmc->fw_path = NULL;
			drm_info(&dev_priv->drm,
				 "Disabling DMC firmware and runtime PM\n");
			return;
		}

		dmc->fw_path = dev_priv->params.dmc_firmware_path;

		/* User-supplied firmware: accept whatever version it carries. */
		dmc->required_version = 0;
	}

	if (!dmc->fw_path) {
		drm_dbg_kms(&dev_priv->drm,
			    "No known DMC firmware for platform, disabling runtime PM\n");
		return;
	}

	drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
	schedule_work(&dev_priv->dmc.work);
}
0900
0901
0902
0903
0904
0905
0906
0907
0908
/**
 * intel_dmc_ucode_suspend() - prepare DMC firmware state for suspend.
 * @dev_priv: i915 drm device.
 *
 * Waits for any in-flight async load, then drops the runtime-PM block if
 * no usable firmware ever materialized.
 */
void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	flush_work(&dev_priv->dmc.work);

	/* Drop the request before the suspend, if the asynchronous load failed. */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_put(dev_priv);
}
0920
0921
0922
0923
0924
0925
0926
0927
/**
 * intel_dmc_ucode_resume() - restore DMC firmware state after resume.
 * @dev_priv: i915 drm device.
 *
 * Mirror of intel_dmc_ucode_suspend(): re-acquire the runtime-PM block if
 * there is still no usable firmware, keeping low power states disabled.
 */
void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
{
	if (!HAS_DMC(dev_priv))
		return;

	/*
	 * Re-acquire the reference dropped in intel_dmc_ucode_suspend()
	 * for the firmware-less case.
	 */
	if (!intel_dmc_has_payload(dev_priv))
		intel_dmc_runtime_pm_get(dev_priv);
}
0940
0941
0942
0943
0944
0945
0946
0947
0948 void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
0949 {
0950 int id;
0951
0952 if (!HAS_DMC(dev_priv))
0953 return;
0954
0955 intel_dmc_ucode_suspend(dev_priv);
0956 drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
0957
0958 for (id = 0; id < DMC_FW_MAX; id++)
0959 kfree(dev_priv->dmc.dmc_info[id].payload);
0960 }
0961
/* Dump DMC load status and firmware version into a GPU error state capture. */
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = &i915->dmc;

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	i915_error_printf(m, "DMC fw version: %d.%d\n",
			  DMC_VERSION_MAJOR(dmc->version),
			  DMC_VERSION_MINOR(dmc->version));
}
0976
/*
 * debugfs i915_dmc_info: report firmware load state, version, and the
 * DC-state entry counters maintained by the firmware.
 */
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	dmc = &i915->dmc;

	/* The counter registers are only readable with the device awake. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	/*
	 * NOTE(review): this keys off GRAPHICS_VER while the rest of the
	 * file uses DISPLAY_VER for display features — same result on the
	 * platforms handled here, but worth confirming for newer parts.
	 */
	seq_printf(m, "Pipe A fw support: %s\n",
		   str_yes_no(GRAPHICS_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw support: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915)));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	/* Pick the platform's DC5/DC6 counter registers. */
	if (DISPLAY_VER(i915) >= 12) {
		if (IS_DGFX(i915)) {
			/* dc6_reg stays invalid: no DC6 counter read here. */
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/* DC3CO has its own counter on display 12+. */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, IS_DGFX(i915) ?
					 DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}
1049
/* Generates intel_dmc_debugfs_status_fops wrapping the show function above. */
DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);

/* Expose the read-only i915_dmc_info file under the DRM debugfs root. */
void intel_dmc_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
			    i915, &intel_dmc_debugfs_status_fops);
}