/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we
	 * are trying to reset here does exist at this point (engines could
	 * be fused off in ICL+), so no waiting for acks.
	 */
	/* WaRsClearFWBitsAtReset */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN);
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * The primary KERNEL ack did not respond in time.  The driver's wake
	 * request can collide with the hardware's own power state transitions
	 * and leave the ack stuck, so try to nudge the hardware by toggling
	 * the separate KERNEL_FALLBACK wake bit with increasing delays,
	 * re-sampling the primary ack after each pass (up to 10 passes).
	 */
	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN);
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV the FIFO is shared by both SW and HW, so read the free
	 * entry count from the hardware every time rather than trusting
	 * our cached count.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally
	 * cause us to lose access to the MMIO BAR.  When this happens,
	 * register reads come back as 0xFFFFFFFF for every register,
	 * including FPGA_DBG, which spuriously sets the NOCLAIM bit we just
	 * tested.  Detect that specific all-ones pattern and report the
	 * loss of MMIO access instead of a bogus unclaimed access.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to grab GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence, and subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
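
/*
 * Usage sketch (illustrative only, not lifted from a real caller): a raw
 * multi-register sequence that must not race with the GT powering down is
 * bracketed by a symmetric get/put pair; intel_uncore_write_fw() is the
 * driver's non-waking raw write helper, and reg_a/val_a are placeholders:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	intel_uncore_write_fw(uncore, reg_a, val_a);
 *	intel_uncore_write_fw(uncore, reg_b, val_b);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */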

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})
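
/*
 * Worked example (offsets chosen for illustration): 0x2030, a render ring
 * register, is below 0x40000 and therefore goes through the forcewake
 * table lookup; an offset in the 0x40000 - 0x1bffff window (display-era
 * ranges) skips the lookup entirely; gen11+ media engine offsets from
 * GEN11_BSD_RING_BASE upwards are again routed through the lookup.
 */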

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
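
/*
 * Usage sketch (mirrors find_fw_domain() below; 'table' and 'n_entries'
 * are placeholder names): the macro evaluates to a pointer into the sorted
 * table, or NULL when the key falls in a gap between ranges:
 *
 *	const struct intel_forcewake_range *entry =
 *		BSEARCH(offset, table, n_entries, fw_range_cmp);
 */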

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The set of forcewake domains varies per platform: a table entry
	 * may use FORCEWAKE_ALL as shorthand for "every domain this
	 * platform actually has", which we expand here to the real mask.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

/*
 * Shadowed register tables describe special register ranges that i915 is
 * allowed to write to without acquiring forcewake.  If these registers' power
 * wells are down, the hardware will save values written by i915 to a shadow
 * copy and automatically transfer them into the real register the next time
 * the power well is woken up.  Shadowing only applies to writes; forcewake
 * must still be acquired when reading from registers in these ranges.
 *
 * The documentation for shadowed registers is somewhat spotty on older
 * platforms.  However missing registers from these lists is non-fatal; it
 * just means we'll wake up the hardware for some register accesses where we
 * didn't strictly need to.
 *
 * The ranges listed in these tables must be sorted by offset.
 *
 * When adding new tables here, please also add them to
 * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
 * scanned for obvious mistakes or typos by the selftests.
 */

static const struct i915_range gen8_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2550, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22230, .end = 0x22230 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4D4, .end = 0xC4D4 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range pvc_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }
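
/*
 * Illustrative expansion: GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER)
 * becomes { .start = 0x2000, .end = 0x26ff, .domains = FORCEWAKE_RENDER },
 * i.e. any register access in [0x2000, 0x26ff] must first wake the render
 * power well.
 */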

/*
 * All platforms' forcewake tables below must be sorted by offset ranges.
 * Furthermore, new forcewake tables added should be "watertight" and have
 * no gaps between ranges.
 *
 * When there are multiple consecutive ranges listed in the bspec with the
 * same forcewake domain, it is customary to combine them into a single row
 * in the driver's table to keep the tables compact.
 *
 * When adding new forcewake tables here, please also add them to the mock
 * selftests in selftests/intel_uncore.c so that they will be scanned for
 * obvious mistakes or typos.
 */

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0),
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
};


/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain; the domain for
 * that range is therefore passed in as the macro parameter.
 */
#define XEHP_FWRANGES(FW_RANGE_D800) \
	GEN_FW_RANGE(0x0, 0x1fff, 0), \
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), \
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),

static const struct intel_forcewake_range __xehp_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_GT)
};

static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_RENDER)
};

static const struct intel_forcewake_range __pvc_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, 0),
	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xc00, 0xfff, 0),
	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8180, 0x81ff, 0),
	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x967f, 0),
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd3ff, 0),
	GEN_FW_RANGE(0xd400, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT),
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x12fff, 0),
	GEN_FW_RANGE(0x13000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0),
	GEN_FW_RANGE(0x24180, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1d0000, 0x23ffff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x240000, 0x3dffff, 0),
	GEN_FW_RANGE(0x3e0000, 0x3effff, FORCEWAKE_GT),
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/*
	 * WaIssueDummyWriteToWakeupFromRC6:ilk - issue a dummy write to wake
	 * up the chip from rc6 before touching it for real. MI_MODE is
	 * masked, hence harmless to write 0 into.
	 */
	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore),
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static void
__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
			       const i915_reg_t reg,
			       const bool read)
{
	if (check_for_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm,
			"Unclaimed access detected before %s register 0x%x\n",
			read ? "read from" : "write to",
			i915_mmio_reg_offset(reg));
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before) {
		spin_lock(&uncore->debug->lock);
		__unclaimed_previous_reg_debug(uncore, reg, read);
	} else {
		__unclaimed_reg_debug(uncore, reg, read);
		spin_unlock(&uncore->debug->lock);
	}
}
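
/*
 * Note on pairing (a reading aid, not upstream documentation): the
 * GEN6_READ_HEADER/FOOTER and GEN6_WRITE_HEADER/FOOTER macros below call
 * unclaimed_reg_debug() once with before=true and once with before=false,
 * so uncore->debug->lock is taken just before the raw access and dropped
 * right after it, bracketing exactly one MMIO transaction.
 */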

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

#define __gen_fwtable_read(x) \
static u##x \
fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
{ \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_read(8)
__gen_fwtable_read(16)
__gen_fwtable_read(32)
__gen_fwtable_read(64)

#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)
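
/*
 * Illustrative expansion: ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable) wires
 * uncore->funcs.mmio_writeb/mmio_writew/mmio_writel to
 * fwtable_write8/16/32 and uncore->funcs.write_fw_domains to
 * fwtable_reg_write_fw_domains via token pasting.
 */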

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/*
		 * A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/*
		 * We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access
		 * is not working. In this stage we don't know which flavour
		 * this ivb is, so it is better to reset also the gen6 fw
		 * registers before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

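/*
 * The two helper macros below hook up the per-platform lookup tables.
 * fw_domains_table maps mmio offset ranges to the forcewake domains they
 * require; shadowed_reg_table lists registers with a shadow copy that can
 * be written without waking the hardware. Both record the entry count so
 * the lookup code can search the tables.
 */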
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}

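/*
 * PMIC bus access notifier: on some systems the punit and the kernel
 * share a PMIC i2c bus. While another driver holds that bus the punit
 * cannot service forcewake requests, so grab forcewake for the duration
 * of the access and drop it again once the bus is released.
 */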
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
						   struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wakeref.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
	struct drm_i915_private *i915 = uncore->i915;
	int mmio_size;

	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 * For dgfx chips the register range is expanded to 4MB.
	 */
	if (GRAPHICS_VER(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = ioremap(phys_addr, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
	iounmap(uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
	uncore->debug = &gt->i915->mmio_debug;
}

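/*
 * With no forcewake on the platform (or a vGPU mediating access), plain
 * raw accessors are sufficient; pick the flavour that matches the
 * environment below.
 */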
static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

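/*
 * Forcewake-aware initialization: build the domains, then wire up the
 * fwtable accessors together with the platform's range table (which
 * forcewake domains a register offset needs) and, where applicable, the
 * shadow table (registers that may be written without a wake).
 */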
static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __pvc_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, pvc_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

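/*
 * Top-level mmio setup: decides between the raw and forcewake-aware
 * accessors and records the platform's unclaimed-access debug
 * capabilities in uncore->flags.
 */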
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are pointing to the actual latest */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the media
		 * slice, such as SFC. So even if the engine itself is
		 * fused off, we still need to keep the forcewake domain
		 * if any of the other engines in the same media slice are
		 * present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the last read register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
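
/*
 * Illustrative only: a caller that already holds forcewake might poll a
 * status register like so (MY_STATUS_REG and MY_READY_BIT are made-up
 * names, not registers defined by this driver):
 *
 *	err = __intel_wait_for_register_fw(uncore, MY_STATUS_REG,
 *					   MY_READY_BIT, MY_READY_BIT,
 *					   500, 0, NULL);
 *
 * A zero slow timeout keeps the wait atomic-safe; pass a non-zero
 * slow_timeout_ms only from sleepable context.
 */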

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold the last read register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

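	/*
	 * Slow path: re-poll with sleeps, after dropping the spinlock. Each
	 * intel_uncore_read_notrace() takes and releases any required
	 * forcewake internally, so the wait holds neither the hardware
	 * awake nor the irq-off lock for the whole slow timeout.
	 */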
	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

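/*
 * Oneshot arming of unclaimed-mmio detection: reports at most
 * unclaimed_mmio_check times (once by default) unless the user raises
 * i915.mmio_debug, so a single rogue access doesn't flood the log.
 */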
bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to
 *				    access a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires the callers to do FIFO management on their own or risk losing
 * access to registers.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

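/*
 * The selftests are compiled directly into this translation unit when
 * CONFIG_DRM_I915_SELFTEST is enabled so that they can exercise the
 * static helpers above without exporting them.
 */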
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif