#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>

#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pch_refclk.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"

#include "pxp/intel_pxp_pm.h"

#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_ioc32.h"
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
#include "vlv_suspend.h"

/* Intel Rapid Start Technology ACPI device name */
static const char irst_name[] = "INT3392";

static const struct drm_driver i915_drm_driver;

static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		drm_err(&dev_priv->drm, "bridge device not found\n");
		return -EIO;
	}
	return 0;
}

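/* Allocate space for the MCH regs if needed, return nonzero on error */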
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (GRAPHICS_VER(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

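/* Set up MCHBAR if possible; remember whether it must be disabled again on teardown */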
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and other late tasks) and is frequently required for
	 * heavyweight tasks such as switching between full-ppgtt address
	 * spaces. All tasks take the same locks, so there is no benefit in
	 * running more than one instance at a time; use an ordered
	 * workqueue. The same goes for the dedicated DP hotplug workqueue.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

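/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */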
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
	pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
	pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
	pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
	pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;

	if (pre) {
		drm_err(&dev_priv->drm, "This is a pre-production stepping. "
			"It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static void sanitize_gpu(struct drm_i915_private *i915)
{
	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(to_gt(i915), ALL_ENGINES);
}

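/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */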
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);
	intel_step_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->audio.mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_suspend_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	ret = intel_region_ttm_device_init(dev_priv);
	if (ret)
		goto err_ttm;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_root_gt_init_early(dev_priv);

	i915_drm_clients_init(&dev_priv->clients, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
err_ttm:
	vlv_suspend_cleanup(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

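/**
 * i915_driver_late_release - cleanup the setup done in
 *			       i915_driver_early_probe()
 * @dev_priv: device private
 */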
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release_all(dev_priv);
	i915_drm_clients_fini(&dev_priv->clients);
	intel_region_ttm_device_fini(dev_priv);
	vlv_suspend_cleanup(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	cpu_latency_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);

	i915_params_free(&dev_priv->params);
}

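/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * driver load sequence, specifically uncore and display MMIO.
 */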
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	ret = i915_get_bridge_dev(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret)
		return ret;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);
	intel_device_info_runtime_init(dev_priv);

	ret = intel_gt_init_mmio(to_gt(dev_priv));
	if (ret)
		goto err_uncore;

	/* As early as possible, scrub existing GPU state before clobbering */
	sanitize_gpu(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

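/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */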
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

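/**
 * i915_set_dma_info - set all relevant PCI dma info as configured for the
 * platform
 * @i915: valid i915 instance
 *
 * Set the dma max segment size, device and coherent masks. The dma mask set
 * is the default mask for all devices on the platform, syntax is the same
 * for all drivers so no need to check the platform.
 *
 * This function should be called once per device probe.
 */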
static int i915_set_dma_info(struct drm_i915_private *i915)
{
	unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
	int ret;

	GEM_BUG_ON(!mask_size);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * constructors can cope with used buffers >4K.
	 */
	dma_set_max_seg_size(i915->drm.dev, UINT_MAX);

	ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	/* overlay on gen2 is broken and can't address above 1G */
	if (GRAPHICS_VER(i915) == 2)
		mask_size = 30;

	/*
	 * 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above
	 * 4GB, which also needs to be handled carefully.
	 */
	if (IS_I965G(i915) || IS_I965GM(i915))
		mask_size = 32;

	ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
	if (ret)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
	return ret;
}

static int i915_pcode_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	int id, ret;

	for_each_gt(gt, i915, id) {
		ret = intel_pcode_init(gt->uncore);
		if (ret) {
			drm_err(&gt->i915->drm, "gt%d: intel_pcode_init failed %d\n", id, ret);
			return ret;
		}
	}

	return 0;
}

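/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */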
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	/* needs to be done before ggtt probe */
	intel_dram_edram_detect(dev_priv);

	ret = i915_set_dma_info(dev_priv);
	if (ret)
		return ret;

	i915_perf_init(dev_priv);

	ret = intel_gt_assign_ggtt(to_gt(dev_priv));
	if (ret)
		goto err_perf;

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
	if (ret)
		goto err_ggtt;

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_memory_regions_hw_probe(dev_priv);
	if (ret)
		goto err_ggtt;

	ret = intel_gt_tiles_init(dev_priv);
	if (ret)
		goto err_mem_regions;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "failed to enable GGTT\n");
		goto err_mem_regions;
	}

	pci_set_master(pdev);

	/*
	 * MSI is only enabled on gen5+. On older chipsets (945G/GM, 965GM,
	 * g4x) the MSI capability is either reported incorrectly or suffers
	 * from errata that cause interrupts to be lost or delayed, so it is
	 * left disabled there.
	 */
	if (GRAPHICS_VER(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			drm_dbg(&dev_priv->drm, "can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		goto err_msi;

	/*
	 * Fill the dram structure to get the system dram info. This will be
	 * used for memory latency calculation.
	 */
	intel_dram_detect(dev_priv);

	intel_bw_init_hw(dev_priv);

	/*
	 * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
	 * This should be totally removed when we handle the pci states
	 * properly on runtime PM and on s2idle cases.
	 */
	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_disable(root_pdev);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
err_mem_regions:
	intel_memory_regions_driver_release(dev_priv);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}

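/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */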
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct pci_dev *root_pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	root_pdev = pcie_find_root_port(pdev);
	if (root_pdev)
		pci_d3cold_enable(root_pdev);
}

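/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */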
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	intel_vgpu_register(dev_priv);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0)) {
		drm_err(&dev_priv->drm,
			"Failed to register driver for userspace access!\n");
		return;
	}

	i915_debugfs_register(dev_priv);
	i915_setup_sysfs(dev_priv);

	/* Depends on sysfs having been initialized */
	i915_perf_register(dev_priv);

	intel_gt_driver_register(to_gt(dev_priv));

	intel_display_driver_register(dev_priv);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);

	intel_register_dsm_handler();

	if (i915_switcheroo_register(dev_priv))
		drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}

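/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */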
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_switcheroo_unregister(dev_priv);

	intel_unregister_dsm_handler();

	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_display_driver_unregister(dev_priv);

	intel_gt_driver_unregister(to_gt(dev_priv));

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}

void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
{
	drm_printf(p, "iommu: %s\n",
		   str_enabled_disabled(i915_vtd_active(i915)));
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   GRAPHICS_VER(dev_priv));

		intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
		intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
		i915_print_iommu_status(dev_priv, &p);
		intel_gt_info_print(&to_gt(dev_priv)->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		drm_info(&dev_priv->drm,
			 "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}

static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;

	i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915))
		return i915;

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Set up device info and initial runtime info. */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	return i915;
}

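/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */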
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *i915;
	int ret;

	i915 = i915_driver_create(pdev, ent);
	if (IS_ERR(i915))
		return PTR_ERR(i915);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915->params.nuclear_pageflip && DISPLAY_VER(i915) < 5)
		i915->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(i915);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_vgpu_detect(i915);

	ret = intel_gt_probe_all(i915);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_mmio_probe(i915);
	if (ret < 0)
		goto out_tiles_cleanup;

	ret = i915_driver_hw_probe(i915);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = intel_modeset_init_noirq(i915);
	if (ret < 0)
		goto out_cleanup_hw;

	ret = intel_irq_install(i915);
	if (ret)
		goto out_cleanup_modeset;

	ret = intel_modeset_init_nogem(i915);
	if (ret)
		goto out_cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto out_cleanup_modeset2;

	ret = intel_modeset_init(i915);
	if (ret)
		goto out_cleanup_gem;

	i915_driver_register(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_welcome_messages(i915);

	i915->do_release = true;

	return 0;

out_cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
out_cleanup_modeset2:
	/* FIXME clean up the error path */
	intel_modeset_driver_remove(i915);
	intel_irq_uninstall(i915);
	intel_modeset_driver_remove_noirq(i915);
	goto out_cleanup_modeset;
out_cleanup_irq:
	intel_irq_uninstall(i915);
out_cleanup_modeset:
	intel_modeset_driver_remove_nogem(i915);
out_cleanup_hw:
	i915_driver_hw_remove(i915);
	intel_memory_regions_driver_release(i915);
	i915_ggtt_driver_release(i915);
	i915_gem_drain_freed_objects(i915);
	i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
	i915_driver_mmio_release(i915);
out_tiles_cleanup:
	intel_gt_release_all(i915);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&i915->runtime_pm);
	i915_driver_late_release(i915);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
	return ret;
}

void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	intel_gvt_driver_remove(i915);

	intel_modeset_driver_remove(i915);

	intel_irq_uninstall(i915);

	intel_modeset_driver_remove_noirq(i915);

	i915_reset_error_state(i915);
	i915_gem_driver_remove(i915);

	intel_modeset_driver_remove_nogem(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	if (!dev_priv->do_release)
		return;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	intel_memory_regions_driver_release(dev_priv);
	i915_ggtt_driver_release(dev_priv);
	i915_gem_drain_freed_objects(dev_priv);
	i915_ggtt_driver_late_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}

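/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 */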
static void i915_driver_lastclose(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	intel_fbdev_restore_mode(dev);

	if (HAS_DISPLAY(i915))
		vga_switcheroo_process_delayed_switch();
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_drm_client_put(file_priv->client);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(dev_priv))
		return;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(dev);
}

void i915_driver_shutdown(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	intel_runtime_pm_disable(&i915->runtime_pm);
	intel_power_domains_disable(i915);

	if (HAS_DISPLAY(i915)) {
		drm_kms_helper_poll_disable(&i915->drm);

		drm_atomic_helper_shutdown(&i915->drm);
	}

	intel_dp_mst_suspend(i915);

	intel_runtime_pm_disable_interrupts(i915);
	intel_hpd_cancel_work(i915);

	intel_suspend_encoders(i915);
	intel_shutdown_encoders(i915);

	intel_dmc_ucode_suspend(i915);

	i915_gem_suspend(i915);

	/*
	 * The only requirement is to reboot with display DC states disabled,
	 * for now leaving all display power wells in the INIT power domain
	 * enabled.
	 */
	intel_power_domains_driver_remove(i915);
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	intel_runtime_pm_driver_release(&i915->runtime_pm);
}

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after this point,
	 * the GPU is not woken again.
	 */
	return i915_gem_backup_suspend(i915);
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	/* Must be called before GGTT is suspended. */
	intel_dpt_suspend(dev_priv);
	i915_ggtt_suspend(to_gt(dev_priv)->ggtt);

	i915_save_display(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_dmc_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_gem_drain_freed_objects(dev_priv);

	return 0;
}

static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);

	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6.
	 */
	if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}

int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
				   pm_message_t state)
{
	int error;

	if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
			     state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = i915_pcode_init(dev_priv);
	if (ret)
		return ret;

	sanitize_gpu(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");

	i915_ggtt_resume(to_gt(dev_priv)->ggtt);

	/* Must be called after GGTT is resumed. */
	intel_dpt_resume(dev_priv);

	intel_dmc_ucode_resume(dev_priv);

	i915_restore_display(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not
	 * the GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	if (HAS_DISPLAY(dev_priv))
		drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);
	intel_hpd_init(dev_priv);

	/* MST sideband requires HPD interrupts enabled */
	intel_dp_mst_resume(dev_priv);
	intel_display_resume(dev);

	intel_hpd_poll_disable(dev_priv);
	if (HAS_DISPLAY(dev_priv))
		drm_kms_helper_poll_enable(dev);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	intel_gvt_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve
	 * the same order for the freeze/thaw phases.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		drm_err(&dev_priv->drm,
			"Resume prepare failed: %d, continuing anyway\n", ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(to_gt(dev_priv));

	intel_display_power_resume_early(dev_priv);

	intel_power_domains_resume(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}

static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}

static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	i915_ggtt_mark_pte_lost(i915, false);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}

static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}

static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}

static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * If IRST is enabled, or if we can't detect whether it's enabled,
	 * then we must assume we lost the GGTT page table entries, since
	 * they are not retained if IRST decided to enter S4.
	 */
	if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
		i915_ggtt_mark_pte_lost(i915, true);

	return i915_drm_resume(&i915->drm);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}

static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off APM */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	i915_ggtt_mark_pte_lost(i915, true);
	return i915_pm_resume(kdev);
}

static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(to_gt(dev_priv));

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	ret = vlv_suspend_complete(dev_priv);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(to_gt(dev_priv));

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		drm_err(&dev_priv->drm,
			"Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the
	 * arguments used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_enable(dev_priv);

	drm_dbg(&dev_priv->drm, "Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret;

	if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	drm_dbg(&dev_priv->drm, "Resuming device\n");

	drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		drm_dbg(&dev_priv->drm,
			"Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(to_gt(dev_priv));

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		intel_hpd_init(dev_priv);
		intel_hpd_poll_disable(dev_priv);
	}

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		drm_err(&dev_priv->drm,
			"Runtime resume failed, disabling it (%d)\n", ret);
	else
		drm_dbg(&dev_priv->drm, "Device resumed\n");

	return ret;
}

const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = drm_ioctl,
	.mmap = i915_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_ioc32_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = i915_drm_client_fdinfo,
#endif
};

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

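/*
 * Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */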
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

static const struct drm_driver i915_drm_driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_dumb_mmap_offset,

	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};