0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029 #include <linux/console.h>
0030 #include <linux/efi.h>
0031 #include <linux/pci.h>
0032 #include <linux/pm_runtime.h>
0033 #include <linux/slab.h>
0034 #include <linux/vga_switcheroo.h>
0035 #include <linux/vgaarb.h>
0036
0037 #include <drm/drm_cache.h>
0038 #include <drm/drm_crtc_helper.h>
0039 #include <drm/drm_device.h>
0040 #include <drm/drm_file.h>
0041 #include <drm/drm_framebuffer.h>
0042 #include <drm/drm_probe_helper.h>
0043 #include <drm/radeon_drm.h>
0044
0045 #include "radeon_device.h"
0046 #include "radeon_reg.h"
0047 #include "radeon.h"
0048 #include "atom.h"
0049
/*
 * ASIC family name strings, indexed by enum radeon_family.  Used for the
 * "initializing kernel modesetting" banner printed by radeon_device_init().
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",
};
0115
0116 #if defined(CONFIG_VGA_SWITCHEROO)
0117 bool radeon_has_atpx_dgpu_power_cntl(void);
0118 bool radeon_is_atpx_hybrid(void);
0119 #else
0120 static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
0121 static inline bool radeon_is_atpx_hybrid(void) { return false; }
0122 #endif
0123
/* quirk flag: PX (PowerXpress) switching is broken on this system; run
 * the dGPU without PX handling */
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)

/*
 * One entry in the PX quirk table: a full PCI ID match (vendor/device plus
 * subsystem vendor/device) and the quirk flags to apply to that system.
 */
struct radeon_px_quirk {
	u32 chip_vendor;	/* PCI vendor ID of the GPU */
	u32 chip_device;	/* PCI device ID of the GPU */
	u32 subsys_vendor;	/* PCI subsystem vendor ID (OEM board) */
	u32 subsys_device;	/* PCI subsystem device ID (OEM board) */
	u32 px_quirk_flags;	/* RADEON_PX_QUIRK_* flags to apply */
};
0133
/*
 * Systems where PX (PowerXpress) GPU switching is known to be broken.
 * Matched by radeon_device_handle_px_quirks() on full PCI vendor/device +
 * subsystem IDs; the table is terminated by an all-zero sentinel entry
 * (chip_device == 0).  All current entries disable PX entirely.
 * NOTE(review): the specific laptop models behind each entry were tracked
 * in kernel bugzilla reports — confirm against upstream history if needed.
 */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* HD 6480G dGPU, Acer subsystem 0x1025:0x0672 — PX broken */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* HD 6650M dGPU, Asus subsystem 0x1043:0x108c — PX broken */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* HD 7670M dGPU, Asus subsystem 0x1043:0x2122 — PX broken */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* HD 6650M dGPU, Asus subsystem 0x1043:0x2122 — PX broken */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* HD 7670M dGPU, Asus subsystem 0x1043:0x2123 — PX broken */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },	/* sentinel */
};
0157
0158 bool radeon_is_px(struct drm_device *dev)
0159 {
0160 struct radeon_device *rdev = dev->dev_private;
0161
0162 if (rdev->flags & RADEON_IS_PX)
0163 return true;
0164 return false;
0165 }
0166
0167 static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
0168 {
0169 struct radeon_px_quirk *p = radeon_px_quirk_list;
0170
0171
0172 while (p && p->chip_device != 0) {
0173 if (rdev->pdev->vendor == p->chip_vendor &&
0174 rdev->pdev->device == p->chip_device &&
0175 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
0176 rdev->pdev->subsystem_device == p->subsys_device) {
0177 rdev->px_quirk_flags = p->px_quirk_flags;
0178 break;
0179 }
0180 ++p;
0181 }
0182
0183 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
0184 rdev->flags &= ~RADEON_IS_PX;
0185
0186
0187 if (!radeon_is_atpx_hybrid() &&
0188 !radeon_has_atpx_dgpu_power_cntl())
0189 rdev->flags &= ~RADEON_IS_PX;
0190 }
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202 void radeon_program_register_sequence(struct radeon_device *rdev,
0203 const u32 *registers,
0204 const u32 array_size)
0205 {
0206 u32 tmp, reg, and_mask, or_mask;
0207 int i;
0208
0209 if (array_size % 3)
0210 return;
0211
0212 for (i = 0; i < array_size; i +=3) {
0213 reg = registers[i + 0];
0214 and_mask = registers[i + 1];
0215 or_mask = registers[i + 2];
0216
0217 if (and_mask == 0xffffffff) {
0218 tmp = or_mask;
0219 } else {
0220 tmp = RREG32(reg);
0221 tmp &= ~and_mask;
0222 tmp |= or_mask;
0223 }
0224 WREG32(reg, tmp);
0225 }
0226 }
0227
/**
 * radeon_pci_config_reset - reset the asic via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Triggers an asic reset by writing the reset magic to PCI config
 * register 0x7c.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
0232
0233
0234
0235
0236
0237
0238
0239
0240 void radeon_surface_init(struct radeon_device *rdev)
0241 {
0242
0243 if (rdev->family < CHIP_R600) {
0244 int i;
0245
0246 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
0247 if (rdev->surface_regs[i].bo)
0248 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
0249 else
0250 radeon_clear_surface_reg(rdev, i);
0251 }
0252
0253 WREG32(RADEON_SURFACE_CNTL, 0);
0254 }
0255 }
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267 void radeon_scratch_init(struct radeon_device *rdev)
0268 {
0269 int i;
0270
0271
0272 if (rdev->family < CHIP_R300) {
0273 rdev->scratch.num_reg = 5;
0274 } else {
0275 rdev->scratch.num_reg = 7;
0276 }
0277 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
0278 for (i = 0; i < rdev->scratch.num_reg; i++) {
0279 rdev->scratch.free[i] = true;
0280 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
0281 }
0282 }
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293 int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
0294 {
0295 int i;
0296
0297 for (i = 0; i < rdev->scratch.num_reg; i++) {
0298 if (rdev->scratch.free[i]) {
0299 rdev->scratch.free[i] = false;
0300 *reg = rdev->scratch.reg[i];
0301 return 0;
0302 }
0303 }
0304 return -EINVAL;
0305 }
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315 void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
0316 {
0317 int i;
0318
0319 for (i = 0; i < rdev->scratch.num_reg; i++) {
0320 if (rdev->scratch.reg[i] == reg) {
0321 rdev->scratch.free[i] = true;
0322 return;
0323 }
0324 }
0325 }
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
/*
 * GPU doorbell aperture helpers.  Only used on CHIP_BONAIRE and newer —
 * see the family checks in radeon_device_init()/radeon_device_fini().
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Maps the doorbell aperture (PCI BAR 2), capped at RADEON_MAX_DOORBELLS
 * u32 slots, and clears the allocation bitmap.
 * Returns 0 on success, -EINVAL if the BAR is too small, -ENOMEM if the
 * ioremap fails.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* one doorbell slot per u32; never more than RADEON_MAX_DOORBELLS */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* mark all doorbell slots unused */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
0359
0360
0361
0362
0363
0364
0365
0366
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Unmaps the doorbell aperture mapped by radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
0383 {
0384 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
0385 if (offset < rdev->doorbell.num_doorbells) {
0386 __set_bit(offset, rdev->doorbell.used);
0387 *doorbell = offset;
0388 return 0;
0389 } else {
0390 return -EINVAL;
0391 }
0392 }
0393
0394
0395
0396
0397
0398
0399
0400
0401
/**
 * radeon_doorbell_free - release a doorbell slot
 *
 * @rdev: radeon_device pointer
 * @doorbell: doorbell slot index to free
 *
 * Clears the slot in the allocation bitmap; out-of-range indices are
 * silently ignored.
 */
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
	if (doorbell < rdev->doorbell.num_doorbells)
		__clear_bit(doorbell, rdev->doorbell.used);
}
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
/*
 * radeon_wb_*() - writeback: the GPU writing fence/ring status to a
 * buffer in memory so the driver can poll it instead of reading MMIO
 * registers.
 */
/**
 * radeon_wb_disable - Disable writeback
 *
 * @rdev: radeon_device pointer
 *
 * Marks writeback disabled; the WB buffer itself is kept allocated
 * (freed only by radeon_wb_fini()).
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
0426
0427
0428
0429
0430
0431
0432
0433
0434
/**
 * radeon_wb_fini - Disable writeback and free the buffer
 *
 * @rdev: radeon_device pointer
 *
 * Disables writeback, then unmaps, unpins and frees the writeback
 * buffer object.  Called at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* unpin requires a successful reserve; skip on contention */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
/**
 * radeon_wb_init - Set up driver-side writeback state
 *
 * @rdev: radeon_device pointer
 *
 * Allocates, pins (in GTT) and maps the writeback buffer if not already
 * present, clears it, and decides per-asic whether writeback and
 * event-write fences are used.  Returns 0 on success, a negative error
 * code on allocation/pin/map failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences by default */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* writeback disabled on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* writeback disabled on pre-r300 asics */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences only from r600 on */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always enable writeback/events on PALM and newer */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
0568 {
0569 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
0570
0571 mc->vram_start = base;
0572 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
0573 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
0574 mc->real_vram_size = mc->aper_size;
0575 mc->mc_vram_size = mc->aper_size;
0576 }
0577 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
0578 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
0579 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
0580 mc->real_vram_size = mc->aper_size;
0581 mc->mc_vram_size = mc->aper_size;
0582 }
0583 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
0584 if (limit && limit < mc->real_vram_size)
0585 mc->real_vram_size = limit;
0586 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
0587 mc->mc_vram_size >> 20, mc->vram_start,
0588 mc->vram_end, mc->real_vram_size >> 20);
0589 }
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
/**
 * radeon_gtt_location - place GTT in the GPU's internal address space
 *
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Places the GTT either before or after VRAM, choosing whichever gap
 * is larger, aligned to gtt_base_align.  Shrinks gtt_size with a
 * warning if it does not fit in the chosen gap.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM (up to mc_mask) and before VRAM (down to 0) */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
/**
 * radeon_device_is_virtual - check if we are running in a VM
 *
 * Returns true when the x86 hypervisor CPU feature bit is set;
 * always false on non-x86 builds.
 */
bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	return false;
#endif
}
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Determines whether the asic was already posted (initialized) by
 * checking CRTC enable bits and, as a fallback, the memory size
 * register.  Returns true if posted, false if not (in which case the
 * driver must post the card itself — see radeon_boot_test_post_card()).
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* always force asic_init for CI and newer when virtualized */
	if (rdev->family >= CHIP_BONAIRE &&
	    radeon_device_is_virtual())
		return false;

	/* treat pre-r600 Apple cards booted via EFI as unposted */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: fall through to the memsize check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
0715
0716
0717
0718
0719
0720
0721
0722
0723
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Converts the current engine (sclk) and memory (mclk) clocks from the
 * pm state into fixed-point values (clocks are stored in units of
 * 10 kHz, hence the divide by 100 to get MHz) and, on IGPs, derives
 * the core bandwidth as sclk/16.
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in 10khz units; convert via /100 */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
0743
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753 bool radeon_boot_test_post_card(struct radeon_device *rdev)
0754 {
0755 if (radeon_card_posted(rdev))
0756 return true;
0757
0758 if (rdev->bios) {
0759 DRM_INFO("GPU not posted. posting now...\n");
0760 if (rdev->is_atom_bios)
0761 atom_asic_init(rdev->mode_info.atom_context);
0762 else
0763 radeon_combios_asic_init(rdev->ddev);
0764 return true;
0765 } else {
0766 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
0767 return false;
0768 }
0769 }
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocates a zeroed DMA32 page, maps it for bidirectional DMA and
 * precomputes its GART page-table entry (flagged RADEON_GART_PAGE_DUMMY)
 * so unbound GART slots can point at it.  Idempotent: returns 0 if the
 * page already exists.  Returns -ENOMEM on allocation or mapping failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
					     0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
0800
0801
0802
0803
0804
0805
0806
0807
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Unmaps and frees the dummy page.  Safe to call when no dummy page
 * was allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
		       DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
0838 {
0839 struct radeon_device *rdev = info->dev->dev_private;
0840 uint32_t r;
0841
0842 r = rdev->pll_rreg(rdev, reg);
0843 return r;
0844 }
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
/**
 * cail_pll_write - ATOM interpreter callback: write a PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871 static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
0872 {
0873 struct radeon_device *rdev = info->dev->dev_private;
0874 uint32_t r;
0875
0876 r = rdev->mc_rreg(rdev, reg);
0877 return r;
0878 }
0879
0880
0881
0882
0883
0884
0885
0886
0887
0888
/**
 * cail_mc_write - ATOM interpreter callback: write an MC register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
/**
 * cail_reg_write - ATOM interpreter callback: write an MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset in dwords (hence the *4 byte conversion)
 * @val: value to write
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
0921 static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
0922 {
0923 struct radeon_device *rdev = info->dev->dev_private;
0924 uint32_t r;
0925
0926 r = RREG32(reg*4);
0927 return r;
0928 }
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
/**
 * cail_ioreg_write - ATOM interpreter callback: write a PCI I/O register
 *
 * @info: atom card_info pointer
 * @reg: I/O register offset in dwords (hence the *4 byte conversion)
 * @val: value to write
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
0945
0946
0947
0948
0949
0950
0951
0952
0953
0954
0955 static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
0956 {
0957 struct radeon_device *rdev = info->dev->dev_private;
0958 uint32_t r;
0959
0960 r = RREG32_IO(reg*4);
0961 return r;
0962 }
0963
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the card_info, wires up the cail_* register access
 * callbacks (falling back to MMIO for the I/O-port ops when no PCI I/O
 * BAR was mapped), parses the VBIOS with atom_parse() and initializes
 * the interpreter's locks and scratch state.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops; fall back to MMIO without an I/O BAR */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022 void radeon_atombios_fini(struct radeon_device *rdev)
1023 {
1024 if (rdev->mode_info.atom_context) {
1025 kfree(rdev->mode_info.atom_context->scratch);
1026 }
1027 kfree(rdev->mode_info.atom_context);
1028 rdev->mode_info.atom_context = NULL;
1029 kfree(rdev->mode_info.atom_card_info);
1030 rdev->mode_info.atom_card_info = NULL;
1031 }
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
/*
 * COMBIOS: the legacy (pre-ATOM) VBIOS interface used by older asics.
 */
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the combios scratch registers.  Always returns 0.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1054
1055
1056
1057
1058
1059
1060
1061
1062
/**
 * radeon_combios_fini - tear down combios driver info
 *
 * @rdev: radeon_device pointer
 *
 * Nothing to free for combios; kept for symmetry with
 * radeon_atombios_fini().
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077 static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
1078 {
1079 struct drm_device *dev = pci_get_drvdata(pdev);
1080 struct radeon_device *rdev = dev->dev_private;
1081 radeon_vga_set_state(rdev, state);
1082 if (state)
1083 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1084 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1085 else
1086 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087 }
1088
1089
1090
1091
1092
1093
1094
1095 static int radeon_gart_size_auto(enum radeon_family family)
1096 {
1097
1098 if (family >= CHIP_TAHITI)
1099 return 2048;
1100 else if (family >= CHIP_RV770)
1101 return 1024;
1102 else
1103 return 512;
1104 }
1105
1106
1107
1108
1109
1110
1111
1112
1113
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters (vram limit, gart size, agpmode,
 * vm size/block size) and updates the associated values used by the
 * driver, warning and substituting sane defaults for invalid settings.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be a power of two and >= 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!is_power_of_2(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* VM size is in GB and must be a power of two in [1, 1024] */
	if (!is_power_of_2(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * 1TB cap: keeps the VM within the GPU address space.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* make sure the PD is 4K in size up to 8GB address space,
		 * above that split equally between PD and PT */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * vga_switcheroo callback: resumes or suspends the KMS device as the
 * mux switches the GPU on or off.  On PX hardware, power state is
 * driven by runtime PM instead, so the OFF transition is ignored here.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* don't suspend or resume a used PX GPU via switcheroo */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		radeon_resume_kms(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * vga_switcheroo callback: a switch is only allowed while no userspace
 * clients have the device open.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}
1258
/* vga_switcheroo client callbacks for this driver; no reprobe needed */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags (family in the low bits plus RADEON_IS_* flags)
 *
 * Initializes the driver info and hw (all asics): software state, locks,
 * DMA mask, MMIO/doorbell/IO-port mappings, vgaarb and vga_switcheroo
 * registration, then the asic itself via radeon_init(), with an AGP
 * fallback retry and post-init ring/DPM fixups and optional self-tests.
 * Called at driver startup.
 * Returns 0 for success or an error on failure.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recover on driver load failure */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* max_pfn: radeon_vm_size is in GB, pages are 4KB,
	 * so GB -> pages is a shift by 30-12 = 18 bits */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask: this is the max address of the
	 * GPU's internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask.
	 * PCIE/IGP - can handle 40 bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	dma_bits = 40;
	if (rdev->flags & RADEON_IS_AGP)
		dma_bits = 32;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		dma_bits = 32;
#ifdef CONFIG_PPC64
	/* 32-bit DMA quirk for CEDAR on PPC64 */
	if (rdev->family == CHIP_CEDAR)
		dma_bits = 32;
#endif

	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pr_warn("radeon: No suitable DMA available\n");
		return r;
	}
	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register BAR is 5 on BONAIRE+, 2 on older asics */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL)
		return -ENOMEM;

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first I/O resource found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources;
	 * this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_register_client(rdev->pdev,
					       &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	radeon_gem_debugfs_init(rdev);
	radeon_mst_debugfs_init(rdev);

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Restart DPM on mobility TURKS after the ring tests — the
	 * disable/enable cycle here works around hangs on those parts.
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance a pm_runtime get taken elsewhere on PX parts */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1502
1503
1504
1505
1506
1507
1508
1509
1510
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tears down the driver (all asics): evicts VRAM, shuts down the asic,
 * unregisters from vga_switcheroo and vgaarb, and unmaps the I/O port,
 * MMIO and doorbell regions.  Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	if (!pci_is_thunderbolt_attached(rdev->pdev))
		vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	vga_client_unregister(rdev->pdev);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
}
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543 int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1544 bool fbcon, bool freeze)
1545 {
1546 struct radeon_device *rdev;
1547 struct pci_dev *pdev;
1548 struct drm_crtc *crtc;
1549 struct drm_connector *connector;
1550 int i, r;
1551
1552 if (dev == NULL || dev->dev_private == NULL) {
1553 return -ENODEV;
1554 }
1555
1556 rdev = dev->dev_private;
1557 pdev = to_pci_dev(dev->dev);
1558
1559 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1560 return 0;
1561
1562 drm_kms_helper_poll_disable(dev);
1563
1564 drm_modeset_lock_all(dev);
1565
1566 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1567 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1568 }
1569 drm_modeset_unlock_all(dev);
1570
1571
1572 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1573 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1574 struct drm_framebuffer *fb = crtc->primary->fb;
1575 struct radeon_bo *robj;
1576
1577 if (radeon_crtc->cursor_bo) {
1578 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1579 r = radeon_bo_reserve(robj, false);
1580 if (r == 0) {
1581 radeon_bo_unpin(robj);
1582 radeon_bo_unreserve(robj);
1583 }
1584 }
1585
1586 if (fb == NULL || fb->obj[0] == NULL) {
1587 continue;
1588 }
1589 robj = gem_to_radeon_bo(fb->obj[0]);
1590
1591 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1592 r = radeon_bo_reserve(robj, false);
1593 if (r == 0) {
1594 radeon_bo_unpin(robj);
1595 radeon_bo_unreserve(robj);
1596 }
1597 }
1598 }
1599
1600 radeon_bo_evict_vram(rdev);
1601
1602
1603 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1604 r = radeon_fence_wait_empty(rdev, i);
1605 if (r) {
1606
1607 radeon_fence_driver_force_completion(rdev, i);
1608 } else {
1609
1610 flush_delayed_work(&rdev->fence_drv[i].lockup_work);
1611 }
1612 }
1613
1614 radeon_save_bios_scratch_regs(rdev);
1615
1616 radeon_suspend(rdev);
1617 radeon_hpd_fini(rdev);
1618
1619
1620
1621
1622 radeon_bo_evict_vram(rdev);
1623
1624 radeon_agp_suspend(rdev);
1625
1626 pci_save_state(pdev);
1627 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1628 rdev->asic->asic_reset(rdev, true);
1629 pci_restore_state(pdev);
1630 } else if (suspend) {
1631
1632 pci_disable_device(pdev);
1633 pci_set_power_state(pdev, PCI_D3hot);
1634 }
1635
1636 if (fbcon) {
1637 console_lock();
1638 radeon_fbdev_set_suspend(rdev, 1);
1639 console_unlock();
1640 }
1641 return 0;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
/**
 * radeon_resume_kms - initiate device resume
 * @dev: drm dev pointer
 * @resume: true = wake the PCI device back up (D0 + restore state)
 * @fbcon: true = resume the fbdev console as well
 *
 * Brings the hw back to an operational state: wakes the PCI device,
 * resumes the asic, re-runs IB ring tests, re-inits power management,
 * re-pins cursors, re-inits encoders/PLLs/backlight and forces the
 * display modes back on.
 * Returns 0 for success or -1 if the PCI device could not be enabled.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct drm_crtc *crtc;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
		if (pci_enable_device(pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* pre-AVIVO parts restrict the cursor BO
				 * to the first 1 << 27 bytes of VRAM */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1755
1756
1757
1758
1759
1760
1761
1762
1763
/**
 * radeon_gpu_reset - reset the asic
 * @rdev: radeon device pointer
 *
 * Attempt a GPU reset: back up the pending commands on every ring,
 * suspend and reset the asic, resume it, replay the saved commands on
 * success (or force fence completion on failure), then re-init power
 * management, encoders and hpd.  Holds the exclusive_lock for writing
 * during the reset, downgrading to a read lock for the mode restore
 * and ring tests at the end.
 * Returns 0 for success, -EAGAIN if the IB tests fail after commands
 * were replayed (caller should retry), or a reset error code.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* back up the unprocessed commands on each ring so they can be
	 * replayed after a successful reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* on success replay the backed-up commands, otherwise force
	 * fence completion and free the backup */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep readers out of the full reset, but let them see the
	 * in_reset state while modes are restored and rings tested */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}