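/*
 * Intel GTT (Graphics Translation Table) routines.
 *
 * GMCH detection, GTT setup and PTE programming for i810 through Ironlake
 * integrated graphics, exposed both as a fake AGP bridge (CONFIG_AGP_INTEL)
 * and directly to drm/i915 via the exported intel_gmch_* interface.
 */
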
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
#include <asm/set_memory.h>

/*
 * If we have Intel graphics, the only IOMMU that can sit in front of it is
 * Intel's, so make use of the PCI DMA API contingent on CONFIG_INTEL_IOMMU.
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_phys_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
	phys_addr_t scratch_page_dma;
	int refcount;
	/* Whether i915 needs to use the dmar apis or not. */
	unsigned int needs_dmar : 1;
	phys_addr_t gma_bus_addr;
	/* Size of memory reserved for graphics by the BIOS */
	resource_size_t stolen_size;
	/* Total number of gtt entries. */
	unsigned int gtt_total_entries;
	/* Part of the gtt that is mappable by the cpu, for those chips where
	 * this is not the full gtt. */
	unsigned int gtt_mappable_entries;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_gtt_map_memory(struct page **pages,
				unsigned int num_entries,
				struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
		goto err;

	for_each_sg(st->sgl, sg, num_entries, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
			DMA_BIDIRECTIONAL))
		goto err;

	return 0;

err:
	sg_free_table(st);
	return -ENOMEM;
}

static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;
	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);

	dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
		     DMA_BIDIRECTIONAL);

	st.sgl = sg_list;
	st.orig_nents = st.nents = num_sg;

	sg_free_table(&st);
}
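
/*
 * Illustrative sketch (not part of the driver): how the two helpers above
 * pair up around a GTT binding when VT-d remapping is active; "pages", "n"
 * and "pg_start" are hypothetical caller state.
 *
 *	struct sg_table st;
 *
 *	if (intel_gtt_map_memory(pages, n, &st) == 0) {
 *		intel_gmch_gtt_insert_sg_entries(&st, pg_start, AGP_USER_MEMORY);
 *		...
 *		intel_gmch_gtt_clear_range(pg_start, n);
 *		intel_gtt_unmap_memory(st.sgl, st.nents);
 *	}
 */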

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	/* ARGB cursors need 4 physically contiguous, uncached pages (order 2) */
	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
#endif

#define I810_GTT_ORDER 4
static int i810_setup(void)
{
	phys_addr_t reg_addr;
	char *gtt_table;

	/* i81x does not preallocate the gtt. It's always 64kb in size. */
	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
	if (gtt_table == NULL)
		return -ENOMEM;
	intel_private.i81x_gtt_table = gtt_table;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers) {
		free_gatt_pages(gtt_table, I810_GTT_ORDER);
		return -ENOMEM;
	}

	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers+I810_PGETBL_CTL);

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}

	return 0;
}

static void i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	wmb();

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
#endif

/* Unbound GTT entries are pointed at this page, never at free memory. */
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	set_pages_uc(page, 1);

	if (intel_private.needs_dmar) {
		dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
			__free_page(page);
			return -EINVAL;
		}

		intel_private.scratch_page_dma = dma_addr;
	} else
		intel_private.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static void i810_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}

static resource_size_t intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	resource_size_t stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %lluK %s memory\n",
			 (u64)stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
}

static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	/* each GTT entry is 4 bytes */
	return size/4;
}
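
/*
 * Worked example: each GTT PTE is 4 bytes, so a 512KB page table holds
 * KB(512)/4 = 131072 entries, enough to map a 512MB aperture at 4KB per
 * page; hence the size/4 above.
 */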

static unsigned int intel_gtt_total_entries(void)
{
	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}
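
/*
 * Example: a 128MB gen2 aperture gives MB(128) >> PAGE_SHIFT = 32768
 * mappable entries, one PTE per 4KB page.
 */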

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	if (intel_private.needs_dmar)
		dma_unmap_page(&intel_private.pcidev->dev,
			       intel_private.scratch_page_dma, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

/* Certain Gen5 chipsets require idling the GPU before unmapping anything
 * from the GTT when VT-d is enabled.
 */
static inline int needs_ilk_vtd_wa(void)
{
	const unsigned short gpu_devid = intel_private.pcidev->device;

	/*
	 * Query intel_iommu to see if we need the workaround. Presumably
	 * that was loaded first.
	 */
	return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
		 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
		device_iommu_mapped(&intel_private.pcidev->dev));
}

static bool intel_gtt_can_wc(void)
{
	if (INTEL_GTT_GEN <= 2)
		return false;

	if (INTEL_GTT_GEN >= 6)
		return false;

	/* Reports of major corruption with Ironlake vt'd enabled */
	if (needs_ilk_vtd_wa())
		return false;

	return true;
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret, bar;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers+I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
		 "detected gtt size: %dK total, %dK mappable\n",
		 intel_private.gtt_total_entries * 4,
		 intel_private.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.gtt_total_entries * 4;

	intel_private.gtt = NULL;
	if (intel_gtt_can_wc())
		intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
					       gtt_map_size);
	if (intel_private.gtt == NULL)
		intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
					    gtt_map_size);
	if (intel_private.gtt == NULL) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_AGP_INTEL)
	global_cache_flush();
#endif

	intel_private.stolen_size = intel_gtt_stolen_size();

	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	if (INTEL_GTT_GEN <= 2)
		bar = I810_GMADR_BAR;
	else
		bar = I915_GMADR_BAR;

	intel_private.gma_bus_addr = pci_bus_address(intel_private.pcidev, bar);
	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{32, 8192, 3},
	{64, 16384, 4},
	{128, 32768, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}
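
/*
 * Example: 65536 mappable entries make the aperture
 * (65536 << PAGE_SHIFT) / MB(1) = 256MB, matching the {256, 65536, 6}
 * entry above.
 */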
#endif

static void i830_cleanup(void)
{
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * Gen2 has no dedicated flush page the way later chips do, so this falls
 * back to a full cache writeback plus the I830_HIC flush bit below.
 */
static void i830_chipset_flush(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Forcibly evict everything from the CPU write buffers.
	 * clflush appears to be insufficient.
	 */
	wbinvd_on_all_cpus();

	/* The only documentation for this magic bit is for the 855GM;
	 * it is assumed to exist on the other gen2 chipsets as well.
	 * Setting bit 31 kicks the chipset flush; poll until the bit
	 * self-clears, giving up after one second.
	 */
	writel(readl(intel_private.registers+I830_HIC) | (1<<31),
	       intel_private.registers+I830_HIC);

	while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
		if (time_after(jiffies, timeout))
			break;

		udelay(50);
	}
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}

bool intel_gmch_enable_gtt(void)
{
	u8 __iomem *reg;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	reg = intel_private.registers+I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers+GFX_FLSH_CNTL);

	return true;
}
EXPORT_SYMBOL(intel_gmch_enable_gtt);
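
/*
 * Usage sketch (hypothetical caller, e.g. a resume path in a GTT user):
 *
 *	if (!intel_gmch_enable_gtt())
 *		return -EIO;
 *
 * The PGETBL_CTL value saved by intel_gtt_init() is restored and verified
 * here, so callers only need to check the boolean result.
 */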

static int i830_setup(void)
{
	phys_addr_t reg_addr;

	reg_addr = pci_resource_start(intel_private.pcidev, I810_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_phys_addr = reg_addr + I810_PTE_BASE;

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_gmch_enable_gtt())
		return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}
#endif

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

void intel_gmch_gtt_insert_page(dma_addr_t addr,
				unsigned int pg,
				unsigned int flags)
{
	intel_private.driver->write_entry(addr, pg, flags);
	readl(intel_private.gtt + pg);	/* posting read flushes the PTE write */
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
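
/*
 * Usage sketch: bind a single DMA-mapped page at GTT page index "pg"
 * (names are hypothetical caller state):
 *
 *	intel_gmch_gtt_insert_page(dma_addr, pg, AGP_USER_CACHED_MEMORY);
 */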

void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
				      unsigned int pg_start,
				      unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(st->sgl, sg, st->nents, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr, j, flags);
			j++;
		}
	}
	readl(intel_private.gtt + j - 1);	/* posting read on the last PTE */
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
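
/*
 * Usage sketch: bind a whole DMA-mapped sg_table starting at GTT page
 * "first" (hypothetical caller state):
 *
 *	intel_gmch_gtt_insert_sg_entries(st, first, AGP_USER_MEMORY);
 *
 * PTEs are written per 4KB page even when the scatterlist coalesced
 * larger DMA segments.
 */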

#if IS_ENABLED(CONFIG_AGP_INTEL)
static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
					unsigned int num_entries,
					struct page **pages,
					unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	wmb();
}

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.stolen_size / PAGE_SIZE;
		int end = intel_private.gtt_mappable_entries;
		intel_gmch_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.needs_dmar) {
		struct sg_table st;

		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
		if (ret != 0)
			return ret;

		intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
		mem->sg_list = st.sgl;
		mem->num_sg = st.nents;
	} else
		intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
					    type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
#endif

void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.scratch_page_dma,
						  i, 0);
	}
	wmb();
}
EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
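
/*
 * Usage sketch: unbinding points the PTEs back at the scratch page rather
 * than leaving stale translations behind:
 *
 *	intel_gmch_gtt_clear_range(first, n);
 */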

#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	intel_gmch_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
							int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
#endif

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	wmb();
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}

static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
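
/*
 * Worked example: for the 36-bit address 0x123456000, bits [35:32] are
 * 0x1; (addr >> 28) & 0xf0 folds them into PTE bits [7:4], so the 32-bit
 * PTE written is 0x23456010 | pte_flags.
 */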

static int i9xx_setup(void)
{
	phys_addr_t reg_addr;
	int size = KB(512);

	reg_addr = pci_resource_start(intel_private.pcidev, I915_MMADR_BAR);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	switch (INTEL_GTT_GEN) {
	case 3:
		intel_private.gtt_phys_addr =
			pci_resource_start(intel_private.pcidev, I915_PTE_BAR);
		break;
	case 5:
		intel_private.gtt_phys_addr = reg_addr + MB(2);
		break;
	default:
		intel_private.gtt_phys_addr = reg_addr + KB(512);
		break;
	}

	intel_i9xx_setup_flush();

	return 0;
}

#if IS_ENABLED(CONFIG_AGP_INTEL)
static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
#endif

static const struct intel_gtt_driver i81x_gtt_driver = {
	.gen = 1,
	.has_pgtbl_enable = 1,
	.dma_mask_size = 32,
	.setup = i810_setup,
	.cleanup = i810_cleanup,
	.check_flags = i830_check_flags,
	.write_entry = i810_write_entry,
};
static const struct intel_gtt_driver i8xx_gtt_driver = {
	.gen = 2,
	.has_pgtbl_enable = 1,
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};
static const struct intel_gtt_driver i915_gtt_driver = {
	.gen = 3,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
	.write_entry = i830_write_entry,
	.dma_mask_size = 32,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g33_gtt_driver = {
	.gen = 3,
	.is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver pineview_gtt_driver = {
	.gen = 3,
	.is_pineview = 1, .is_g33 = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver i965_gtt_driver = {
	.gen = 4,
	.has_pgtbl_enable = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver g4x_gtt_driver = {
	.gen = 5,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver ironlake_gtt_driver = {
	.gen = 5,
	.is_ironlake = 1,
	.setup = i9xx_setup,
	.cleanup = i9xx_cleanup,
	.write_entry = i965_write_entry,
	.dma_mask_size = 36,
	.check_flags = i830_check_flags,
	.chipset_flush = i9xx_chipset_flush,
};

/* Table mapping GMCH graphics device IDs to the GTT driver that knows how
 * to program them; find_gmch() matches against the PCI ID of the installed
 * graphics device.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		/* We only care about function 0, so keep searching. */
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (gpu_pdev) {
			if (gpu_pdev->device ==
			    intel_gtt_chipsets[i].gmch_chip_id) {
				intel_private.pcidev = pci_dev_get(gpu_pdev);
				intel_private.driver =
					intel_gtt_chipsets[i].gtt_driver;

				break;
			}
		} else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

#if IS_ENABLED(CONFIG_AGP_INTEL)
	if (bridge) {
		if (INTEL_GTT_GEN > 1)
			return 0;

		bridge->driver = &intel_fake_agp_driver;
		bridge->dev_private_data = &intel_private;
		bridge->dev = bridge_pdev;
	}
#endif

	/*
	 * Can be called from the fake agp driver but also directly from
	 * drm/i915.ko. Hence we need to check whether everything is set up
	 * already.
	 */
	if (intel_private.refcount++)
		return 1;

	intel_private.bridge_dev = pci_dev_get(bridge_pdev);

	dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	if (bridge) {
		mask = intel_private.driver->dma_mask_size;
		if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
			dev_err(&intel_private.pcidev->dev,
				"set gfx device dma mask %d-bit failed!\n",
				mask);
		else
			dma_set_coherent_mask(&intel_private.pcidev->dev,
					      DMA_BIT_MASK(mask));
	}

	if (intel_gtt_init() != 0) {
		intel_gmch_remove();

		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);
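
/*
 * Usage sketch (hypothetical caller): a drm/i915-style user pairs the
 * probe with intel_gmch_remove(), which is refcounted:
 *
 *	if (!intel_gmch_probe(bridge_pdev, gpu_pdev, NULL))
 *		return -ENODEV;
 *	...
 *	intel_gmch_remove();
 */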

void intel_gmch_gtt_get(u64 *gtt_total,
			phys_addr_t *mappable_base,
			resource_size_t *mappable_end)
{
	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
	*mappable_base = intel_private.gma_bus_addr;
	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gmch_gtt_get);

void intel_gmch_gtt_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gmch_gtt_flush);

void intel_gmch_remove(void)
{
	if (--intel_private.refcount)
		return;

	if (intel_private.scratch_page)
		intel_gtt_teardown_scratch_page();
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
	intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");