0001
0002
0003
0004
0005
0006 #include <linux/delay.h>
0007 #include <linux/dmi.h>
0008 #include <linux/pci.h>
0009 #include <linux/vgaarb.h>
0010 #include <asm/hpet.h>
0011 #include <asm/pci_x86.h>
0012
0013 static void pci_fixup_i450nx(struct pci_dev *d)
0014 {
0015
0016
0017
0018 int pxb, reg;
0019 u8 busno, suba, subb;
0020
0021 dev_warn(&d->dev, "Searching for i450NX host bridges\n");
0022 reg = 0xd0;
0023 for(pxb = 0; pxb < 2; pxb++) {
0024 pci_read_config_byte(d, reg++, &busno);
0025 pci_read_config_byte(d, reg++, &suba);
0026 pci_read_config_byte(d, reg++, &subb);
0027 dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
0028 suba, subb);
0029 if (busno)
0030 pcibios_scan_root(busno);
0031 if (suba < subb)
0032 pcibios_scan_root(suba+1);
0033 }
0034 pcibios_last_bus = -1;
0035 }
0036 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);
0037
0038 static void pci_fixup_i450gx(struct pci_dev *d)
0039 {
0040
0041
0042
0043
0044 u8 busno;
0045 pci_read_config_byte(d, 0x4a, &busno);
0046 dev_info(&d->dev, "i440KX/GX host bridge; secondary bus %02x\n", busno);
0047 pcibios_scan_root(busno);
0048 pcibios_last_bus = -1;
0049 }
0050 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);
0051
0052 static void pci_fixup_umc_ide(struct pci_dev *d)
0053 {
0054
0055
0056
0057
0058 int i;
0059
0060 dev_warn(&d->dev, "Fixing base address flags\n");
0061 for(i = 0; i < 4; i++)
0062 d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
0063 }
0064 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
0065
0066 static void pci_fixup_latency(struct pci_dev *d)
0067 {
0068
0069
0070
0071
0072 dev_dbg(&d->dev, "Setting max latency to 32\n");
0073 pcibios_max_latency = 32;
0074 }
0075 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
0076 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
0077
0078 static void pci_fixup_piix4_acpi(struct pci_dev *d)
0079 {
0080
0081
0082
0083 d->irq = 9;
0084 }
0085 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
/* VIA 8363 revisions handled specially below. */
#define VIA_8363_KL133_REVISION_ID 0x81
#define VIA_8363_KM133_REVISION_ID 0x84

/*
 * Disable the VIA northbridge memory write queue by clearing the bits
 * outside 'mask' in config register 'where'.  The register and mask
 * depend on the chip:
 *  - 8367: register 0x95, mask 0x1f (latency timer is also zeroed)
 *  - 8363 rev 0x81/0x84: register 0x55, wider mask 0x3f
 *  - all other matched devices: register 0x55, mask 0x1f
 * Runs both at probe (HEADER) and on resume.
 */
static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
{
	u8 v;
	int where = 0x55;
	int mask = 0x1f;

	if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
		/* 8367: zero the latency timer and use register 0x95. */
		pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);

		where = 0x95;

	} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
			(d->revision == VIA_8363_KL133_REVISION_ID ||
			d->revision == VIA_8363_KM133_REVISION_ID)) {
		/* These 8363 revisions keep one extra bit (mask 0x3f). */
		mask = 0x3f;

	}

	/* Only rewrite when some bit outside the mask is actually set. */
	pci_read_config_byte(d, where, &v);
	if (v & ~mask) {
		dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \
			d->device, d->revision, where, v, mask, v & mask);
		v &= mask;
		pci_write_config_byte(d, where, v);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154 static void pci_fixup_transparent_bridge(struct pci_dev *dev)
0155 {
0156 if ((dev->device & 0xff00) == 0x2400)
0157 dev->transparent = 1;
0158 }
0159 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
0160 PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174 static void pci_fixup_nforce2(struct pci_dev *dev)
0175 {
0176 u32 val;
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186 pci_read_config_dword(dev, 0x6c, &val);
0187
0188
0189
0190
0191 if ((val & 0x00FF0000) != 0x00010000) {
0192 dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
0193 pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
0194 }
0195 }
0196 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
0197 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
0198
0199
/* Up to six Intel MCH PCIe root ports (PA..PC1) are covered. */
#define MAX_PCIEROOT 6
/*
 * Per (root port, devfn) config offset of the downstream device's
 * Link Control register; 0 means "no ASPM write filtering".
 */
static int quirk_aspm_offset[MAX_PCIEROOT << 3];

/* Map a (root-port device ID, devfn) pair to a quirk_aspm_offset index. */
#define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))

/* Reads are passed straight through to the raw config accessor. */
static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			devfn, where, size, value);
}
0210
0211
0212
0213
0214
0215 static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
0216 {
0217 u8 offset;
0218
0219 offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
0220
0221 if ((offset) && (where == offset))
0222 value = value & ~PCI_EXP_LNKCTL_ASPMC;
0223
0224 return raw_pci_write(pci_domain_nr(bus), bus->number,
0225 devfn, where, size, value);
0226 }
0227
0228 static struct pci_ops quirk_pcie_aspm_ops = {
0229 .read = quirk_pcie_aspm_read,
0230 .write = quirk_pcie_aspm_write,
0231 };
0232
0233
0234
0235
0236
0237
0238
0239
0240
/*
 * For Intel MCH PCIe root ports (device IDs PA..PC1): install bus ops
 * on the port's secondary bus that silently drop writes to the ASPM
 * control bits of downstream devices' Link Control registers.  If the
 * secondary bus has no devices, the recorded offsets are cleared and
 * the parent bus's ops are restored instead.
 */
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
	int i;
	struct pci_bus *pbus;
	struct pci_dev *dev;

	if ((pbus = pdev->subordinate) == NULL)
		return;

	/* Only IDs in the PA..PC1 range map into quirk_aspm_offset[]. */
	if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
		(pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
		return;

	if (list_empty(&pbus->devices)) {
		/*
		 * Nothing below this port: clear all eight stored
		 * offsets for it and fall back to the parent's ops.
		 */
		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
			quirk_aspm_offset[i] = 0;

		pci_bus_set_ops(pbus, pbus->parent->ops);
	} else {
		/*
		 * Record each child's Link Control offset (PCIe cap +
		 * PCI_EXP_LNKCTL) so quirk_pcie_aspm_write() can mask
		 * ASPM control writes, then install the filtering ops.
		 */
		list_for_each_entry(dev, &pbus->devices, bus_list)
			/* There are 0 to 8 devices under this bus. */
			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
				dev->pcie_cap + PCI_EXP_LNKCTL;

		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
	}

}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
/*
 * Point the (candidate) default VGA device's ROM resource at the
 * legacy shadow copy at 0xC0000.  First walk up the bus hierarchy: if
 * any bridge above the device does not forward VGA cycles
 * (PCI_BRIDGE_CTL_VGA clear), the device cannot be the boot display
 * and is left untouched.
 */
static void pci_fixup_video(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	struct pci_bus *bus;
	u16 config;
	struct resource *res;

	/* Check that VGA is routed to this device through every bridge. */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge && (pci_is_bridge(bridge))) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
						&config);
			if (!(config & PCI_BRIDGE_CTL_VGA))
				return;
		}
		bus = bus->parent;
	}
	/* Proceed only when no default VGA device is set, or we are it. */
	if (!vga_default_device() || pdev == vga_default_device()) {
		pci_read_config_word(pdev, PCI_COMMAND, &config);
		/* Device must be decoding I/O or memory to be active. */
		if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			res = &pdev->resource[PCI_ROM_RESOURCE];

			/* Drop the real ROM mapping before shadowing. */
			pci_disable_rom(pdev);
			if (res->parent)
				release_resource(res);

			/* Redirect the ROM resource to the 128K shadow. */
			res->start = 0xC0000;
			res->end = res->start + 0x20000 - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
				     IORESOURCE_PCI_FIXED;
			dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
				 res);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
0358
0359
/* Boards on which pci_fixup_msi_k8t_onboard_sound() should run. */
static const struct dmi_system_id msi_k8t_dmi_table[] = {
	{
		.ident = "MSI-K8T-Neo2Fir",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
		},
	},
	{}
};
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
/*
 * On the MSI K8T Neo2-FIR (see msi_k8t_dmi_table), bit 6 of VIA 8237
 * config register 0x50 disables the onboard sound.  Clear the bit and
 * read the register back to see whether the change took effect.
 * Applied at probe and again on resume.
 */
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
{
	unsigned char val;
	if (!dmi_check_system(msi_k8t_dmi_table))
		return;

	pci_read_config_byte(dev, 0x50, &val);
	if (val & 0x40) {
		pci_write_config_byte(dev, 0x50, val & (~0x40));

		/* Re-read to verify that clearing the bit stuck. */
		pci_read_config_byte(dev, 0x50, &val);
		if (val & 0x40)
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
					"can't enable onboard soundcard!\n");
		else
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; "
					"enabled onboard soundcard\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
		pci_fixup_msi_k8t_onboard_sound);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
		pci_fixup_msi_k8t_onboard_sound);
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
/* Cache line size saved by the pre-fixup, restored after enable. */
static u16 toshiba_line_size;

/* Toshiba models whose OHCI-1394 controller needs the fixups below. */
static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
	{
		.ident = "Toshiba PS5 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
		},
	},
	{
		.ident = "Toshiba PSM4 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
		},
	},
	{
		.ident = "Toshiba A40 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
		},
	},
	{ }
};
0441
0442 static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
0443 {
0444 if (!dmi_check_system(toshiba_ohci1394_dmi_table))
0445 return;
0446
0447 dev->current_state = PCI_D3cold;
0448 pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
0449 }
0450 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
0451 pci_pre_fixup_toshiba_ohci1394);
0452
/*
 * Counterpart to pci_pre_fixup_toshiba_ohci1394(): after the device is
 * enabled, restore the saved cache line size, refresh dev->irq from
 * PCI_INTERRUPT_LINE, and rewrite both BARs from the resources the PCI
 * core holds.
 */
static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return;

	/* Restore config-space state for the previously powered-down device. */
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
			       pci_resource_start(dev, 0));
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
			       pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
			       pci_post_fixup_toshiba_ohci1394);
0468
0469
0470
0471
0472
0473
0474 static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
0475 {
0476 u8 r;
0477
0478 pci_read_config_byte(dev, 0x42, &r);
0479 r &= 0xfd;
0480 pci_write_config_byte(dev, 0x42, r);
0481 }
0482 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
0483 pci_early_fixup_cyrix_5530);
0484 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
0485 pci_early_fixup_cyrix_5530);
0486
0487
0488
0489
0490
0491 static void pci_siemens_interrupt_controller(struct pci_dev *dev)
0492 {
0493 dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
0494 }
0495 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
0496 pci_siemens_interrupt_controller);
0497
0498
0499
0500
0501
/*
 * On early SB600 revisions (revision ID < 0x2F), set bit 7 of PM
 * register 0x55 — via the index/data port pair at 0xCD6/0xCD7 — which,
 * per this quirk's purpose, hides the HPET BAR of the SMBus function.
 */
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
	u8 val;

	/* Offset 0x08 is the PCI revision ID. */
	pci_read_config_byte(dev, 0x08, &val);

	if (val < 0x2F) {
		/* Select PM register 0x55 and read its current value... */
		outb(0x55, 0xCD6);
		val = inb(0xCD7);

		/* ...then write it back with bit 7 set. */
		outb(0x55, 0xCD6);
		outb(val | 0x80, 0xCD7);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
0525
0526 #ifdef CONFIG_HPET_TIMER
0527 static void sb600_hpet_quirk(struct pci_dev *dev)
0528 {
0529 struct resource *r = &dev->resource[1];
0530
0531 if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
0532 r->flags |= IORESOURCE_PCI_FIXED;
0533 dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
0534 }
0535 }
0536 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
0537 #endif
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547 static void twinhead_reserve_killing_zone(struct pci_dev *dev)
0548 {
0549 if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
0550 pr_info("Reserving memory on Twinhead H12Y\n");
0551 request_mem_region(0xFFB00000, 0x100000, "twinhead");
0552 }
0553 }
0554 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567 static void pci_invalid_bar(struct pci_dev *dev)
0568 {
0569 dev->non_compliant_bars = 1;
0570 }
0571 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
0572 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
0573 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
0574 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
0575 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
0576 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
0577 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
0578 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
0579
0580
0581
0582
0583
0584
0585
0586
0587 static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
0588 {
0589 dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
0590 dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
0591 >> PCI_PM_CAP_PME_SHIFT);
0592 }
0593 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
0594
0595
0596
0597
0598
0599 static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
0600 {
0601 dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
0602 dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
0603 }
0604 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619 static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
0620 {
0621 struct device *dev = &pdev->dev;
0622 struct resource *res;
0623
0624 if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
0625 !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
0626 pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
0627 return;
0628
0629 res = request_mem_region(0x7fa00000, 0x200000,
0630 "MacBook Pro poweroff workaround");
0631 if (res)
0632 dev_info(dev, "claimed %s %pR\n", res->name, res);
0633 else
0634 dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
0635 }
0636 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
0637
0638
0639
0640
0641
0642
0643
0644 static void quirk_no_aersid(struct pci_dev *pdev)
0645 {
0646
0647 if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
0648 pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
0649 }
0650 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
0651 PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
0652
0653 static void quirk_intel_th_dnv(struct pci_dev *dev)
0654 {
0655 struct resource *r = &dev->resource[4];
0656
0657
0658
0659
0660
0661 if (r->end == r->start + 0x7ff) {
0662 r->start = 0;
0663 r->end = 0x3fffff;
0664 r->flags |= IORESOURCE_UNSET;
0665 }
0666 }
0667 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
0668
0669 #ifdef CONFIG_PHYS_ADDR_T_64BIT
0670
/* MMIO routing window x: base register with enable bits and base[31:8]. */
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
#define AMD_141b_MMIO_BASE_RE_MASK BIT(0)
#define AMD_141b_MMIO_BASE_WE_MASK BIT(1)
#define AMD_141b_MMIO_BASE_MMIOBASE_MASK GENMASK(31,8)

/* MMIO routing window x: limit register with limit[31:8]. */
#define AMD_141b_MMIO_LIMIT(x) (0x84 + (x) * 0x8)
#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK GENMASK(31,8)

/* MMIO routing window x: high-address bits of base (7:0) and limit (23:16). */
#define AMD_141b_MMIO_HIGH(x) (0x180 + (x) * 0x4)
#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK GENMASK(7,0)
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT 16
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK GENMASK(23,16)
0683
0684
0685
0686
0687
0688
0689
0690
0691
/*
 * Add a large 64-bit root-bus window (0xbd00000000..0xfcffffffff) and
 * program it into a free MMIO routing register pair of the AMD
 * northbridge, so 64-bit prefetchable BARs can live above 4GB.  Gated
 * by the PCI_BIG_ROOT_WINDOW probe flag; skipped on multi-socket
 * systems and when a window above 4GB is already routed.  Runs at
 * FINAL fixup time and again on resume (the resume pass reuses the
 * resource it added earlier via the name-match below).
 */
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
	static const char *name = "PCI Bus 0000:00";
	struct resource *res, *conflict;
	u32 base, limit, high;
	struct pci_dev *other;
	unsigned i;

	if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
		return;

	/* Require exactly one matching device, i.e. a single socket. */
	other = pci_get_device(dev->vendor, dev->device, NULL);
	if (other != dev ||
		(other = pci_get_device(dev->vendor, dev->device, other))) {
		/* Multi-socket (or unexpected) topology: leave it alone. */
		pci_dev_put(other);
		return;
	}

	/* Find a free routing window (neither enable bit set). */
	for (i = 0; i < 8; i++) {
		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);

		/* Unused window: use this one. */
		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
			AMD_141b_MMIO_BASE_WE_MASK)))
			break;

		base >>= 8;
		base |= high << 24;

		/* A window already routes above 4GB: nothing to do. */
		if (base > 0x10000)
			return;
	}
	if (i == 8)
		return;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	/*
	 * Try to claim the 256GB window in iomem.  If it conflicts with
	 * a resource carrying our own name, we added it on an earlier
	 * pass (e.g. before suspend) and simply reuse it.
	 */
	res->name = name;
	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
	res->start = 0xbd00000000ull;
	res->end = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		kfree(res);
		if (conflict->name != name)
			return;

		/* Same-name conflict: reuse the previously added window. */
		res = conflict;
	} else {
		dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
			 res);
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pci_bus_add_resource(dev->bus, res, 0);
	}

	/* Pack base/limit/high register values from the resource range. */
	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
		& AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);

	/* Write HIGH and LIMIT first; BASE last, since it enables the window. */
	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
0781
/* RS690 HTIU indirect registers holding the TOP_OF_DRAM2 address. */
#define RS690_LOWER_TOP_OF_DRAM2 0x30
#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
#define RS690_UPPER_TOP_OF_DRAM2 0x31
/* Index/data port pair for the indirect HTIU register space. */
#define RS690_HTIU_NB_INDEX 0xA8
#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
#define RS690_HTIU_NB_DATA 0xAC

/*
 * On RS690 systems with RAM above 4GB, program the chipset's
 * TOP_OF_DRAM2 registers with the real top of DRAM so 64-bit DMA
 * works.  If LOWER_TOP_OF_DRAM2 already reads non-zero, firmware set
 * it up and we leave it alone.
 */
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
{
	u32 val = 0;
	phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;

	/* Nothing to do when all DRAM sits below the 4GB boundary. */
	if (top_of_dram <= (1ULL << 32))
		return;

	/* Read the current LOWER_TOP_OF_DRAM2 via the index/data pair. */
	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
				RS690_LOWER_TOP_OF_DRAM2);
	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);

	if (val)
		return;

	pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);

	/* Write the upper 32 bits first, then the lower word + valid bit. */
	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
		RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
		RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
		top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
0825
0826 #endif