Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * HP zx1 AGPGART routines.
0004  *
0005  * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
0006  *  Bjorn Helgaas <bjorn.helgaas@hp.com>
0007  */
0008 
0009 #include <linux/acpi.h>
0010 #include <linux/module.h>
0011 #include <linux/pci.h>
0012 #include <linux/init.h>
0013 #include <linux/agp_backend.h>
0014 #include <linux/log2.h>
0015 #include <linux/slab.h>
0016 
0017 #include <asm/acpi-ext.h>
0018 
0019 #include "agp.h"
0020 
#define HP_ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

/* HP ZX1 IOC registers */
#define HP_ZX1_IBASE		0x300
#define HP_ZX1_IMASK		0x308
#define HP_ZX1_PCOM		0x310
#define HP_ZX1_TCNFG		0x318
#define HP_ZX1_PDIR_BASE	0x320

#define HP_ZX1_IOVA_BASE	GB(1UL)
#define HP_ZX1_IOVA_SIZE	GB(1UL)
#define HP_ZX1_GART_SIZE	(HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

#define HP_ZX1_PDIR_VALID_BIT	0x8000000000000000UL
/* Parenthesize 'va' so expression arguments expand with correct precedence. */
#define HP_ZX1_IOVA_TO_PDIR(va)	(((va) - hp_private.iova_base) >> hp_private.io_tlb_shift)

#define AGP8X_MODE_BIT		3
#define AGP8X_MODE		(1 << AGP8X_MODE_BIT)
0040 
/* AGP bridge need not be PCI device, but DRM thinks it is. */
static struct pci_dev fake_bridge_dev;

/* Set once a ZX1 GART is probed successfully; stops further ACPI scanning. */
static int hp_zx1_gart_found;

/* Single fixed aperture size; entry is populated by hp_zx1_fetch_size(). */
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
	{0, 0, 0},		/* filled in by hp_zx1_fetch_size() */
};

/* GART PDIR entries carry the valid bit in the MSB. */
static struct gatt_mask hp_zx1_masks[] =
{
	{.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};

/*
 * Driver-private state for the (single) supported IOC/LBA pair.
 * Sizes are in bytes unless otherwise noted.
 */
static struct _hp_private {
	volatile u8 __iomem *ioc_regs;	/* mapped IOC CSR space */
	volatile u8 __iomem *lba_regs;	/* mapped LBA CSR space */
	int lba_cap_offset;	/* AGP capability offset within LBA config space */
	u64 *io_pdir;		// PDIR for entire IOVA
	u64 *gatt;		// PDIR just for GART (subset of above)
	u64 gatt_entries;	/* number of entries in the GART portion */
	u64 iova_base;		/* start of IO virtual address space */
	u64 gart_base;		/* start of GART region within IOVA space */
	u64 gart_size;		/* size of GART region */
	u64 io_pdir_size;	/* bytes needed for the full IOPDIR */
	int io_pdir_owner;	// do we own it, or share it with sba_iommu?
	int io_page_size;	/* IO page size in bytes (1 << io_tlb_shift) */
	int io_tlb_shift;	/* log2 of io_page_size */
	int io_tlb_ps;		// IOC ps config
	int io_pages_per_kpage;	/* IO pages that fit in one kernel page */
} hp_private;
0073 
/*
 * Adopt an IOC that sba_iommu has already configured: recover the IO page
 * size, IOVA base, and PDIR location from the live registers, and use the
 * second half of the IOVA space as the GART.
 *
 * Returns 0 on success, -ENODEV if the IOC configuration is unusable or
 * sba_iommu did not reserve a GART region for us.
 */
static int __init hp_zx1_ioc_shared(void)
{
	struct _hp_private *hp = &hp_private;

	printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

	/*
	 * IOC already configured by sba_iommu module; just use
	 * its setup.  We assume:
	 *	- IOVA space is 1Gb in size
	 *	- first 512Mb is IOMMU, second 512Mb is GART
	 */
	hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
	switch (hp->io_tlb_ps) {
		case 0: hp->io_tlb_shift = 12; break;
		case 1: hp->io_tlb_shift = 13; break;
		case 2: hp->io_tlb_shift = 14; break;
		case 3: hp->io_tlb_shift = 16; break;
		default:
			printk(KERN_ERR PFX "Invalid IOTLB page size "
			       "configuration 0x%x\n", hp->io_tlb_ps);
			hp->gatt = NULL;
			hp->gatt_entries = 0;
			return -ENODEV;
	}
	hp->io_page_size = 1 << hp->io_tlb_shift;
	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

	/* Low bit of IBASE is the enable flag, not part of the address. */
	hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

	hp->gart_size = HP_ZX1_GART_SIZE;
	hp->gatt_entries = hp->gart_size / hp->io_page_size;

	/* Locate sba_iommu's PDIR and point our GATT at the GART subrange. */
	hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

	/* sba_iommu marks the reserved GART region with a magic cookie. */
	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
		/* Normal case when no AGP device in system */
		hp->gatt = NULL;
		hp->gatt_entries = 0;
		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
		       "GART disabled\n");
		return -ENODEV;
	}

	return 0;
}
0122 
0123 static int __init
0124 hp_zx1_ioc_owner (void)
0125 {
0126     struct _hp_private *hp = &hp_private;
0127 
0128     printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
0129 
0130     /*
0131      * Select an IOV page size no larger than system page size.
0132      */
0133     if (PAGE_SIZE >= KB(64)) {
0134         hp->io_tlb_shift = 16;
0135         hp->io_tlb_ps = 3;
0136     } else if (PAGE_SIZE >= KB(16)) {
0137         hp->io_tlb_shift = 14;
0138         hp->io_tlb_ps = 2;
0139     } else if (PAGE_SIZE >= KB(8)) {
0140         hp->io_tlb_shift = 13;
0141         hp->io_tlb_ps = 1;
0142     } else {
0143         hp->io_tlb_shift = 12;
0144         hp->io_tlb_ps = 0;
0145     }
0146     hp->io_page_size = 1 << hp->io_tlb_shift;
0147     hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
0148 
0149     hp->iova_base = HP_ZX1_IOVA_BASE;
0150     hp->gart_size = HP_ZX1_GART_SIZE;
0151     hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
0152 
0153     hp->gatt_entries = hp->gart_size / hp->io_page_size;
0154     hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
0155 
0156     return 0;
0157 }
0158 
0159 static int __init
0160 hp_zx1_ioc_init (u64 hpa)
0161 {
0162     struct _hp_private *hp = &hp_private;
0163 
0164     hp->ioc_regs = ioremap(hpa, 1024);
0165     if (!hp->ioc_regs)
0166         return -ENOMEM;
0167 
0168     /*
0169      * If the IOTLB is currently disabled, we can take it over.
0170      * Otherwise, we have to share with sba_iommu.
0171      */
0172     hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
0173 
0174     if (hp->io_pdir_owner)
0175         return hp_zx1_ioc_owner();
0176 
0177     return hp_zx1_ioc_shared();
0178 }
0179 
0180 static int
0181 hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
0182 {
0183     u16 status;
0184     u8 pos, id;
0185     int ttl = 48;
0186 
0187     status = readw(hpa+PCI_STATUS);
0188     if (!(status & PCI_STATUS_CAP_LIST))
0189         return 0;
0190     pos = readb(hpa+PCI_CAPABILITY_LIST);
0191     while (ttl-- && pos >= 0x40) {
0192         pos &= ~3;
0193         id = readb(hpa+pos+PCI_CAP_LIST_ID);
0194         if (id == 0xff)
0195             break;
0196         if (id == cap)
0197             return pos;
0198         pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
0199     }
0200     return 0;
0201 }
0202 
0203 static int __init
0204 hp_zx1_lba_init (u64 hpa)
0205 {
0206     struct _hp_private *hp = &hp_private;
0207     int cap;
0208 
0209     hp->lba_regs = ioremap(hpa, 256);
0210     if (!hp->lba_regs)
0211         return -ENOMEM;
0212 
0213     hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
0214 
0215     cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
0216     if (cap != PCI_CAP_ID_AGP) {
0217         printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
0218                cap, hp->lba_cap_offset);
0219         iounmap(hp->lba_regs);
0220         return -ENODEV;
0221     }
0222 
0223     return 0;
0224 }
0225 
0226 static int
0227 hp_zx1_fetch_size(void)
0228 {
0229     int size;
0230 
0231     size = hp_private.gart_size / MB(1);
0232     hp_zx1_sizes[0].size = size;
0233     agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
0234     return size;
0235 }
0236 
0237 static int
0238 hp_zx1_configure (void)
0239 {
0240     struct _hp_private *hp = &hp_private;
0241 
0242     agp_bridge->gart_bus_addr = hp->gart_base;
0243     agp_bridge->capndx = hp->lba_cap_offset;
0244     agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
0245 
0246     if (hp->io_pdir_owner) {
0247         writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
0248         readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
0249         writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
0250         readl(hp->ioc_regs+HP_ZX1_TCNFG);
0251         writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
0252         readl(hp->ioc_regs+HP_ZX1_IMASK);
0253         writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
0254         readl(hp->ioc_regs+HP_ZX1_IBASE);
0255         writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
0256         readl(hp->ioc_regs+HP_ZX1_PCOM);
0257     }
0258 
0259     return 0;
0260 }
0261 
0262 static void
0263 hp_zx1_cleanup (void)
0264 {
0265     struct _hp_private *hp = &hp_private;
0266 
0267     if (hp->ioc_regs) {
0268         if (hp->io_pdir_owner) {
0269             writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
0270             readq(hp->ioc_regs+HP_ZX1_IBASE);
0271         }
0272         iounmap(hp->ioc_regs);
0273     }
0274     if (hp->lba_regs)
0275         iounmap(hp->lba_regs);
0276 }
0277 
0278 static void
0279 hp_zx1_tlbflush (struct agp_memory *mem)
0280 {
0281     struct _hp_private *hp = &hp_private;
0282 
0283     writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
0284     readq(hp->ioc_regs+HP_ZX1_PCOM);
0285 }
0286 
0287 static int
0288 hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
0289 {
0290     struct _hp_private *hp = &hp_private;
0291     int i;
0292 
0293     if (hp->io_pdir_owner) {
0294         hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
0295                         get_order(hp->io_pdir_size));
0296         if (!hp->io_pdir) {
0297             printk(KERN_ERR PFX "Couldn't allocate contiguous "
0298                 "memory for I/O PDIR\n");
0299             hp->gatt = NULL;
0300             hp->gatt_entries = 0;
0301             return -ENOMEM;
0302         }
0303         memset(hp->io_pdir, 0, hp->io_pdir_size);
0304 
0305         hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
0306     }
0307 
0308     for (i = 0; i < hp->gatt_entries; i++) {
0309         hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
0310     }
0311 
0312     return 0;
0313 }
0314 
0315 static int
0316 hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
0317 {
0318     struct _hp_private *hp = &hp_private;
0319 
0320     if (hp->io_pdir_owner)
0321         free_pages((unsigned long) hp->io_pdir,
0322                 get_order(hp->io_pdir_size));
0323     else
0324         hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
0325     return 0;
0326 }
0327 
0328 static int
0329 hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
0330 {
0331     struct _hp_private *hp = &hp_private;
0332     int i, k;
0333     off_t j, io_pg_start;
0334     int io_pg_count;
0335 
0336     if (type != mem->type ||
0337         agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
0338         return -EINVAL;
0339     }
0340 
0341     io_pg_start = hp->io_pages_per_kpage * pg_start;
0342     io_pg_count = hp->io_pages_per_kpage * mem->page_count;
0343     if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
0344         return -EINVAL;
0345     }
0346 
0347     j = io_pg_start;
0348     while (j < (io_pg_start + io_pg_count)) {
0349         if (hp->gatt[j]) {
0350             return -EBUSY;
0351         }
0352         j++;
0353     }
0354 
0355     if (!mem->is_flushed) {
0356         global_cache_flush();
0357         mem->is_flushed = true;
0358     }
0359 
0360     for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
0361         unsigned long paddr;
0362 
0363         paddr = page_to_phys(mem->pages[i]);
0364         for (k = 0;
0365              k < hp->io_pages_per_kpage;
0366              k++, j++, paddr += hp->io_page_size) {
0367             hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
0368         }
0369     }
0370 
0371     agp_bridge->driver->tlb_flush(mem);
0372     return 0;
0373 }
0374 
0375 static int
0376 hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
0377 {
0378     struct _hp_private *hp = &hp_private;
0379     int i, io_pg_start, io_pg_count;
0380 
0381     if (type != mem->type ||
0382         agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
0383         return -EINVAL;
0384     }
0385 
0386     io_pg_start = hp->io_pages_per_kpage * pg_start;
0387     io_pg_count = hp->io_pages_per_kpage * mem->page_count;
0388     for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
0389         hp->gatt[i] = agp_bridge->scratch_page;
0390     }
0391 
0392     agp_bridge->driver->tlb_flush(mem);
0393     return 0;
0394 }
0395 
0396 static unsigned long
0397 hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
0398 {
0399     return HP_ZX1_PDIR_VALID_BIT | addr;
0400 }
0401 
0402 static void
0403 hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
0404 {
0405     struct _hp_private *hp = &hp_private;
0406     u32 command;
0407 
0408     command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
0409     command = agp_collect_device_status(bridge, mode, command);
0410     command |= 0x00000100;
0411 
0412     writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
0413 
0414     agp_device_command(command, (mode & AGP8X_MODE) != 0);
0415 }
0416 
/*
 * AGP backend operations for the HP ZX1.  Memory allocation uses the
 * generic helpers; all address translation goes through our PDIR, so
 * the aperture cannot be used directly (cant_use_aperture).
 */
const struct agp_bridge_driver hp_zx1_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.configure		= hp_zx1_configure,
	.fetch_size		= hp_zx1_fetch_size,
	.cleanup		= hp_zx1_cleanup,
	.tlb_flush		= hp_zx1_tlbflush,
	.mask_memory		= hp_zx1_mask_memory,
	.masks			= hp_zx1_masks,
	.agp_enable		= hp_zx1_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= hp_zx1_create_gatt_table,
	.free_gatt_table	= hp_zx1_free_gatt_table,
	.insert_memory		= hp_zx1_insert_memory,
	.remove_memory		= hp_zx1_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
	.cant_use_aperture	= true,
};
0441 
0442 static int __init
0443 hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
0444 {
0445     struct agp_bridge_data *bridge;
0446     int error = 0;
0447 
0448     error = hp_zx1_ioc_init(ioc_hpa);
0449     if (error)
0450         goto fail;
0451 
0452     error = hp_zx1_lba_init(lba_hpa);
0453     if (error)
0454         goto fail;
0455 
0456     bridge = agp_alloc_bridge();
0457     if (!bridge) {
0458         error = -ENOMEM;
0459         goto fail;
0460     }
0461     bridge->driver = &hp_zx1_driver;
0462 
0463     fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
0464     fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
0465     bridge->dev = &fake_bridge_dev;
0466 
0467     error = agp_add_bridge(bridge);
0468   fail:
0469     if (error)
0470         hp_zx1_cleanup();
0471     return error;
0472 }
0473 
0474 static acpi_status __init
0475 zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
0476 {
0477     acpi_handle handle, parent;
0478     acpi_status status;
0479     struct acpi_device_info *info;
0480     u64 lba_hpa, sba_hpa, length;
0481     int match;
0482 
0483     status = hp_acpi_csr_space(obj, &lba_hpa, &length);
0484     if (ACPI_FAILURE(status))
0485         return AE_OK; /* keep looking for another bridge */
0486 
0487     /* Look for an enclosing IOC scope and find its CSR space */
0488     handle = obj;
0489     do {
0490         status = acpi_get_object_info(handle, &info);
0491         if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
0492             /* TBD check _CID also */
0493             match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
0494             kfree(info);
0495             if (match) {
0496                 status = hp_acpi_csr_space(handle, &sba_hpa, &length);
0497                 if (ACPI_SUCCESS(status))
0498                     break;
0499                 else {
0500                     printk(KERN_ERR PFX "Detected HP ZX1 "
0501                            "AGP LBA but no IOC.\n");
0502                     return AE_OK;
0503                 }
0504             }
0505         }
0506 
0507         status = acpi_get_parent(handle, &parent);
0508         handle = parent;
0509     } while (ACPI_SUCCESS(status));
0510 
0511     if (ACPI_FAILURE(status))
0512         return AE_OK;   /* found no enclosing IOC */
0513 
0514     if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
0515         return AE_OK;
0516 
0517     printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
0518         "(ioc=%llx, lba=%llx)\n", (char *)context,
0519         sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
0520 
0521     hp_zx1_gart_found = 1;
0522     return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
0523 }
0524 
0525 static int __init
0526 agp_hp_init (void)
0527 {
0528     if (agp_off)
0529         return -EINVAL;
0530 
0531     acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
0532     if (hp_zx1_gart_found)
0533         return 0;
0534 
0535     acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
0536     if (hp_zx1_gart_found)
0537         return 0;
0538 
0539     return -ENODEV;
0540 }
0541 
0542 static void __exit
0543 agp_hp_cleanup (void)
0544 {
0545 }
0546 
0547 module_init(agp_hp_init);
0548 module_exit(agp_hp_cleanup);
0549 
0550 MODULE_LICENSE("GPL and additional rights");