// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2008, PA Semi, Inc
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 */

#undef DEBUG

#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>

#include "pasemi.h"

#define IOBMAP_PAGE_SHIFT   12
#define IOBMAP_PAGE_SIZE    (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK    (IOBMAP_PAGE_SIZE - 1)

#define IOB_BASE        0xe0000000
#define IOB_SIZE        0x3000
/* Configuration registers */
#define IOBCAP_REG      0x40
#define IOBCOM_REG      0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN     0x00000100

/* Address decode configuration register */
#define IOB_AD_REG      0x14c
/* IOB_AD_REG fields */
#define IOB_AD_VGPRT        0x00000e00
#define IOB_AD_VGAEN        0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK   0x00000030
#define IOB_AD_MPSEL_B38    0x00000000
#define IOB_AD_MPSEL_B40    0x00000010
#define IOB_AD_MPSEL_B42    0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK    0x00000003
#define IOB_AD_TRNG_256M    0x00000000
#define IOB_AD_TRNG_2G      0x00000001
#define IOB_AD_TRNG_128G    0x00000003

#define IOB_TABLEBASE_REG   0x154

/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE  0x2b00

/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG    0x2d00

/* The top two bits of the level 1 entry contain valid and type flags */
#define IOBMAP_L1E_V        0x40000000
#define IOBMAP_L1E_V_B      0x80000000

/* For big page entries, the bottom two bits contain flags */
#define IOBMAP_L1E_BIG_CACHED   0x00000002
#define IOBMAP_L1E_BIG_PRIORITY 0x00000001

/* For regular level 2 entries, top 2 bits contain valid and cache flags */
#define IOBMAP_L2E_V        0x80000000
#define IOBMAP_L2E_V_CACHED 0xc0000000

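/*
 * Layout summary (inferred from the constants above and the setup code
 * below, not from hardware documentation): the IOB remaps a 2GB DMA
 * window in 4KB (1 << IOBMAP_PAGE_SHIFT) pages.  Each of the 64 L1
 * registers covers 32MB and points at an 8K-entry slice of the L2
 * table, so each slice is 8K * 4 bytes = 32KB and the whole L2 table
 * is 64 * 32KB = 2MB, matching the 1UL << 21 allocation in iob_init().
 */
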
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;

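/*
 * Fill in L2 entries for a new DMA mapping and invalidate the IOB's
 * translation cache for every page touched.  The bus_addr >> 14 written
 * to IOB_AT_INVAL_TLB_REG presumably means the invalidate register works
 * on 16KB granules; that is an inference from the shift, nothing here
 * documents it.
 */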
static int iobmap_build(struct iommu_table *tbl, long index,
             long npages, unsigned long uaddr,
             enum dma_data_direction direction,
             unsigned long attrs)
{
    u32 *ip;
    u32 rpn;
    unsigned long bus_addr;

    pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

    bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

    ip = ((u32 *)tbl->it_base) + index;

    while (npages--) {
        rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;

        *(ip++) = IOBMAP_L2E_V | rpn;
        /* invalidate tlb, can be optimized more */
        out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

        uaddr += IOBMAP_PAGE_SIZE;
        bus_addr += IOBMAP_PAGE_SIZE;
    }
    return 0;
}


static void iobmap_free(struct iommu_table *tbl, long index,
            long npages)
{
    u32 *ip;
    unsigned long bus_addr;

    pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

    bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

    ip = ((u32 *)tbl->it_base) + index;

    while (npages--) {
        *(ip++) = iob_l2_emptyval;
        /* invalidate tlb, can be optimized more */
        out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
        bus_addr += IOBMAP_PAGE_SIZE;
    }
}

static struct iommu_table_ops iommu_table_iobmap_ops = {
    .set = iobmap_build,
    .clear  = iobmap_free
};

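/*
 * One-time setup of the single iommu_table shared by all PCI devices.
 * it_size is in entries: the 2GB window programmed in iob_init() divided
 * by the 4KB page size, i.e. 0x80000000 >> 12 = 512K entries.
 */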
static void iommu_table_iobmap_setup(void)
{
    pr_debug(" -> %s\n", __func__);
    iommu_table_iobmap.it_busno = 0;
    iommu_table_iobmap.it_offset = 0;
    iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;

    /* it_size is in number of entries */
    iommu_table_iobmap.it_size =
        0x80000000 >> iommu_table_iobmap.it_page_shift;

    /* Initialize the common IOMMU code */
    iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
    iommu_table_iobmap.it_index = 0;
    /* XXXOJN tune this to avoid IOB cache invals.
     * Should probably be 8 (64 bytes)
     */
    iommu_table_iobmap.it_blocksize = 4;
    iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
    if (!iommu_init_table(&iommu_table_iobmap, 0, 0, 0))
        panic("Failed to initialize iommu table");

    pr_debug(" <- %s\n", __func__);
}


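/*
 * The shared table is initialized lazily on the first bus setup callback;
 * iommu_table_iobmap_inited makes sure this only happens once.
 */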
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
    pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

    if (!iommu_table_iobmap_inited) {
        iommu_table_iobmap_inited = 1;
        iommu_table_iobmap_setup();
    }
}


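/*
 * Per-device setup.  Vendor 0x1959 is PA Semi, and the 0x1959:0xa007
 * match below is the on-chip DMA engine mentioned in the comment: it is
 * left untranslated (with a 44-bit coherent DMA mask) unless
 * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is set or we are running in an LPAR
 * environment.  Every other device is pointed at the shared iobmap table.
 */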
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
    pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
    /* For non-LPAR environment, don't translate anything for the DMA
     * engine. The exception to this is if the user has enabled
     * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
     */
    if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
        !firmware_has_feature(FW_FEATURE_LPAR)) {
        dev->dev.dma_ops = NULL;
        /*
         * Set the coherent DMA mask to prevent the iommu
         * being used unnecessarily
         */
        dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
        return;
    }
#endif

    set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}

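/*
 * Bring-up sequence for the IOB address translation unit: allocate the
 * 2MB L2 table below the 2GB window, allocate a scratch page that all
 * empty L2 entries point at, map the IOB registers, point each of the
 * 64 L1 registers at its 32KB slice of the L2 table, select the 2GB
 * translation window and finally enable translation.
 */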
static int __init iob_init(struct device_node *dn)
{
    unsigned long tmp;
    u32 regword;
    int i;

    pr_debug(" -> %s\n", __func__);

    /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
    iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
                    MEMBLOCK_LOW_LIMIT, 0x80000000,
                    NUMA_NO_NODE);
    if (!iob_l2_base)
        panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
              __func__, 1UL << 21, 1UL << 21, 0x80000000);

    pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);

    /* Allocate a spare page to map all invalid IOTLB pages. */
    tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
    if (!tmp)
        panic("IOBMAP: Cannot allocate spare page!");
    /* Empty l1 is marked invalid */
    iob_l1_emptyval = 0;
    /* Empty l2 is mapped to dummy page */
    iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

    iob = ioremap(IOB_BASE, IOB_SIZE);
    if (!iob)
        panic("IOBMAP: Cannot map registers!");

    /* setup direct mapping of the L1 entries */
    for (i = 0; i < 64; i++) {
        /* Each L1 covers 32MB, i.e. 8K entries = 32K of ram */
        regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
        out_le32(iob+IOB_XLT_L1_REGBASE+i*4, regword);
    }

    /* set 2GB translation window, based at 0 */
    regword = in_le32(iob+IOB_AD_REG);
    regword &= ~IOB_AD_TRNG_MASK;
    regword |= IOB_AD_TRNG_2G;
    out_le32(iob+IOB_AD_REG, regword);

    /* Enable translation */
    regword = in_le32(iob+IOBCOM_REG);
    regword |= IOBCOM_ATEN;
    out_le32(iob+IOBCOM_REG, regword);

    pr_debug(" <- %s\n", __func__);

    return 0;
}


/* These are called very early. */
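/*
 * iommu_init_early_pasemi() backs off entirely if translation is disabled,
 * either by building without CONFIG_PPC_PASEMI_IOMMU or via a
 * "linux,iommu-off" property in the /chosen device-tree node.  Otherwise it
 * programs the IOB, hooks the pasemi PCI controller's dma_dev_setup and
 * dma_bus_setup callbacks, and switches the PCI DMA ops to dma_iommu_ops.
 */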
void __init iommu_init_early_pasemi(void)
{
    int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
    iommu_off = 1;
#else
    iommu_off = of_chosen &&
            of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
    if (iommu_off)
        return;

    iob_init(NULL);

    pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
    pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
    set_pci_dma_ops(&dma_iommu_ops);
}