0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef __POWERNV_PCI_H
0003 #define __POWERNV_PCI_H
0004 
0005 #include <linux/compiler.h>     /* for __printf */
0006 #include <linux/iommu.h>
0007 #include <asm/iommu.h>
0008 #include <asm/msi_bitmap.h>
0009 
struct pci_dn;

/* PHB flavour: selects which IODA programming model drives this bridge. */
enum pnv_phb_type {
	PNV_PHB_IODA1,
	PNV_PHB_IODA2,
	PNV_PHB_NPU_OCAPI,	/* NPU-attached OpenCAPI PHB */
};
0017 
/* Precise PHB model for error management */
enum pnv_phb_model {
	PNV_PHB_MODEL_UNKNOWN,
	PNV_PHB_MODEL_P7IOC,
	PNV_PHB_MODEL_PHB3,
};
0024 
#define PNV_PCI_DIAG_BUF_SIZE	8192

/* Bits for pnv_ioda_pe::flags */
#define PNV_IODA_PE_DEV		(1 << 0)	/* PE has single PCI device	*/
#define PNV_IODA_PE_BUS		(1 << 1)	/* PE has primary PCI bus	*/
#define PNV_IODA_PE_BUS_ALL	(1 << 2)	/* PE has subordinate buses	*/
#define PNV_IODA_PE_MASTER	(1 << 3)	/* Master PE in compound case	*/
#define PNV_IODA_PE_SLAVE	(1 << 4)	/* Slave PE in compound case	*/
#define PNV_IODA_PE_VF		(1 << 5)	/* PE for one VF		*/

/*
 * A brief note on PNV_IODA_PE_BUS_ALL
 *
 * This is needed because of the behaviour of PCIe-to-PCI bridges. The PHB uses
 * the Requester ID field of the PCIe request header to determine the device
 * (and PE) that initiated a DMA. In legacy PCI individual memory read/write
 * requests aren't tagged with the RID. To work around this the PCIe-to-PCI
 * bridge will use (secondary_bus_no << 8) | 0x00 as the RID on the PCIe side.
 *
 * PCIe-to-X bridges have a similar issue even though PCI-X requests also have
 * a RID in the transaction header. The PCIe-to-X bridge is permitted to "take
 * ownership" of a transaction by a PCI-X device when forwarding it to the PCIe
 * side of the bridge.
 *
 * To work around these problems we use the BUS_ALL flag since every subordinate
 * bus of the bridge should go into the same PE.
 */

/* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
#define PNV_IODA_STOPPED_STATE	0x8000000000000000
0053 
0054 /* Data associated with a PE, including IOMMU tracking etc.. */
0055 struct pnv_phb;
0056 struct pnv_ioda_pe {
0057     unsigned long       flags;
0058     struct pnv_phb      *phb;
0059     int         device_count;
0060 
0061     /* A PE can be associated with a single device or an
0062      * entire bus (& children). In the former case, pdev
0063      * is populated, in the later case, pbus is.
0064      */
0065 #ifdef CONFIG_PCI_IOV
0066     struct pci_dev          *parent_dev;
0067 #endif
0068     struct pci_dev      *pdev;
0069     struct pci_bus      *pbus;
0070 
0071     /* Effective RID (device RID for a device PE and base bus
0072      * RID with devfn 0 for a bus PE)
0073      */
0074     unsigned int        rid;
0075 
0076     /* PE number */
0077     unsigned int        pe_number;
0078 
0079     /* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
0080     struct iommu_table_group table_group;
0081 
0082     /* 64-bit TCE bypass region */
0083     bool            tce_bypass_enabled;
0084     uint64_t        tce_bypass_base;
0085 
0086     /*
0087      * Used to track whether we've done DMA setup for this PE or not. We
0088      * want to defer allocating TCE tables, etc until we've added a
0089      * non-bridge device to the PE.
0090      */
0091     bool            dma_setup_done;
0092 
0093     /* MSIs. MVE index is identical for 32 and 64 bit MSI
0094      * and -1 if not supported. (It's actually identical to the
0095      * PE number)
0096      */
0097     int         mve_number;
0098 
0099     /* PEs in compound case */
0100     struct pnv_ioda_pe  *master;
0101     struct list_head    slaves;
0102 
0103     /* Link in list of PE#s */
0104     struct list_head    list;
0105 };
0106 
/* pnv_phb::flags bit: EEH is enabled for this PHB */
#define PNV_PHB_FLAG_EEH	(1 << 0)
0108 
0109 struct pnv_phb {
0110     struct pci_controller   *hose;
0111     enum pnv_phb_type   type;
0112     enum pnv_phb_model  model;
0113     u64         hub_id;
0114     u64         opal_id;
0115     int         flags;
0116     void __iomem        *regs;
0117     u64         regs_phys;
0118     spinlock_t      lock;
0119 
0120 #ifdef CONFIG_DEBUG_FS
0121     int         has_dbgfs;
0122     struct dentry       *dbgfs;
0123 #endif
0124 
0125     unsigned int        msi_base;
0126     struct msi_bitmap   msi_bmp;
0127     int (*init_m64)(struct pnv_phb *phb);
0128     int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
0129     void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
0130     int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
0131 
0132     struct {
0133         /* Global bridge info */
0134         unsigned int        total_pe_num;
0135         unsigned int        reserved_pe_idx;
0136         unsigned int        root_pe_idx;
0137 
0138         /* 32-bit MMIO window */
0139         unsigned int        m32_size;
0140         unsigned int        m32_segsize;
0141         unsigned int        m32_pci_base;
0142 
0143         /* 64-bit MMIO window */
0144         unsigned int        m64_bar_idx;
0145         unsigned long       m64_size;
0146         unsigned long       m64_segsize;
0147         unsigned long       m64_base;
0148 #define MAX_M64_BARS 64
0149         unsigned long       m64_bar_alloc;
0150 
0151         /* IO ports */
0152         unsigned int        io_size;
0153         unsigned int        io_segsize;
0154         unsigned int        io_pci_base;
0155 
0156         /* PE allocation */
0157         struct mutex        pe_alloc_mutex;
0158         unsigned long       *pe_alloc;
0159         struct pnv_ioda_pe  *pe_array;
0160 
0161         /* M32 & IO segment maps */
0162         unsigned int        *m64_segmap;
0163         unsigned int        *m32_segmap;
0164         unsigned int        *io_segmap;
0165 
0166         /* DMA32 segment maps - IODA1 only */
0167         unsigned int        dma32_count;
0168         unsigned int        *dma32_segmap;
0169 
0170         /* IRQ chip */
0171         int         irq_chip_init;
0172         struct irq_chip     irq_chip;
0173 
0174         /* Sorted list of used PE's based
0175          * on the sequence of creation
0176          */
0177         struct list_head    pe_list;
0178         struct mutex            pe_list_mutex;
0179 
0180         /* Reverse map of PEs, indexed by {bus, devfn} */
0181         unsigned int        pe_rmap[0x10000];
0182     } ioda;
0183 
0184     /* PHB and hub diagnostics */
0185     unsigned int        diag_data_size;
0186     u8          *diag_data;
0187 };
0188 
0189 
0190 /* IODA PE management */
0191 
0192 static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
0193 {
0194     /*
0195      * WARNING: We cannot rely on the resource flags. The Linux PCI
0196      * allocation code sometimes decides to put a 64-bit prefetchable
0197      * BAR in the 32-bit window, so we have to compare the addresses.
0198      *
0199      * For simplicity we only test resource start.
0200      */
0201     return (r->start >= phb->ioda.m64_base &&
0202         r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
0203 }
0204 
0205 static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
0206 {
0207     unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
0208 
0209     return (resource_flags & flags) == flags;
0210 }
0211 
int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);
int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);

void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);
void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe);

/* Allocate @count consecutive PEs / return a PE to the allocator */
struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count);
void pnv_ioda_free_pe(struct pnv_ioda_pe *pe);
0220 
#ifdef CONFIG_PCI_IOV
/*
 * For SR-IOV we want to put each VF's MMIO resource in to a separate PE.
 * This requires a bit of acrobatics with the MMIO -> PE configuration
 * and this structure is used to keep track of it all.
 */
struct pnv_iov_data {
	/* number of VFs enabled */
	u16	num_vfs;

	/* pointer to the array of VF PEs. num_vfs long */
	struct pnv_ioda_pe *vf_pe_arr;

	/* Did we map the VF BAR with single-PE IODA BARs? */
	bool	m64_single_mode[PCI_SRIOV_NUM_BARS];

	/*
	 * True if we're using any segmented windows. In that case we need
	 * to shift the start of the IOV resource by the segment corresponding
	 * to the allocated PE.
	 */
	bool	need_shift;

	/*
	 * Bit mask used to track which m64 windows are used to map the
	 * SR-IOV BARs for this device.
	 */
	DECLARE_BITMAP(used_m64_bar_mask, MAX_M64_BARS);

	/*
	 * If we map the SR-IOV BARs with a segmented window then
	 * parts of that window will be "claimed" by other PEs.
	 *
	 * "holes" here is used to reserve the leading portion
	 * of the window that is used by other (non VF) PEs.
	 */
	struct resource holes[PCI_SRIOV_NUM_BARS];
};

/* Fetch the per-device SR-IOV state stashed in dev archdata. */
static inline struct pnv_iov_data *pnv_iov_get(struct pci_dev *pdev)
{
	return pdev->dev.archdata.iov_data;
}

void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev);
resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno);

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
int pnv_pcibios_sriov_disable(struct pci_dev *pdev);
#endif /* CONFIG_PCI_IOV */
0271 
0272 extern struct pci_ops pnv_pci_ops;
0273 
0274 void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
0275                 unsigned char *log_buff);
0276 int pnv_pci_cfg_read(struct pci_dn *pdn,
0277              int where, int size, u32 *val);
0278 int pnv_pci_cfg_write(struct pci_dn *pdn,
0279               int where, int size, u32 val);
0280 extern struct iommu_table *pnv_pci_table_alloc(int nid);
0281 
0282 extern void pnv_pci_init_ioda_hub(struct device_node *np);
0283 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
0284 extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
0285 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
0286 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
0287 
0288 extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
0289 extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
0290 extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
0291 extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
0292         __u64 window_size, __u32 levels);
0293 extern int pnv_eeh_post_init(void);
0294 
0295 __printf(3, 4)
0296 extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
0297                 const char *fmt, ...);
0298 #define pe_err(pe, fmt, ...)                    \
0299     pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
0300 #define pe_warn(pe, fmt, ...)                   \
0301     pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
0302 #define pe_info(pe, fmt, ...)                   \
0303     pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
0304 
0305 /* pci-ioda-tce.c */
0306 #define POWERNV_IOMMU_DEFAULT_LEVELS    2
0307 #define POWERNV_IOMMU_MAX_LEVELS    5
0308 
0309 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
0310         unsigned long uaddr, enum dma_data_direction direction,
0311         unsigned long attrs);
0312 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
0313 extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
0314         unsigned long *hpa, enum dma_data_direction *direction);
0315 extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
0316         bool alloc);
0317 extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
0318 
0319 extern long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
0320         __u32 page_shift, __u64 window_size, __u32 levels,
0321         bool alloc_userspace_copy, struct iommu_table *tbl);
0322 extern void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
0323 
0324 extern long pnv_pci_link_table_and_group(int node, int num,
0325         struct iommu_table *tbl,
0326         struct iommu_table_group *table_group);
0327 extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
0328         struct iommu_table_group *table_group);
0329 extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
0330         void *tce_mem, u64 tce_size,
0331         u64 dma_offset, unsigned int page_shift);
0332 
0333 extern unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
0334 
0335 static inline struct pnv_phb *pci_bus_to_pnvhb(struct pci_bus *bus)
0336 {
0337     struct pci_controller *hose = bus->sysdata;
0338 
0339     if (hose)
0340         return hose->private_data;
0341 
0342     return NULL;
0343 }
0344 
0345 #endif /* __POWERNV_PCI_H */