// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure.
 */

#include <linux/dma-direct.h>
#include <linux/pci.h>
#include <asm/iommu.h>

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
#define can_map_direct(dev, addr) \
        ((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))

bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
{
        if (likely(!dev->bus_dma_limit))
                return false;

        return can_map_direct(dev, addr);
}

#define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)

bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
{
        if (likely(!dev->bus_dma_limit))
                return false;

        return is_direct_handle(dev, dma_handle);
}

bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
                            int nents)
{
        struct scatterlist *s;
        int i;

        if (likely(!dev->bus_dma_limit))
                return false;

        for_each_sg(sg, s, nents, i) {
                if (!can_map_direct(dev, sg_phys(s) + s->offset + s->length))
                        return false;
        }

        return true;
}

bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
                              int nents)
{
        struct scatterlist *s;
        int i;

        if (likely(!dev->bus_dma_limit))
                return false;

        for_each_sg(sg, s, nents, i) {
                if (!is_direct_handle(dev, s->dma_address + s->length))
                        return false;
        }

        return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */
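
/*
 * With CONFIG_ARCH_HAS_DMA_MAP_DIRECT the hooks above are consulted by the
 * generic dma-direct code: a device with a non-zero bus_dma_limit has a 1:1
 * window that does not cover all memory, so buffers that fit below the limit
 * can be mapped directly while everything else falls back to the dynamic
 * IOMMU mapping implemented below.
 */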

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     unsigned long attrs)
{
        return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
                              size, dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                 size_t size, enum dma_data_direction direction,
                                 unsigned long attrs)
{
        iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
                         attrs);
}

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            unsigned long attrs)
{
        return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
                                dma_get_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
                               int nelems, enum dma_data_direction direction,
                               unsigned long attrs)
{
        ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
                           direction, attrs);
}

static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_controller *phb = pci_bus_to_host(pdev->bus);

        if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
                return false;
        return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}

/* We support DMA to/from any memory page via the iommu */
int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
                /*
                 * dma_iommu_bypass_supported() may set dev->bus_dma_limit
                 * when a 1:1 mapping exists but is somehow limited
                 * (ibm,pmemory is one example), so only bypass these ops
                 * entirely when no such limit was set.
                 */
                dev->dma_ops_bypass = dev->bus_dma_limit == 0;
                if (!dev->dma_ops_bypass)
                        dev_warn(dev,
                                 "iommu: 64-bit OK but direct DMA is limited by %llx\n",
                                 dev->bus_dma_limit);
                else
                        dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
                return 1;
        }

        if (!tbl) {
                dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
                return 0;
        }

        if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
                dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
                dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
                         mask, tbl->it_offset << tbl->it_page_shift);
                return 0;
        }

        dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
        dev->dma_ops_bypass = false;
        return 1;
}

u64 dma_iommu_get_required_mask(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);
        u64 mask;

        if (dev_is_pci(dev)) {
                u64 bypass_mask = dma_direct_get_required_mask(dev);

                if (dma_iommu_dma_supported(dev, bypass_mask)) {
                        dev_info(dev, "%s: returning bypass mask 0x%llx\n",
                                 __func__, bypass_mask);
                        return bypass_mask;
                }
        }

        if (!tbl)
                return 0;

        mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
                        tbl->it_page_shift - 1);
        mask += mask - 1;

        return mask;
}
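
/*
 * Worked example for the mask computation above (illustrative numbers, not
 * taken from this file): with it_offset = 0, it_size = 0x10000 entries and
 * it_page_shift = 16, fls_long(0x10000) returns 17, so mask starts out as
 * 1ULL << (17 + 16 - 1) = 1ULL << 32, and mask += mask - 1 then yields
 * 0x1ffffffff.
 */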

const struct dma_map_ops dma_iommu_ops = {
        .alloc                  = dma_iommu_alloc_coherent,
        .free                   = dma_iommu_free_coherent,
        .map_sg                 = dma_iommu_map_sg,
        .unmap_sg               = dma_iommu_unmap_sg,
        .dma_supported          = dma_iommu_dma_supported,
        .map_page               = dma_iommu_map_page,
        .unmap_page             = dma_iommu_unmap_page,
        .get_required_mask      = dma_iommu_get_required_mask,
        .mmap                   = dma_common_mmap,
        .get_sgtable            = dma_common_get_sgtable,
        .alloc_pages            = dma_common_alloc_pages,
        .free_pages             = dma_common_free_pages,
};
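
/*
 * Illustrative sketch, not part of this file: platform PCI setup code is
 * what actually attaches a device to these ops, roughly along the lines of
 * the snippet below, assuming the bus controller has already built an
 * iommu_table "tbl" for the device:
 *
 *      set_iommu_table_base(&pdev->dev, tbl);
 *      set_dma_ops(&pdev->dev, &dma_iommu_ops);
 *
 * dma_iommu_dma_supported() then decides, when the driver sets its DMA mask,
 * whether the device may bypass the table (dev->dma_ops_bypass) or must go
 * through the mapping functions above.
 */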