#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - insert coalesced scatter/gather chunks into the I/O pdir
 * @ioc: the I/O controller
 * @startsg: the scatter/gather list of coalesced chunks
 * @nents: the number of entries in the scatter/gather list
 * @hint: the DMA hint
 * @iommu_io_pdir_entry: callback that writes a single pdir entry
 *
 * Walks the coalesced scatterlist and writes one I/O pdir entry per
 * IOVP_SIZE page of each DMA stream. Returns the number of DMA streams
 * (coalesced chunks) that were mapped.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/*
	 * Horrible hack: for efficiency's sake, dma_sg starts one entry
	 * below the true start (it is immediately incremented in the
	 * loop below).
	 */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt(startsg), startsg->length
		);

		/*
		** Look for the start of a new DMA stream: the coalescing
		** pass marks each stream head by setting PIDE_FLAG in its
		** dma address.
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
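			/*
			 * Note: pide still carries the stream's intra-page
			 * offset in its low bits (see iommu_coalesce_chunks
			 * below), so the base address recorded next points
			 * at the stream's first byte, not at a page start.
			 */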
#if defined(ZX1_SUPPORT)
			/* The zx1 IOMMU I/O virtual address space is not
			 * zero based, so fold in the I/O base. */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino use a zero-based I/O virtual
			 * address space; skipping the OR saves a few CPU
			 * cycles for most users. */
			sg_dma_address(dma_sg) = pide;
#endif
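			/*
			 * Point pdirp at the first pdir entry for this
			 * stream and prefetch it for write: the fill loop
			 * below stores the entries sequentially.
			 */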
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
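		/*
		 * Fill one pdir entry per IOVP_SIZE page of this chunk.
		 * size includes dma_offset for the stream head, so a
		 * stream that starts mid-page still maps its first page.
		 */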
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separately from the coalescing/allocation keeps the
** code simpler. A future enhancement could make one pass through the
** sglist do both.
*/
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
					(unsigned)DMA_CHUNK_SIZE);
	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
	/* dma_get_seg_boundary() returns a mask; adding 1 can overflow to
	 * 0, which means the device has no segment boundary limit. */
	if (max_seg_boundary)
		max_seg_size = min(max_seg_size, max_seg_boundary);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure the current dma stream won't
			** exceed max_seg_size if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
				     max_seg_size))
				break;

			/*
			 * Next see if we can append the next chunk: the
			 * previous chunk must end exactly where the next
			 * one begins, and that junction must fall on a
			 * page boundary (both addresses page aligned).
			 */
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;

			dma_len += startsg->length;
		}

		/*
		** End of DMA stream: terminate the last virtually
		** contiguous block and allocate I/O pdir space for it.
		** The stream head's dma address encodes PIDE_FLAG, the
		** allocated pdir index, and the intra-page offset for
		** iommu_fill_pdir() to decode.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
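
/*
 * Usage sketch (illustrative; the callback names are examples, not part
 * of this header): a driver's map_sg() implementation is expected to
 * run the two passes back to back, first coalescing the list and
 * reserving pdir space, then filling in the entries:
 *
 *	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
 *					  my_alloc_range);
 *	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
 *				 my_io_pdir_entry);
 *	BUG_ON(coalesced != filled);
 */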