// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests.
 *
 * Copyright (c) 2022, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The backend domain id which is set-up for the device */
	domid_t backend_domid;
	/* Is device behaving sane? */
	bool broken;
};

static DEFINE_XARRAY(xen_grant_dma_devices);

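/*
 * The top bit of a 64-bit DMA address is reserved to mark the address as a
 * grant-based one; the remaining bits carry the grant reference shifted by
 * PAGE_SHIFT. For example, with 4 KiB pages, grant reference 5 is encoded
 * as the DMA address 0x8000000000005000.
 */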
#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;

	xa_lock(&xen_grant_dma_devices);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock(&xen_grant_dma_devices);

	return data;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU, e.g. map DMA addresses 1:1
 * to grant references: one grant is allocated per page of the buffer,
 * and the first grant reference is encoded in the returned DMA address.
 */

static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

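/* The struct page based interfaces are thin wrappers around the vaddr ones */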
static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

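/*
 * Map a buffer for DMA by granting the backend domain access to every page
 * it touches: a sequential range of grant references is allocated, the first
 * reference is encoded in the DMA address, and the offset within the first
 * page is added back in. Note the page offset can make the buffer span more
 * pages than PFN_UP(size) alone would suggest.
 */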
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = offset_in_page(offset),
		      pfn_offset = PFN_DOWN(offset);
	/* The offset can push the buffer across extra page boundaries */
	unsigned int i, n_pages = PFN_UP(dma_offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		/* The backend only needs to read the buffer for DMA_TO_DEVICE */
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(page_to_pfn(page) + pfn_offset + i),
				dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + dma_offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = offset_in_page(dma_handle);
	unsigned int i, n_pages = PFN_UP(dma_offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

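/*
 * The scatter-gather ops map/unmap each segment independently via the page
 * based ops above; no attempt is made to merge adjacent segments.
 */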
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

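/*
 * Only a full 64-bit DMA mask can be supported: bit 63 of the DMA address is
 * used as the grant marker, so the device must be able to drive all 64 bits.
 */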
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

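/*
 * A device is subject to grant DMA if its device-tree node has an "iommus"
 * phandle pointing at a node compatible with "xen,grant-dma".
 */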
bool xen_is_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	/* XXX Handle only DT devices for now */
	if (!dev->of_node)
		return false;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}

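/*
 * Tell the virtio core whether this device must restrict memory access to
 * the backend, i.e. whether grant-based DMA has to be used for it.
 */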
bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))
		return true;

	return xen_is_grant_dma_device(dev->dev.parent);
}

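/*
 * Set up the grant DMA ops for a device: parse its "iommus" property, check
 * that the referenced IOMMU node is compatible with "xen,grant-dma", take
 * the backend domid from the single phandle argument, and install the ops.
 *
 * An illustrative device-tree fragment (node names are examples; the binding
 * requires #iommu-cells = <1>, with the cell holding the backend domid):
 *
 *	xen_grant_dma: xen-grant-dma {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000000 {
 *		...
 *		iommus = <&xen_grant_dma 1>;
 *	};
 */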
void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;
	struct of_phandle_args iommu_spec;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	/* XXX Handle only DT devices for now */
	if (!dev->of_node)
		goto err;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		goto err;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		goto err;
	}

	of_node_put(iommu_spec.np);

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	data->backend_domid = iommu_spec.args[0];

	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_KERNEL))) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");