// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

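/**
 * ib_umem_dmabuf_map_pages - map the pages backing a dma-buf umem
 * @umem_dmabuf: umem to map
 *
 * Map the dma-buf attachment, clip the returned sg list to the
 * PAGE_SIZE-aligned window described by the umem, and wait for the
 * exporter's kernel fences so the page contents are valid.
 *
 * Must be called with the dma-buf reservation object locked. Returns 0
 * on success or a negative errno on failure.
 */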
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned long start, end, cur = 0;
        unsigned int nmap = 0;
        long ret;
        int i;

        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (umem_dmabuf->sgt)
                goto wait_fence;

        sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* modify the sg list in-place to match umem address and length */

        start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
        end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
                    PAGE_SIZE);
        for_each_sgtable_dma_sg(sgt, sg, i) {
                /* count the entries that overlap the umem window */
                if (start < cur + sg_dma_len(sg) && cur < end)
                        nmap++;
                /* clip the front of the first overlapping entry */
                if (cur <= start && start < cur + sg_dma_len(sg)) {
                        unsigned long offset = start - cur;

                        umem_dmabuf->first_sg = sg;
                        umem_dmabuf->first_sg_offset = offset;
                        sg_dma_address(sg) += offset;
                        sg_dma_len(sg) -= offset;
                        cur += offset;
                }
                /* trim the tail of the last overlapping entry */
                if (cur < end && end <= cur + sg_dma_len(sg)) {
                        unsigned long trim = cur + sg_dma_len(sg) - end;

                        umem_dmabuf->last_sg = sg;
                        umem_dmabuf->last_sg_trim = trim;
                        sg_dma_len(sg) -= trim;
                        break;
                }
                cur += sg_dma_len(sg);
        }

        umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
        umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
        umem_dmabuf->sgt = sgt;

wait_fence:
        /*
         * Although the sg list is valid now, the content of the pages
         * may not be up-to-date. Wait for the exporter to finish the
         * migration.
         */
        ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
                                    DMA_RESV_USAGE_KERNEL,
                                    false, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
        if (ret == 0)
                return -ETIMEDOUT;
        return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

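/**
 * ib_umem_dmabuf_unmap_pages - unmap the pages backing a dma-buf umem
 * @umem_dmabuf: umem to unmap
 *
 * Undo the clipping applied by ib_umem_dmabuf_map_pages() and unmap the
 * dma-buf attachment. A no-op if the umem is not currently mapped.
 *
 * Must be called with the dma-buf reservation object locked.
 */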
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (!umem_dmabuf->sgt)
                return;

        /* restore the original sg list */
        if (umem_dmabuf->first_sg) {
                sg_dma_address(umem_dmabuf->first_sg) -=
                        umem_dmabuf->first_sg_offset;
                sg_dma_len(umem_dmabuf->first_sg) +=
                        umem_dmabuf->first_sg_offset;
                umem_dmabuf->first_sg = NULL;
                umem_dmabuf->first_sg_offset = 0;
        }
        if (umem_dmabuf->last_sg) {
                sg_dma_len(umem_dmabuf->last_sg) +=
                        umem_dmabuf->last_sg_trim;
                umem_dmabuf->last_sg = NULL;
                umem_dmabuf->last_sg_trim = 0;
        }

        dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
                                 DMA_BIDIRECTIONAL);

        umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

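/**
 * ib_umem_dmabuf_get - create an ib_umem backed by a dma-buf
 * @device: IB device to attach to the dma-buf
 * @offset: offset into the dma-buf of the region to use
 * @size: length in bytes of the region
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the umem
 * @ops: importer attach ops; move_notify is mandatory and is expected
 *       to invalidate the mapping, typically by calling
 *       ib_umem_dmabuf_unmap_pages() under the reservation lock
 *
 * Attach dynamically to the dma-buf; the pages are not mapped until
 * ib_umem_dmabuf_map_pages() is called. Returns the new umem or an
 * ERR_PTR on failure.
 */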
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops)
{
        struct dma_buf *dmabuf;
        struct ib_umem_dmabuf *umem_dmabuf;
        struct ib_umem *umem;
        unsigned long end;
        struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

        if (check_add_overflow(offset, (unsigned long)size, &end))
                return ret;

        if (unlikely(!ops || !ops->move_notify))
                return ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_CAST(dmabuf);

        if (dmabuf->size < end)
                goto out_release_dmabuf;

        umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
        if (!umem_dmabuf) {
                ret = ERR_PTR(-ENOMEM);
                goto out_release_dmabuf;
        }

        umem = &umem_dmabuf->umem;
        umem->ibdev = device;
        umem->length = size;
        umem->address = offset;
        umem->writable = ib_access_writable(access);
        umem->is_dmabuf = 1;

        if (!ib_umem_num_pages(umem))
                goto out_free_umem;

        umem_dmabuf->attach = dma_buf_dynamic_attach(
                                        dmabuf,
                                        device->dma_device,
                                        ops,
                                        umem_dmabuf);
        if (IS_ERR(umem_dmabuf->attach)) {
                ret = ERR_CAST(umem_dmabuf->attach);
                goto out_free_umem;
        }
        return umem_dmabuf;

out_free_umem:
        kfree(umem_dmabuf);

out_release_dmabuf:
        dma_buf_put(dmabuf);
        return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
        struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

        ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
                               "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
        .allow_peer2peer = true,
        .move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

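/**
 * ib_umem_dmabuf_get_pinned - create a dma-buf umem with the pages pinned
 * @device: IB device to attach to the dma-buf
 * @offset: offset into the dma-buf of the region to use
 * @size: length in bytes of the region
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the umem
 *
 * For devices that cannot handle move_notify: pin the dma-buf so the
 * exporter cannot move it, and map the pages immediately. The sg list
 * stays valid for the lifetime of the umem. Returns the new umem or an
 * ERR_PTR on failure.
 */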
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
                                                 unsigned long offset,
                                                 size_t size, int fd,
                                                 int access)
{
        struct ib_umem_dmabuf *umem_dmabuf;
        int err;

        umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
                                         &ib_umem_dmabuf_attach_pinned_ops);
        if (IS_ERR(umem_dmabuf))
                return umem_dmabuf;

        dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
        err = dma_buf_pin(umem_dmabuf->attach);
        if (err)
                goto err_release;
        umem_dmabuf->pinned = 1;

        err = ib_umem_dmabuf_map_pages(umem_dmabuf);
        if (err)
                goto err_unpin;
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

        return umem_dmabuf;

err_unpin:
        dma_buf_unpin(umem_dmabuf->attach);
err_release:
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
        ib_umem_release(&umem_dmabuf->umem);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

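/**
 * ib_umem_dmabuf_release - free a dma-buf umem
 * @umem_dmabuf: umem to release
 *
 * Unmap (and unpin, if pinned) the pages, detach from the dma-buf and
 * drop the reference taken in ib_umem_dmabuf_get(). Called from
 * ib_umem_release() for dma-buf umems.
 */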
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

        dma_resv_lock(dmabuf->resv, NULL);
        ib_umem_dmabuf_unmap_pages(umem_dmabuf);
        if (umem_dmabuf->pinned)
                dma_buf_unpin(umem_dmabuf->attach);
        dma_resv_unlock(dmabuf->resv);

        dma_buf_detach(dmabuf, umem_dmabuf->attach);
        dma_buf_put(dmabuf);
        kfree(umem_dmabuf);
}
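
/*
 * A minimal usage sketch (not part of this file): a driver whose device
 * can recover from invalidation would import revocable dma-buf memory
 * roughly as follows. my_move_notify() and my_attach_ops are
 * hypothetical names; only the ib_umem_dmabuf_* and dma_resv_* calls
 * are real.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 *
 *		// stop device access to the old mapping first, then:
 *		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, size, fd, access,
 *					 &my_attach_ops);
 *	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
 *	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 */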