0001
0002
0003
0004
0005
0006
0007 #ifndef IB_UMEM_H
0008 #define IB_UMEM_H
0009
0010 #include <linux/list.h>
0011 #include <linux/scatterlist.h>
0012 #include <linux/workqueue.h>
0013 #include <rdma/ib_verbs.h>
0014
0015 struct ib_ucontext;
0016 struct ib_umem_odp;
0017 struct dma_buf_attach_ops;
0018
/*
 * Tracking structure for a region of user memory registered for device
 * access.  Embedded inside ib_umem_odp / ib_umem_dmabuf for the ODP and
 * dma-buf variants (see the is_odp / is_dmabuf flags).
 */
struct ib_umem {
	struct ib_device *ibdev;	/* device this umem belongs to */
	struct mm_struct *owning_mm;	/* mm that owns the mapped pages */
	u64 iova;			/* device virtual address of the region */
	size_t length;			/* length of the region, in bytes */
	unsigned long address;		/* userspace virtual start address */
	u32 writable : 1;		/* region mapped for write access */
	u32 is_odp : 1;			/* actually an ib_umem_odp */
	u32 is_dmabuf : 1;		/* actually an ib_umem_dmabuf */
	struct work_struct work;
	struct sg_append_table sgt_append;	/* scatterlist of the pinned pages */
};
0031
/*
 * dma-buf backed variant of ib_umem; to_ib_umem_dmabuf() recovers it from
 * the embedded umem when umem.is_dmabuf is set.
 */
struct ib_umem_dmabuf {
	struct ib_umem umem;			/* must stay first-class embedded member */
	struct dma_buf_attachment *attach;	/* attachment to the exporting dma-buf */
	struct sg_table *sgt;			/* mapped table; NULL while unmapped */
	struct scatterlist *first_sg;		/* first entry covering the region */
	struct scatterlist *last_sg;		/* last entry covering the region */
	unsigned long first_sg_offset;		/* bytes trimmed from first_sg's start */
	unsigned long last_sg_trim;		/* bytes trimmed from last_sg's end */
	void *private;				/* owner-private cookie */
	u8 pinned : 1;				/* created via the pinned API variant */
};
0043
0044 static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
0045 {
0046 return container_of(umem, struct ib_umem_dmabuf, umem);
0047 }
0048
0049
0050 static inline int ib_umem_offset(struct ib_umem *umem)
0051 {
0052 return umem->address & ~PAGE_MASK;
0053 }
0054
0055 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
0056 unsigned long pgsz)
0057 {
0058 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
0059 (pgsz - 1);
0060 }
0061
0062 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
0063 unsigned long pgsz)
0064 {
0065 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
0066 ALIGN_DOWN(umem->iova, pgsz))) /
0067 pgsz;
0068 }
0069
/* Number of system pages (PAGE_SIZE blocks) covering the umem. */
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
0074
0075 static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
0076 struct ib_umem *umem,
0077 unsigned long pgsz)
0078 {
0079 __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
0080 umem->sgt_append.sgt.nents, pgsz);
0081 }
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
/**
 * rdma_umem_for_each_dma_block - iterate over DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: struct ib_block_iter holding the iteration state
 * @pgsz: block size to iterate with; presumably must be a supported HW
 *        page size <= the umem's page size — confirm against
 *        __rdma_block_iter_next()
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
	     __rdma_block_iter_next(biter);)
0097
0098 #ifdef CONFIG_INFINIBAND_USER_MEM
0099
/* Pin and map user memory [addr, addr + size) for DMA by @device; returns
 * an ERR_PTR on failure (see the -EOPNOTSUPP stubs below for the disabled
 * configuration).
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
/* Release a umem obtained from ib_umem_get(). */
void ib_umem_release(struct ib_umem *umem);
/* Copy @length bytes at @offset within the umem into @dst; returns 0 or a
 * negative errno.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
/* Select the best HW page size from @pgsz_bitmap for mapping the umem at
 * virtual address @virt; 0 presumably means no supported size fits — see
 * the stub below, which returns 0 when the feature is compiled out.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
/**
 * ib_umem_find_best_pgoff - find the best HW page size for the umem
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW-supported page sizes
 * @pgoff_bitmask: mask of address bits the HW can express as a page offset
 *
 * Like ib_umem_find_best_pgsz(), but instead of an IOVA the caller supplies
 * a bitmask of which low address bits the hardware can represent with an
 * offset.  The DMA address of the umem's start (first sg entry plus the
 * in-page offset of the user address) is masked with @pgoff_bitmask and
 * passed to ib_umem_find_best_pgsz() as its virt argument.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	/* DMA address of the umem's first byte, not of its first page. */
	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
0138
/* Create a umem backed by the dma-buf referred to by @fd, attaching with
 * the caller-supplied @ops (NOTE(review): presumably the importer's
 * move-notify callbacks — confirm against the core implementation).
 */
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
/* As above, but the dma-buf is pinned so no move notifications occur
 * (matches the struct's pinned:1 flag).
 */
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
/* Map/unmap the dma-buf's pages into the umem's scatterlist. */
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
/* Tear down the attachment and free the umem. */
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
0150
0151 #else
0152
0153 #include <linux/err.h>
0154
/*
 * Stubs for builds without CONFIG_INFINIBAND_USER_MEM: allocation entry
 * points fail with -EOPNOTSUPP and release paths are no-ops.
 */
static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
0162 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
0163 size_t length) {
0164 return -EOPNOTSUPP;
0165 }
/* Stub: no page-size selection possible; 0 signals no supported size. */
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
/* Stub: same failure convention as ib_umem_find_best_pgsz(). */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
0178 static inline
0179 struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
0180 unsigned long offset,
0181 size_t size, int fd,
0182 int access,
0183 struct dma_buf_attach_ops *ops)
0184 {
0185 return ERR_PTR(-EOPNOTSUPP);
0186 }
/* Stub: dma-buf umem support compiled out. */
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
/* Stub: nothing to map without user memory support. */
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
/* Stubs: release paths are no-ops in this configuration. */
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
0199
0200 #endif
0201 #endif