/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
    struct ib_device       *ibdev;
    struct mm_struct       *owning_mm;
    u64 iova;
    size_t          length;
    unsigned long       address;
    u32 writable : 1;
    u32 is_odp : 1;
    u32 is_dmabuf : 1;
    struct work_struct  work;
    struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
    struct ib_umem umem;
    struct dma_buf_attachment *attach;
    struct sg_table *sgt;
    struct scatterlist *first_sg;
    struct scatterlist *last_sg;
    unsigned long first_sg_offset;
    unsigned long last_sg_trim;
    void *private;
    u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
    return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
    return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
                           unsigned long pgsz)
{
    return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
           (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
                        unsigned long pgsz)
{
    return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
             ALIGN_DOWN(umem->iova, pgsz))) /
           pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
    return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

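/*
 * Example (illustrative sketch, not part of this header): how a driver
 * might size its translation structures from the helpers above.  The 2MB
 * device block size and the function name are assumptions for illustration;
 * real drivers take the block size from ib_umem_find_best_pgsz().
 */
static void example_umem_sizing(struct ib_umem *umem)
{
    unsigned long pgsz = 1UL << 21;                        /* assumed 2MB HW pages */
    size_t hw_blocks = ib_umem_num_dma_blocks(umem, pgsz); /* entries to program */
    size_t cpu_pages = ib_umem_num_pages(umem);            /* PAGE_SIZE granules */
    int start_off = ib_umem_offset(umem);                  /* offset in first page */

    pr_debug("umem: %zu HW blocks, %zu CPU pages, start offset %d\n",
             hw_blocks, cpu_pages, start_off);
}
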
static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
                        struct ib_umem *umem,
                        unsigned long pgsz)
{
    __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
                umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
    for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
         __rdma_block_iter_next(biter);)

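/*
 * Example (illustrative sketch, not part of this header): the common driver
 * pattern of filling a device page-address array with one entry per
 * pgsz-aligned DMA block.  "pas" and its sizing are assumptions; pgsz would
 * normally come from ib_umem_find_best_pgsz().
 */
static void example_fill_page_array(struct ib_umem *umem, unsigned long pgsz,
                                    u64 *pas)
{
    struct ib_block_iter biter;
    unsigned int i = 0;

    rdma_umem_for_each_dma_block(umem, &biter, pgsz)
        pas[i++] = rdma_block_iter_dma_address(&biter);

    /* i == ib_umem_num_dma_blocks(umem, pgsz) once the loop completes */
}
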
#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
              size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                     unsigned long pgsz_bitmap,
                     unsigned long virt);

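/*
 * Example (illustrative sketch, not part of this header): the typical
 * pin-and-select-page-size flow during MR registration.  The supported
 * page-size bitmap (4K..2MB) is an assumed HW capability, and the function
 * name is hypothetical.
 */
static struct ib_umem *example_pin_for_mr(struct ib_device *device, u64 start,
                                          u64 length, u64 iova, int access,
                                          unsigned long *pgsz)
{
    struct ib_umem *umem;

    umem = ib_umem_get(device, start, length, access);
    if (IS_ERR(umem))
        return umem;

    *pgsz = ib_umem_find_best_pgsz(umem, GENMASK(21, 12), iova);
    if (!*pgsz) {
        ib_umem_release(umem);
        return ERR_PTR(-EINVAL);
    }
    return umem;
}
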
/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                            unsigned long pgsz_bitmap,
                            u64 pgoff_bitmask)
{
    struct scatterlist *sg = umem->sgt_append.sgt.sgl;
    dma_addr_t dma_addr;

    dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
    return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
                      dma_addr & pgoff_bitmask);
}

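/*
 * Example (illustrative sketch, not part of this header): the case from the
 * comment above - HW that needs 64 byte alignment but can encode offsets up
 * to 4032 bytes (bits 6..11, i.e. 0xFC0).  The page-size bitmap (4K..8MB) is
 * an assumed HW capability.
 */
static unsigned long example_pgsz_with_offset(struct ib_umem *umem)
{
    return ib_umem_find_best_pgoff(umem, GENMASK(23, 12), 0xFC0);
}
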
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                      unsigned long offset, size_t size,
                      int fd, int access,
                      const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
                         unsigned long offset,
                         size_t size, int fd,
                         int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);

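/*
 * Example (illustrative sketch, not part of this header): importing a pinned
 * dma-buf for a device that cannot cope with move notifications.  With the
 * pinned variant the sg list is already mapped, so the result can be used
 * like any other umem; ib_umem_dmabuf_release() undoes the import.
 */
static struct ib_umem_dmabuf *example_import_pinned(struct ib_device *device,
                                                    unsigned long offset,
                                                    size_t size, int fd,
                                                    int access)
{
    struct ib_umem_dmabuf *umem_dmabuf;

    umem_dmabuf = ib_umem_dmabuf_get_pinned(device, offset, size, fd, access);
    if (IS_ERR(umem_dmabuf))
        return umem_dmabuf;

    /* ... build the MR from &umem_dmabuf->umem ... */
    return umem_dmabuf;
}
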
#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
                      unsigned long addr, size_t size,
                      int access)
{
    return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
                        size_t length) {
    return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
                           unsigned long pgsz_bitmap,
                           unsigned long virt)
{
    return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
                            unsigned long pgsz_bitmap,
                            u64 pgoff_bitmask)
{
    return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                      unsigned long offset,
                      size_t size, int fd,
                      int access,
                      struct dma_buf_attach_ops *ops)
{
    return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
              size_t size, int fd, int access)
{
    return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
    return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */