0001
0002
0003
0004
0005
0006 #ifndef DEF_RDMAVT_INCMR_H
0007 #define DEF_RDMAVT_INCMR_H
0008
0009
0010
0011
0012
0013 #include <linux/percpu-refcount.h>
0014
0015
0016
0017
0018
/*
 * One scatter/gather segment: a kernel virtual address and the number
 * of bytes addressable at it.
 */
struct rvt_seg {
	void *vaddr;	/* start of this segment */
	size_t length;	/* byte length of this segment */
};
0023
0024
0025 #define RVT_SEGSZ (PAGE_SIZE / sizeof(struct rvt_seg))
0026
/*
 * A page-sized array of segments; RVT_SEGSZ is chosen so one
 * rvt_segarray fills exactly one page.
 */
struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};
0030
/*
 * Memory region descriptor.  The segment map is a flexible array of
 * page-sized rvt_segarray pointers appended to the allocation.
 */
struct rvt_mregion {
	struct ib_pd *pd;	/* owning protection domain */
	u64 user_base;		/* presumably the user virtual address of the
				 * region — TODO confirm against callers */
	u64 iova;		/* IB (DMA-visible) start address of the region
				 * — NOTE(review): assumed from the name */
	size_t length;		/* total byte length of the region */
	u32 lkey;		/* local key; tested against 0 in
				 * rvt_update_sge() below */
	u32 offset;		/* NOTE(review): meaning not visible here —
				 * presumably offset of MR in first map page */
	int access_flags;	/* IB access flags for this MR */
	u32 max_segs;		/* number of segments allowed — confirm */
	u32 mapsz;		/* number of entries in map[]; bounds sge->m
				 * in rvt_update_sge() */
	atomic_t lkey_invalid;	/* nonzero when the lkey has been invalidated
				 * — NOTE(review): assumed from the name */
	u8 page_shift;		/* page size as a shift, when uniform */
	u8 lkey_published;	/* nonzero once lkey is visible in the global
				 * lkey table — confirm against rvt mr code */
	struct percpu_ref refcount;	/* per-CPU ref; see rvt_get_mr()/
					 * rvt_put_mr() */
	struct completion comp;	/* signaled on final ref drop — presumably
				 * waited on at MR teardown; confirm */
	struct rvt_segarray *map[];	/* flexible array: mapsz pages of
					 * segments */
};
0048
0049 #define RVT_MAX_LKEY_TABLE_BITS 23
0050
/*
 * Table mapping lkey/rkey values to memory regions.  Readers go through
 * the RCU-protected table pointer array; writers take @lock.  The lock
 * and the write-side cursors (@next, @gen) are placed on their own cache
 * line so allocation traffic does not bounce the read-mostly fields.
 */
struct rvt_lkey_table {
	/* read-mostly fields */
	u32 max;		/* size of the table — confirm exact meaning */
	u32 shift;		/* lkey-to-index shift — NOTE(review): assumed
				 * from the name; verify against rvt mr code */
	struct rvt_mregion __rcu **table;	/* RCU-protected MR array */

	/* writer fields, on a separate cache line */
	spinlock_t lock ____cacheline_aligned_in_smp;	/* protects the
							 * fields below */
	u32 next;		/* next unused index, presumably — confirm */
	u32 gen;		/* generation counter folded into new keys —
				 * NOTE(review): assumed; confirm */
};
0062
0063
0064
0065
0066
/*
 * A scatter/gather element being walked.  @m/@n index into
 * mr->map[m]->segs[n] (see rvt_update_sge()); @vaddr/@length track the
 * current position within that segment, @sge_length what remains of the
 * whole SGE.
 */
struct rvt_sge {
	struct rvt_mregion *mr;	/* MR this SGE references */
	void *vaddr;		/* kernel virtual address of current position */
	u32 sge_length;		/* bytes remaining in the whole SGE */
	u32 length;		/* bytes remaining in the current segment */
	u16 m;			/* current index into mr->map[] */
	u16 n;			/* current index into map[m]->segs[] */
};
0075
/*
 * State for iterating over a list of SGEs.  @sge is the element
 * currently being consumed; @sg_list points at the next one to load
 * when @sge is exhausted (see rvt_update_sge()).
 */
struct rvt_sge_state {
	struct rvt_sge *sg_list;	/* next SGE in the list */
	struct rvt_sge sge;		/* SGE currently being used */
	u32 total_len;			/* total bytes across all SGEs —
					 * NOTE(review): not read in this
					 * header; confirm semantics */
	u8 num_sge;			/* SGEs remaining, including @sge */
};
0082
/* Drop a per-CPU reference on @mr; pairs with rvt_get_mr(). */
static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	percpu_ref_put(&mr->refcount);
}
0087
/* Take a per-CPU reference on @mr; release with rvt_put_mr(). */
static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	percpu_ref_get(&mr->refcount);
}
0092
0093 static inline void rvt_put_ss(struct rvt_sge_state *ss)
0094 {
0095 while (ss->num_sge) {
0096 rvt_put_mr(ss->sge.mr);
0097 if (--ss->num_sge)
0098 ss->sge = *ss->sg_list++;
0099 }
0100 }
0101
0102 static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
0103 {
0104 u32 len = sge->length;
0105
0106 if (len > length)
0107 len = length;
0108 if (len > sge->sge_length)
0109 len = sge->sge_length;
0110
0111 return len;
0112 }
0113
0114 static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length,
0115 bool release)
0116 {
0117 struct rvt_sge *sge = &ss->sge;
0118
0119 sge->vaddr += length;
0120 sge->length -= length;
0121 sge->sge_length -= length;
0122 if (sge->sge_length == 0) {
0123 if (release)
0124 rvt_put_mr(sge->mr);
0125 if (--ss->num_sge)
0126 *sge = *ss->sg_list++;
0127 } else if (sge->length == 0 && sge->mr->lkey) {
0128 if (++sge->n >= RVT_SEGSZ) {
0129 if (++sge->m >= sge->mr->mapsz)
0130 return;
0131 sge->n = 0;
0132 }
0133 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
0134 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
0135 }
0136 }
0137
0138 static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
0139 bool release)
0140 {
0141 struct rvt_sge *sge = &ss->sge;
0142
0143 while (length) {
0144 u32 len = rvt_get_sge_length(sge, length);
0145
0146 WARN_ON_ONCE(len == 0);
0147 rvt_update_sge(ss, len, release);
0148 length -= len;
0149 }
0150 }
0151
0152 bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
0153 bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);
0154
0155 #endif