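/*
 * Memory window (MW) support for the rdma_rxe soft-RoCE driver:
 * allocation and deallocation of type 1 and type 2 MWs, binding an MW
 * to a memory region (MR), invalidation, and rkey lookup.
 */
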
#include "rxe.h"

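/*
 * Implements the alloc_mw verb: take a reference on the protection
 * domain, add the MW to the device's MW pool and build its rkey from
 * the pool index (upper 24 bits) and a generated key byte (low 8 bits).
 * Type 1 MWs start out valid; type 2 MWs start out free (unbound).
 */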
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	rxe_finalize(mw);

	return 0;
}

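/*
 * Implements the dealloc_mw verb. rxe_cleanup() removes the MW from
 * the pool; rxe_mw_cleanup() below drops any references it still holds.
 */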
int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);

	rxe_cleanup(mw);

	return 0;
}

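/*
 * Validate a bind MW work request before it takes effect: the MW must
 * be in a bindable state for its type, a type 2 MW must share its PD
 * with the QP, and the requested range and access rights must be
 * compatible with the target MR.
 */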
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			     struct rxe_mw *mw, struct rxe_mr *mr)
{
	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			pr_err_once(
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* zero based access is only allowed for type 2 MWs */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			pr_err_once("attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* a type 2 MW can only be bound while it is free */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			pr_err_once(
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* the QP and a type 2 MW must share a protection domain */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			pr_err_once(
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* binding a type 2 MW requires a real, nonzero length MR */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			pr_err_once(
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	/* the remaining checks only apply when an MR is present */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		pr_err_once("attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* the MR must allow memory window binding */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		pr_err_once(
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* a remotely writable MW requires local write access on the MR */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		pr_err_once(
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* the bound range must lie entirely within the MR */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
			pr_err_once(
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->iova + mr->length)))) {
			pr_err_once(
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

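/*
 * Apply a validated bind: refresh the key byte of the rkey, record the
 * access rights and range, drop any previously bound MR and take
 * references on the new MR and, for type 2 MWs, on the QP.
 */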
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			   struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	/* release any MR left over from an earlier bind */
	if (mw->mr) {
		rxe_put(mw->mr);
		atomic_dec(&mw->mr->num_mw);
		mw->mr = NULL;
	}

	/* a zero length bind (type 1 only) leaves the MW without an MR */
	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}

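/*
 * Handle a bind MW work request on the send queue: look up the MW by
 * rkey and the MR by lkey, validate the request under the MW lock and
 * then apply it.
 */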
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);
err:
	return ret;
}

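/* Only type 2 MWs that are not already invalid may be invalidated. */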
static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

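/*
 * Return an invalidated MW to the free state: drop the QP and MR
 * references taken at bind time and clear the cached bind parameters.
 */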
static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* a bound type 2 MW holds a reference on its QP */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* unlink the MR from the MW if one is present */
	if (mw->mr) {
		mr = mw->mr;
		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

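/*
 * Invalidate the MW identified by rkey: look it up in the MW pool,
 * verify the key and, under the MW lock, return it to the free state.
 */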
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);
err:
	return ret;
}

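/*
 * Look up an MW by rkey on behalf of an incoming request and return it
 * with a reference held, or NULL if the key, PD, QP, state or access
 * rights do not match. The caller must drop the reference with
 * rxe_put() when done.
 */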
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) ||
		     (access && !(access & mw->access)) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}

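/*
 * Pool cleanup callback, run when the MW's pool element is destroyed:
 * release the PD reference taken in rxe_alloc_mw() and any MR and QP
 * references still held from a bind.
 */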
void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
	struct rxe_pd *pd = to_rpd(mw->ibmw.pd);

	rxe_put(pd);

	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}