0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #include <linux/slab.h>
0036
0037 #include "mad_priv.h"
0038 #include "mad_rmpp.h"
0039
/*
 * Lifecycle of an RMPP reassembly context: ACTIVE while segments are
 * still expected, TIMEOUT once the inter-segment timer has fired, and
 * COMPLETE after the last segment has been linked in.
 */
enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};
0045
/*
 * Per-transaction receive reassembly context.  One instance tracks all
 * segments of a single RMPP transfer, keyed by the (tid, src_qp, slid,
 * mgmt_class, class_version, method) tuple used by find_rmpp_recv().
 */
struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;	/* owning agent; agent->lock guards 'list' and 'state' */
	struct list_head list;			/* entry on agent->rmpp_list */
	struct delayed_work timeout_work;	/* inter-segment timeout (see start_rmpp) */
	struct delayed_work cleanup_work;	/* deferred teardown after completion */
	struct completion comp;			/* signalled when refcount drops to zero */
	enum rmpp_state state;
	spinlock_t lock;			/* guards seg_num/last_ack/newwin/cur_seg_buf */
	refcount_t refcount;

	struct ib_ah *ah;			/* AH back to the sender, used for ACKs */
	struct ib_mad_recv_wc *rmpp_wc;		/* first segment; holds the rmpp_list of all segs */
	struct ib_mad_recv_buf *cur_seg_buf;	/* highest in-order segment received so far */
	int last_ack;				/* last segment number we ACKed */
	int seg_num;				/* number of in-order segments received */
	int newwin;				/* receive window advertised to the sender */
	int repwin;				/* window granted for a dual-sided reply */

	/* Fields below identify the transaction; written once at create time. */
	__be64 tid;
	u32 src_qp;
	u32 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	u8 base_version;
};
0072
/*
 * Drop one reference; the final put completes 'comp' so that
 * destroy_rmpp_recv() can finish tearing the context down.
 */
static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (refcount_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}
0078
/*
 * Release the caller's reference, wait for any concurrent users to drop
 * theirs, then free the AH and the context.  Must be called from a
 * context that may sleep (wait_for_completion, sleepable AH destroy).
 */
static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
	kfree(rmpp_recv);
}
0086
/*
 * Tear down all in-progress reassemblies for an agent being destroyed.
 * Pending timers are cancelled under the lock, then the workqueue is
 * flushed so no handler can still be running before the list is walked
 * and each context destroyed.  Contexts that never completed still own
 * their receive MAD, so it is freed here as well.
 */
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	/* Ensure any already-running timeout/cleanup work has finished. */
	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		/* COMPLETE contexts handed rmpp_wc to the client already. */
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}
0109
/*
 * Fill a send buffer with an RMPP ACK for the current receive state.
 * The header is cloned from the incoming data MAD, the method's RESP
 * bit is toggled, and seg_num/newwin are snapshotted under the context
 * lock so the advertised window is consistent with last_ack.
 */
static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}
0129
/*
 * Allocate, format and post an ACK back to the sender of 'recv_wc'
 * using the AH cached in the reassembly context.  Failures are silent
 * (best effort) — the sender will retransmit if the ACK is lost.
 */
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;	/* context owns the AH; not freed with msg */
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}
0150
/*
 * Allocate a one-segment response MAD addressed back to the sender of
 * 'recv_wc', creating a fresh AH from the work completion.  On success
 * the AH is stored in both msg->ah and msg->context[0]; the send
 * completion handler (ib_rmpp_send_handler) uses that equality to know
 * it owns the AH and must destroy it.  Returns an ERR_PTR on failure.
 */
static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}
0177
/*
 * Send a dual-sided RMPP ACK (seg_num 0, window 1) acknowledging the
 * final ACK of a transfer, so the peer may reuse the window for its
 * reply.  The AH allocated by alloc_response_msg() is released here on
 * post failure; otherwise the send handler destroys it.
 */
static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}
0203
0204 void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
0205 {
0206 if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
0207 rdma_destroy_ah(mad_send_wc->send_buf->ah,
0208 RDMA_DESTROY_AH_SLEEPABLE);
0209 ib_free_send_mad(mad_send_wc->send_buf);
0210 }
0211
/*
 * Send an RMPP ABORT carrying 'rmpp_status' back to the sender of
 * 'recv_wc', rejecting the transfer.  Best effort: allocation or post
 * failures are dropped silently.
 */
static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		/* Post failed: we still own the AH and the buffer. */
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}
0240
/*
 * Delayed-work handler for the inter-segment timeout.  If the context
 * is still ACTIVE it is marked TIMEOUT, unlinked from the agent list,
 * a T2L (total time too long) ABORT is sent, and the context plus its
 * partially reassembled MAD are freed.  A context already COMPLETE (or
 * racing with cleanup) is left alone.
 */
static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}
0262
/*
 * Delayed-work handler queued by complete_rmpp(): after the grace
 * period, unlink the completed context from the agent list and free it.
 * The reassembled MAD itself was already handed to the client.
 */
static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}
0274
/*
 * Allocate and initialize a reassembly context for the first segment
 * of a new transfer.  An AH back to the sender is created up front so
 * ACKs can be sent later.  The context starts with one reference,
 * window/segment counters at 1, and copies the identifying tuple from
 * the MAD header.  Returns NULL on allocation or AH-creation failure.
 */
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	refcount_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	rmpp_recv->base_version = mad_hdr->base_version;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}
0321
0322 static struct mad_rmpp_recv *
0323 find_rmpp_recv(struct ib_mad_agent_private *agent,
0324 struct ib_mad_recv_wc *mad_recv_wc)
0325 {
0326 struct mad_rmpp_recv *rmpp_recv;
0327 struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
0328
0329 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
0330 if (rmpp_recv->tid == mad_hdr->tid &&
0331 rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
0332 rmpp_recv->slid == mad_recv_wc->wc->slid &&
0333 rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
0334 rmpp_recv->class_version == mad_hdr->class_version &&
0335 rmpp_recv->method == mad_hdr->method)
0336 return rmpp_recv;
0337 }
0338 return NULL;
0339 }
0340
/*
 * Find the context for this MAD and take a reference on it, all under
 * agent->lock so the context cannot vanish between lookup and get.
 * The caller must drop the reference with deref_rmpp_recv().
 */
static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		refcount_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}
0355
/*
 * Insert a newly created context unless another context for the same
 * transaction already exists (lost a race with a duplicate first
 * segment).  Returns the pre-existing context, or NULL if 'rmpp_recv'
 * was inserted.  Caller must hold agent->lock (see start_rmpp()).
 */
static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}
0368
0369 static inline int get_last_flag(struct ib_mad_recv_buf *seg)
0370 {
0371 struct ib_rmpp_mad *rmpp_mad;
0372
0373 rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
0374 return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
0375 }
0376
0377 static inline int get_seg_num(struct ib_mad_recv_buf *seg)
0378 {
0379 struct ib_rmpp_mad *rmpp_mad;
0380
0381 rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
0382 return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
0383 }
0384
0385 static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
0386 struct ib_mad_recv_buf *seg)
0387 {
0388 if (seg->list.next == rmpp_list)
0389 return NULL;
0390
0391 return container_of(seg->list.next, struct ib_mad_recv_buf, list);
0392 }
0393
/*
 * Receive-window increment: one eighth of the QP's max active receive
 * queue depth, but at least 1.
 */
static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}
0398
/*
 * Find the segment after which 'seg_num' should be inserted.  The list
 * is kept sorted, so walk backwards and return the first entry with a
 * smaller number.  Returns NULL if 'seg_num' is already present
 * (duplicate) — caller drops the segment in that case.
 */
static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
						 int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}
0414
/*
 * Advance cur_seg_buf/seg_num over 'new_buf' and any previously
 * buffered out-of-order segments that now form a contiguous run.
 * Caller holds rmpp_recv->lock (see continue_rmpp()).
 */
static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}
0426
/*
 * Total reassembled MAD length: header + seg_num full data payloads,
 * minus the pad the last segment advertises via paylen_newwin.  OPA
 * ports with an OPA base version use the larger OPA segment size.
 * An out-of-range pad value is treated as 0 (malformed sender).
 */
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;
	bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
				    rmpp_recv->agent->qp_info->port_priv->port_num);

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
		data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
		pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	} else {
		data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
		pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > IB_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	}

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}
0451
/*
 * Finish a fully reassembled transfer: send the final ACK, cancel the
 * inter-segment timer (only armed for multi-segment transfers), set
 * the total MAD length, and schedule deferred cleanup 10s later so
 * late duplicates can still be ACKed.  Returns the reassembled wc for
 * delivery to the client.
 */
static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);

	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}
0467
/*
 * Handle a non-first data segment.  Drops the segment if no transfer
 * is in progress (drop1), if it duplicates an already-ACKed segment or
 * arrives after completion (drop2, re-ACKed first), or if the transfer
 * timed out / the segment is outside the window or a duplicate
 * (drop3).  Otherwise the segment is linked in sorted order; when it
 * extends the contiguous run, the transfer either completes (LAST flag
 * on the newest in-order segment) or the window is advanced and ACKed
 * once the run reaches the window edge.
 *
 * Returns the reassembled wc when the transfer completed, else NULL.
 */
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		/* Duplicate/late segment: re-ACK so the sender makes progress. */
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;	/* duplicate of an unacked buffered segment */

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		/* Segment extends the in-order run. */
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
0526
/*
 * Handle the first segment of a transfer.  A new context is created
 * and inserted under agent->lock; if another context for the same
 * transaction already exists (duplicate first segment), the new one is
 * destroyed and the segment is treated as a continuation.  A
 * single-segment transfer (LAST flag set) completes immediately and is
 * returned to the client; otherwise the 40s inter-segment timer is
 * armed, the window is opened, and an ACK is sent.
 */
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* Lost the race: another context already tracks this tid. */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	refcount_inc(&rmpp_recv->refcount);	/* held across the unlock below */

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 sec timeout until we can receive ACKs */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;	/* still reassembling; nothing for the client yet */
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}
0566
/*
 * Prepare and post the next outgoing segment.  Increments seg_num,
 * sets FIRST/LAST flags as appropriate, and fills paylen_newwin: the
 * total payload length on the first segment, the last segment's
 * payload on the final one, 0 otherwise.  The per-segment retry
 * timeout is capped at 2 seconds while ACKs are still outstanding.
 * Returns the result of ib_send_mad().
 */
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = (mad_send_wr->send_buf.seg_count *
			  mad_send_wr->send_buf.seg_rmpp_size) -
			  mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}
0597
/*
 * Abort the outgoing send matching 'mad_recv_wc' (triggered by a
 * received STOP/ABORT or a protocol error).  Sends already fully
 * ACKed, timed out, or failed are left for their normal completion
 * path.  Otherwise the send is marked done and completed with
 * IB_WC_REM_ABORT_ERR, carrying the RMPP status in vendor_err.
 */
static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}
0625
/*
 * Record that the peer has ACKed through 'seg_num' and advance
 * last_ack_seg forward through the segment list to the matching entry.
 * Caller guarantees seg_num is ahead of the current last_ack.
 */
static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}
0637
/*
 * Dual-sided ACK (seg_num 0): the peer grants us 'newwin' for the
 * response direction.  Store it on the completed receive context so
 * init_newwin() can use it for the reply.  Called with agent->lock
 * held (from process_rmpp_ack).
 */
static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}
0647
/*
 * Handle a received RMPP ACK for one of our outgoing sends.
 *
 * Protocol violations abort the send and NACK the peer: a nonzero
 * status in an ACK (BAD_STATUS), a window smaller than the ACKed
 * segment (W2S), or an ACK beyond what we sent / beyond the window
 * (S2B).  A seg_num of 0 with no matching send is a dual-sided ACK.
 * Otherwise, advance last_ack/newwin, reset retries on forward
 * progress, and either complete the send (all segments ACKed, no
 * response expected), rearm the response timeout and DS-ACK, or post
 * the next segment if the window allows.
 */
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* Repeated ACK for a fully-ACKed DS send: re-grant the window. */
		ack_ds_ack(agent, mad_recv_wc);
		return;
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}
0741
/*
 * Dispatch a received RMPP DATA segment.  A nonzero status field or a
 * FIRST flag inconsistent with the segment number is a protocol error:
 * NACK the sender and drop the MAD.  Segment 1 starts a new transfer;
 * anything else continues an existing one.  Returns the reassembled wc
 * when a transfer completes, else NULL.
 */
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
0774
0775 static void process_rmpp_stop(struct ib_mad_agent_private *agent,
0776 struct ib_mad_recv_wc *mad_recv_wc)
0777 {
0778 struct ib_rmpp_mad *rmpp_mad;
0779
0780 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
0781
0782 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
0783 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
0784 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
0785 } else
0786 abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
0787 }
0788
0789 static void process_rmpp_abort(struct ib_mad_agent_private *agent,
0790 struct ib_mad_recv_wc *mad_recv_wc)
0791 {
0792 struct ib_rmpp_mad *rmpp_mad;
0793
0794 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
0795
0796 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
0797 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
0798 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
0799 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
0800 } else
0801 abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
0802 }
0803
/*
 * Entry point for received MADs.  Non-RMPP MADs (ACTIVE flag clear)
 * pass through untouched.  RMPP MADs with an unknown version are
 * rejected with UNV.  DATA segments go through reassembly and may
 * return a completed wc; ACK/STOP/ABORT are consumed here, and any
 * other type is rejected with BADT.  Consumed MADs are freed before
 * returning NULL.
 */
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}
0841
/*
 * Initial send window for a new outgoing transfer.  Defaults to 1.
 * For a response MAD, look for the completed receive context of the
 * request (same tid/class/version, request direction, matching DLID)
 * and use the window the requester granted via its dual-sided ACK.
 */
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}
0874
/*
 * Start sending an outgoing MAD under RMPP.  Returns UNHANDLED for
 * non-RMPP MADs and INTERNAL for non-DATA RMPP types (sent as a single
 * packet by the core).  For DATA, initialize the window, account for
 * sends with no timeout (extra refcount so the final ACK completion
 * can drop it), and post the first segment; CONSUMED on success.
 */
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}
0899
/*
 * Handle a local send completion for an RMPP send.  Non-RMPP and
 * non-DATA MADs pass through to the core.  Failed or already-finished
 * sends are PROCESSED (no more segments).  When everything has been
 * ACKed, switch the timeout to the caller's response timeout.  Stop at
 * the window edge or last segment (wait for ACKs); otherwise post the
 * next segment, reporting GENERAL_ERR on a post failure.
 */
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;	/* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;		/* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED;	/* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED;	/* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED;	/* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;	/* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}
0938
/*
 * Retry handler for an RMPP send that timed out waiting for an ACK.
 * Rewind to the last ACKed segment and resend from there.  Non-RMPP
 * sends are UNHANDLED; fully-ACKed sends (waiting on a response, not
 * an ACK) and failed reposts are PROCESSED.
 */
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;	/* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;	/* Timeout on final ACK */

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}