// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/skbuff.h>

#include "ccm.h"
#include "nfp_net.h"
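
/* Communicate with the NFP CCM (control channel messaging) service via
 * the vNIC mailbox.  Control messages (cmsgs) are built in skbs and
 * queued on mbox_cmsg.queue; the skb at the head of the queue becomes
 * the "runner" which copies as many queued messages as fit into the
 * mailbox as a batch of TLVs, kicks the mailbox command, and then
 * distributes the replies back to the waiting skbs by tag.  Threads
 * which are not first in the queue sleep on mbox_cmsg.wq until their
 * message is done or they are promoted to runner.  Posted messages
 * (nfp_ccm_mbox_post()) do not sleep; their completion is handled
 * asynchronously from mbox_cmsg.workq.
 */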
#define NFP_CCM_MBOX_BATCH_LIMIT	64
#define NFP_CCM_TIMEOUT			(NFP_NET_POLL_TIMEOUT * 1000) /* ms */
#define NFP_CCM_MAX_QLEN		1024

enum nfp_net_mbox_cmsg_state {
	NFP_NET_MBOX_CMSG_STATE_QUEUED,
	NFP_NET_MBOX_CMSG_STATE_NEXT,
	NFP_NET_MBOX_CMSG_STATE_BUSY,
	NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
	NFP_NET_MBOX_CMSG_STATE_DONE,
};
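/* All skbs on mbox_cmsg.queue use their skb->cb as this control
 * structure.  @state tracks the message through its lifetime (see the
 * states above), @err holds the final result reported to the caller,
 * @max_len is the worst-case space the message plus its reply need in
 * the mailbox, @exp_reply is the exact reply length expected (0 if
 * unknown), and @posted marks fire-and-forget messages which no thread
 * is waiting on.
 */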
struct nfp_ccm_mbox_cmsg_cb {
	enum nfp_net_mbox_cmsg_state state;
	int err;
	unsigned int max_len;
	unsigned int exp_reply;
	bool posted;
};

static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
{
	return round_down(nn->tlv_caps.mbox_len, 4) -
		NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
		4 * 2; /* Msg TLV plus End TLV headers */
}

static void
nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
	cb->err = 0;
	cb->max_len = max_len;
	cb->exp_reply = exp_reply;
	cb->posted = false;
}

static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->max_len;
}

static bool nfp_ccm_mbox_done(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
}

static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
	       cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
}

static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
}

static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->posted;
}

static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->posted = true;
}

static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
{
	return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
}

static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
}

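/* Designate the skb at the head of the queue as the next runner, i.e.
 * the thread or work item responsible for copying the next batch into
 * the mailbox.  Threads waiting in __nfp_ccm_mbox_communicate() notice
 * the state change via nfp_ccm_mbox_should_run(); posted messages have
 * no waiting thread, so kick the workqueue to run the batch for them.
 */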
static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	struct sk_buff *skb;

	skb = skb_peek(&nn->mbox_cmsg.queue);
	if (!skb)
		return;

	cb = (void *)skb->cb;
	cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
	if (cb->posted)
		queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
}

static void
nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
{
	nn_writel(nn, off,
		  FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
		  FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
}

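/* Copy a batch of messages from the head of the queue up to and
 * including @last into the mailbox.  Each message becomes a MSG TLV,
 * followed by a RESV TLV when the reply may be larger than the request,
 * and the whole batch is terminated by an empty END TLV.
 */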
static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
{
	struct sk_buff *skb;
	int reserve, i, cnt;
	__be32 *data;
	u32 off, len;

	off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
	skb = __skb_peek(&nn->mbox_cmsg.queue);
	while (true) {
		nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
				       skb->len);
		off += 4;

		/* Write message body word by word, nn_writel() performs
		 * the swap to the device's little-endian byte order.
		 */
		data = (__be32 *)skb->data;
		cnt = skb->len / 4;
		for (i = 0; i < cnt; i++) {
			nn_writel(nn, off, be32_to_cpu(data[i]));
			off += 4;
		}
		if (skb->len & 3) {
			__be32 tmp = 0;

			memcpy(&tmp, &data[i], skb->len & 3);
			nn_writel(nn, off, be32_to_cpu(tmp));
			off += 4;
		}

		/* Reserve extra space in case reply is bigger than request */
		len = round_up(skb->len, 4);
		reserve = nfp_ccm_mbox_maxlen(skb) - len;
		if (reserve > 0) {
			nfp_ccm_mbox_write_tlv(nn, off,
					       NFP_NET_MBOX_TLV_TYPE_RESV,
					       reserve);
			off += 4 + reserve;
		}

		if (skb == last)
			break;
		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
	}

	/* Empty END TLV terminates the batch */
	nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
}

static struct sk_buff *
nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
{
	struct sk_buff *skb;

	skb = __skb_peek(&nn->mbox_cmsg.queue);
	while (true) {
		if (__nfp_ccm_get_tag(skb) == tag)
			return skb;

		if (skb == last)
			return NULL;
		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
	}
}

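/* Parse the reply TLVs out of the mailbox, match each reply to its
 * request by tag, copy the reply data back into the request skb (unless
 * it was posted), and complete all messages in the batch, waking any
 * waiters.  Runs with the control BAR lock held.
 */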
static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	u8 __iomem *data, *end;
	struct sk_buff *skb;

	data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
		NFP_NET_CFG_MBOX_SIMPLE_VAL;
	end = data + nn->tlv_caps.mbox_len;

	while (true) {
		unsigned int length, offset, type;
		struct nfp_ccm_hdr hdr;
		u32 tlv_hdr;

		tlv_hdr = readl(data);
		type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
		length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
		offset = data - nn->dp.ctrl_bar;

		/* Advance past the TLV header */
		data += 4;

		if (data + length > end) {
			nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		if (type == NFP_NET_MBOX_TLV_TYPE_END)
			break;
		if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
			goto next_tlv;
		if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
		    type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
			nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		if (length < 4) {
			nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		hdr.raw = cpu_to_be32(readl(data));

		skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
		if (!skb) {
			nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
				   be16_to_cpu(hdr.tag));
			break;
		}
		cb = (void *)skb->cb;

		if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
			nn_dp_warn(&nn->dp,
				   "mailbox msg not supported type:%d\n",
				   nfp_ccm_get_type(skb));
			cb->err = -EIO;
			goto next_tlv;
		}

		if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
			nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
				   hdr.type,
				   __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
			cb->err = -EIO;
			goto next_tlv;
		}
		if (cb->exp_reply && length != cb->exp_reply) {
			nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
				   hdr.type, cb->exp_reply, length);
			cb->err = -EIO;
			goto next_tlv;
		}
		if (length > cb->max_len) {
			nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
				   hdr.type, cb->max_len, length);
			cb->err = -EIO;
			goto next_tlv;
		}

		if (!cb->posted) {
			__be32 *skb_data;
			int i, cnt;

			if (length <= skb->len)
				__skb_trim(skb, length);
			else
				skb_put(skb, length - skb->len);

			/* We overcopy here slightly, but that's okay,
			 * the skb is large enough, and the extra bytes
			 * beyond skb->len are simply ignored.
			 */
			skb_data = (__be32 *)skb->data;
			memcpy(skb_data, &hdr, 4);

			cnt = DIV_ROUND_UP(length, 4);
			for (i = 1; i < cnt; i++)
				skb_data[i] = cpu_to_be32(readl(data + i * 4));
		}

		cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
next_tlv:
		data += round_up(length, 4);
		if (data + 4 > end) {
			nn_dp_warn(&nn->dp,
				   "reached end of MBOX without END TLV\n");
			break;
		}
	}

	smp_wmb(); /* order the skb->data vs. cb->state */
	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
	do {
		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
		cb = (void *)skb->cb;

		if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
			cb->err = -ENOENT;
			smp_wmb(); /* order the cb->err vs. cb->state */
		}
		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;

		if (cb->posted) {
			if (cb->err)
				nn_dp_warn(&nn->dp,
					   "mailbox posted msg failed type:%u err:%d\n",
					   nfp_ccm_get_type(skb), cb->err);
			dev_consume_skb_any(skb);
		}
	} while (skb != last);

	nfp_ccm_mbox_mark_next_runner(nn);
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}

static void
nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	struct sk_buff *skb;

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
	do {
		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
		cb = (void *)skb->cb;

		cb->err = err;
		smp_wmb(); /* order the cb->err vs. cb->state */
		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
	} while (skb != last);

	nfp_ccm_mbox_mark_next_runner(nn);
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}

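/* Take as many messages off the queue as fit into the mailbox (up to
 * NFP_CCM_MBOX_BATCH_LIMIT), copy them in, issue the mailbox command,
 * and distribute the replies.  Called with the queue lock held, which
 * is dropped before the hardware is touched; the control BAR lock is
 * held for the duration of the mailbox access.
 */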
static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
	__releases(&nn->mbox_cmsg.queue.lock)
{
	int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
	struct sk_buff *skb, *last;
	int cnt, err;

	space -= 4; /* for the End TLV */

	/* First skb must fit, because it's ours and we checked it fits */
	cnt = 1;
	last = skb = __skb_peek(&nn->mbox_cmsg.queue);
	nfp_ccm_mbox_set_busy(skb);
	space -= 4 + nfp_ccm_mbox_maxlen(skb);

	while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
		skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
		space -= 4 + nfp_ccm_mbox_maxlen(skb);
		if (space < 0)
			break;
		last = skb;
		nfp_ccm_mbox_set_busy(skb);
		cnt++;
		if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
			break;
	}
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	/* Now we own all skbs marked busy, new requests may only be
	 * appended behind @last.
	 */

	nn_ctrl_bar_lock(nn);

	nfp_ccm_mbox_copy_in(nn, last);

	err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
	if (!err)
		nfp_ccm_mbox_copy_out(nn, last);
	else
		nfp_ccm_mbox_mark_all_err(nn, last, -EIO);

	nn_ctrl_bar_unlock(nn);

	wake_up_all(&nn->mbox_cmsg.wq);
}

static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	if (cb->err)
		dev_kfree_skb_any(skb);
	return cb->err;
}

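/* Take the skb off the queue after a timeout.  If the message is
 * already in progress (part of a batch in the hardware) we can only
 * wait for it to complete; otherwise unlink it, and if it had been
 * designated the next runner, hand that role to the new queue head.
 */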
static int
nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
			   enum nfp_ccm_type type)
	__releases(&nn->mbox_cmsg.queue.lock)
{
	bool was_first;

	if (nfp_ccm_mbox_in_progress(skb)) {
		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

		wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
		smp_rmb(); /* pairs with smp_wmb() after the reply is written */
		return nfp_ccm_mbox_skb_return(skb);
	}

	was_first = nfp_ccm_mbox_should_run(nn, skb);
	__skb_unlink(skb, &nn->mbox_cmsg.queue);
	if (was_first)
		nfp_ccm_mbox_mark_next_runner(nn);

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	if (was_first)
		wake_up_all(&nn->mbox_cmsg.wq);

	nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
		   type);
	return -ETIMEDOUT;
}

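/* Validate the message against the mailbox capabilities and make sure
 * the skb has enough room for the largest possible reply, growing it
 * up front so that the reply path never needs to allocate.
 */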
static int
nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
			 enum nfp_ccm_type type,
			 unsigned int reply_size, unsigned int max_reply_size,
			 gfp_t flags)
{
	const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
	unsigned int max_len;
	ssize_t undersize;
	int err;

	if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
		nn_dp_warn(&nn->dp,
			   "message type %d not supported by mailbox\n", type);
		return -EINVAL;
	}

	/* If the reply size is unknown assume it may take up the entire
	 * mailbox, callers should do their best for this not to happen.
	 */
	if (!max_reply_size)
		max_reply_size = mbox_max;
	max_reply_size = round_up(max_reply_size, 4);

	/* Make sure the reply will fit in the skb's buffer, growing it
	 * here if needed, so the reply path doesn't have to reallocate.
	 */
	undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
	if (undersize > 0) {
		err = pskb_expand_head(skb, 0, undersize, flags);
		if (err) {
			nn_dp_warn(&nn->dp,
				   "can't allocate reply buffer for mailbox\n");
			return err;
		}
	}

	/* Both the request and the reply must fit in the mailbox */
	max_len = max(max_reply_size, round_up(skb->len, 4));
	if (max_len > mbox_max) {
		nn_dp_warn(&nn->dp,
			   "message too big for the mailbox: %u/%u vs %u\n",
			   skb->len, max_reply_size, mbox_max);
		return -EMSGSIZE;
	}

	nfp_ccm_mbox_msg_init(skb, reply_size, max_len);

	return 0;
}

static int
nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
			 enum nfp_ccm_type type, bool critical)
{
	struct nfp_ccm_hdr *hdr;

	assert_spin_locked(&nn->mbox_cmsg.queue.lock);

	if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
		nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
		return -EBUSY;
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);

	__skb_queue_tail(&nn->mbox_cmsg.queue, skb);

	return 0;
}

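/* Send a message and wait for its reply.  The thread whose skb is at
 * the head of the queue runs the batch itself; all other threads sleep
 * on the wait queue until their message completes, they get promoted
 * to runner, or the timeout expires.
 */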
int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
			       enum nfp_ccm_type type,
			       unsigned int reply_size,
			       unsigned int max_reply_size, bool critical)
{
	int err;

	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
				       max_reply_size, GFP_KERNEL);
	if (err)
		goto err_free_skb;

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
	if (err)
		goto err_unlock;

	/* First in queue takes the mailbox lock and processes the batch */
	if (!nfp_ccm_mbox_is_first(nn, skb)) {
		bool to;

		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

		to = !wait_event_timeout(nn->mbox_cmsg.wq,
					 nfp_ccm_mbox_done(skb) ||
					 nfp_ccm_mbox_should_run(nn, skb),
					 msecs_to_jiffies(NFP_CCM_TIMEOUT));

		/* fast path for those completed by another thread */
		if (nfp_ccm_mbox_done(skb)) {
			smp_rmb(); /* pairs with smp_wmb() after the reply is written */
			return nfp_ccm_mbox_skb_return(skb);
		}

		spin_lock_bh(&nn->mbox_cmsg.queue.lock);

		if (!nfp_ccm_mbox_is_first(nn, skb)) {
			WARN_ON(!to);

			err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
			if (err)
				goto err_free_skb;
			return 0;
		}
	}

	/* We are the runner now, the helper drops the queue lock */
	nfp_ccm_mbox_run_queue_unlock(nn);
	return nfp_ccm_mbox_skb_return(skb);

err_unlock:
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
	dev_kfree_skb_any(skb);
	return err;
}

int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
			     enum nfp_ccm_type type,
			     unsigned int reply_size,
			     unsigned int max_reply_size)
{
	return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
					  max_reply_size, false);
}

static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;

	nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	skb = __skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
		    !nfp_ccm_mbox_should_run(nn, skb))) {
		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
		return;
	}

	nfp_ccm_mbox_run_queue_unlock(nn);
}

static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;
	int err;

	nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);

	skb = skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
		/* Should never happen so it's unclear what to do here.. */
		goto exit_unlock_wake;

	err = nfp_net_mbox_reconfig_wait_posted(nn);
	if (!err)
		nfp_ccm_mbox_copy_out(nn, skb);
	else
		nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
exit_unlock_wake:
	nn_ctrl_bar_unlock(nn);
	wake_up_all(&nn->mbox_cmsg.wq);
}

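/* Post a message without waiting for its reply.  If the message is at
 * the head of the queue and the control BAR lock is free, copy it in
 * and kick the mailbox right away; the wait work then collects the
 * result.  Otherwise the runq work (or the current runner) will pick
 * the message up later as part of a batch.
 */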
int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
		      enum nfp_ccm_type type, unsigned int max_reply_size)
{
	int err;

	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
				       GFP_ATOMIC);
	if (err)
		goto err_free_skb;

	nfp_ccm_mbox_mark_posted(skb);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
	if (err)
		goto err_unlock;

	if (nfp_ccm_mbox_is_first(nn, skb)) {
		if (nn_ctrl_bar_trylock(nn)) {
			nfp_ccm_mbox_copy_in(nn, skb);
			nfp_net_mbox_reconfig_post(nn,
						   NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
			queue_work(nn->mbox_cmsg.workq,
				   &nn->mbox_cmsg.wait_work);
		} else {
			nfp_ccm_mbox_mark_next_runner(nn);
		}
	}

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	return 0;

err_unlock:
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
	dev_kfree_skb_any(skb);
	return err;
}

struct sk_buff *
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
		       unsigned int reply_size, gfp_t flags)
{
	unsigned int max_size;
	struct sk_buff *skb;

	if (!reply_size)
		max_size = nfp_ccm_mbox_max_msg(nn);
	else
		max_size = max(req_size, reply_size);
	max_size = round_up(max_size, 4);

	skb = alloc_skb(max_size, flags);
	if (!skb)
		return NULL;

	skb_put(skb, req_size);

	return skb;
}

bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
{
	return nfp_ccm_mbox_max_msg(nn) >= size;
}

int nfp_ccm_mbox_init(struct nfp_net *nn)
{
	return 0;
}

void nfp_ccm_mbox_clean(struct nfp_net *nn)
{
	drain_workqueue(nn->mbox_cmsg.workq);
}

int nfp_ccm_mbox_alloc(struct nfp_net *nn)
{
	skb_queue_head_init(&nn->mbox_cmsg.queue);
	init_waitqueue_head(&nn->mbox_cmsg.wq);
	INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
	INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);

	nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
	if (!nn->mbox_cmsg.workq)
		return -ENOMEM;
	return 0;
}

void nfp_ccm_mbox_free(struct nfp_net *nn)
{
	destroy_workqueue(nn->mbox_cmsg.workq);
	WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
}