#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}

/**
 * __hwrm_req_init() - Initialize an HWRM request.
 * @bp: The driver context.
 * @req: Where to return the new request pointer.
 * @req_type: The HWRM request type.
 * @req_len: The length of the request structure in bytes.
 *
 * Allocate a zeroed request/response buffer pair from the per-device DMA
 * pool, set up the bookkeeping context embedded in the buffer and fill in
 * the common request fields (req_type, resp_addr, cmpl_ring, target_id).
 * Callers normally reach this through the hwrm_req_init() wrapper, which
 * supplies the request length from the request structure type.
 *
 * Return: zero on success, -E2BIG if the request does not fit in the DMA
 * buffer, -ENOMEM if the buffer could not be allocated.
 */
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
	struct bnxt_hwrm_ctx *ctx;
	dma_addr_t dma_handle;
	u8 *req_addr;

	if (req_len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
				  &dma_handle);
	if (!req_addr)
		return -ENOMEM;

	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);

	/* the sentinel is used later to validate request pointers */
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
	ctx->req_len = req_len;
	ctx->req = (struct input *)req_addr;
	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
	ctx->dma_handle = dma_handle;
	ctx->flags = 0;
	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	ctx->gfp = GFP_KERNEL;
	ctx->slice_addr = NULL;

	/* initialize common request fields */
	ctx->req->req_type = cpu_to_le16(req_type);
	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
	*req = ctx->req;

	return 0;
}
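
/*
 * Rough sketch of the DMA block carved out by __hwrm_req_init(), inferred
 * from the offsets used above; the authoritative constants live in
 * bnxt_hwrm.h:
 *
 *	0 ...................... request (struct input), at most
 *				  BNXT_HWRM_CTX_OFFSET bytes
 *	BNXT_HWRM_CTX_OFFSET .... struct bnxt_hwrm_ctx (driver bookkeeping)
 *	BNXT_HWRM_RESP_OFFSET ... response (struct output)
 *	BNXT_HWRM_DMA_SIZE ...... end of the block
 *
 * hwrm_req_dma_slice() suballocates auxiliary DMA buffers from the unused
 * space between the end of the request and BNXT_HWRM_CTX_OFFSET, working
 * downward, and falls back to dma_alloc_coherent() once that space is
 * exhausted.
 */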

static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
	struct input *req = (struct input *)req_addr;
	struct bnxt_hwrm_ctx *ctx = ctx_addr;
	u64 sentinel;

	if (!req) {
		/* can only be due to a software bug, be loud */
		netdev_err(bp->dev, "null HWRM request");
		dump_stack();
		return NULL;
	}

	/* a sentinel mismatch indicates a stale or corrupted request pointer */
	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
	if (ctx->sentinel != sentinel) {
		/* can only be due to a software bug, be loud */
		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
			   (u32)le16_to_cpu(req->req_type));
		dump_stack();
		return NULL;
	}

	return ctx;
}

/**
 * hwrm_req_timeout() - Set the completion timeout for the request.
 * @bp: The driver context.
 * @req: The request to set the timeout for.
 * @timeout: The timeout in milliseconds.
 *
 * Override the default HWRM command timeout used by subsequent calls to
 * hwrm_req_send() for this request. Useful for long running commands.
 */
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->timeout = timeout;
}

/**
 * hwrm_req_alloc_flags() - Set the GFP flags used for slice allocations.
 * @bp: The driver context.
 * @req: The request whose slice allocations are affected.
 * @gfp: The GFP flags to pass to dma_alloc_coherent() when a subsequent
 *	hwrm_req_dma_slice() call cannot be satisfied from the request
 *	buffer itself.
 */
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->gfp = gfp;
}

/**
 * hwrm_req_replace() - Replace the request data.
 * @bp: The driver context.
 * @req: The request to modify.
 * @new_req: The pre-built request to copy or reference.
 * @len: The length of @new_req in bytes.
 *
 * Replace the contents of @req with a request that was constructed
 * elsewhere, releasing any DMA slices already attached to @req. Depending
 * on the length and the firmware capabilities, the new request is either
 * copied into the DMA buffer owned by @req or simply referenced for
 * subsequent calls to hwrm_req_send(); in the latter case the caller must
 * keep @new_req valid until the send completes.
 *
 * Return: zero on success, -EINVAL if @req is not a valid request,
 * -E2BIG if @new_req is too large.
 */
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *internal_req = req;
	u16 req_type;

	if (!ctx)
		return -EINVAL;

	if (len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	/* free any existing slices */
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	if (ctx->slice_addr) {
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);
		ctx->slice_addr = NULL;
	}
	ctx->gfp = GFP_KERNEL;

	/* requests sent via DMA must reside in the internal buffer */
	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
		memcpy(internal_req, new_req, len);
	} else {
		internal_req->req_type = ((struct input *)new_req)->req_type;
		ctx->req = new_req;
	}

	ctx->req_len = len;
	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
					  BNXT_HWRM_RESP_OFFSET);

	/* update the sentinel for the potentially new request type */
	req_type = le16_to_cpu(internal_req->req_type);
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

	return 0;
}

/**
 * hwrm_req_flags() - Set the caller visible context flags for the request.
 * @bp: The driver context.
 * @req: The request to modify.
 * @flags: Flags from enum bnxt_hwrm_ctx_flags; only bits in HWRM_API_FLAGS
 *	are honored. For example, BNXT_HWRM_CTX_SILENT downgrades error
 *	logging in hwrm_req_send() to debug level.
 */
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->flags |= (flags & HWRM_API_FLAGS);
}

/**
 * hwrm_req_hold() - Take ownership of the request and its resources.
 * @bp: The driver context.
 * @req: The request to own.
 *
 * Once held, the request is no longer consumed by hwrm_req_send(); the
 * caller becomes responsible for releasing it with hwrm_req_drop(). Holding
 * a request is the usual way to keep the response buffer valid so that it
 * can be read after the send completes, or to reuse the same request for
 * several sends.
 *
 * Return: A pointer to the response buffer, or NULL if the request is
 * invalid or already owned.
 */
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *input = (struct input *)req;

	if (!ctx)
		return NULL;

	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
		/* can only be due to a software bug, be loud */
		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}

static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
	dma_addr_t dma_handle = ctx->dma_handle;	/* save before invalidate */

	/* unmap any auxiliary DMA slice */
	if (ctx->slice_addr)
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);

	/* invalidate the context, clearing the sentinel and dma_handle */
	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

	/* return the buffer to the DMA pool */
	if (dma_handle)
		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume. The request, any attached DMA slices and
 *	the response buffer are no longer valid afterwards.
 *
 * Releases a held request, or an unowned request that has not yet been
 * consumed by hwrm_req_send(). A request must not be dropped more than
 * once.
 */
void hwrm_req_drop(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		__hwrm_ctx_drop(bp, ctx);
}

static int __hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_LOCKED:
		return -EROFS;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOSPC;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
		return -EINVAL;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
	case HWRM_ERR_CODE_BUSY:
		return -EAGAIN;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HWRM_ERR_CODE_PF_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EIO;
	}
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}

static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
{
	u32 ring = le16_to_cpu(req->cmpl_ring);
	u32 type = le16_to_cpu(req->req_type);
	u32 tgt = le16_to_cpu(req->target_id);
	u32 seq = le16_to_cpu(req->seq_id);
	char opt[32] = "\n";

	if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
		snprintf(opt, 16, " ring %d\n", ring);

	if (unlikely(tgt != BNXT_HWRM_TARGET))
		snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);

	netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
		   type, seq, opt);
}

#define hwrm_err(bp, ctx, fmt, ...)				       \
	do {							       \
		if ((ctx)->flags & BNXT_HWRM_CTX_SILENT)	       \
			netdev_dbg((bp)->dev, fmt, __VA_ARGS__);       \
		else						       \
			netdev_err((bp)->dev, fmt, __VA_ARGS__);       \
	} while (0)

static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
{
	if (req_type == HWRM_VER_GET)
		return false;

	if (!bp->fw_health || !bp->fw_health->status_reliable)
		return false;

	*fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	struct bnxt_hwrm_wait_token *token = NULL;
	struct hwrm_short_input short_input = {0};
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	unsigned int i, timeout, tmo_count;
	u32 *data = (u32 *)ctx->req;
	u32 msg_len = ctx->req_len;
	u32 req_type, sts;
	int rc = -EBUSY;
	u16 len = 0;
	u8 *valid;

	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
		memset(ctx->resp, 0, PAGE_SIZE);

	req_type = le16_to_cpu(ctx->req->req_type);
	if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
		netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
			   req_type);
		goto exit;
	}

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
	    msg_len > bp->hwrm_max_ext_req_len) {
		rc = -E2BIG;
		goto exit;
	}

	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
				   req_type);
			rc = -EINVAL;
			goto exit;
		}
	}

	token = __hwrm_acquire_token(bp, dst);
	if (!token) {
		rc = -ENOMEM;
		goto exit;
	}
	ctx->req->seq_id = cpu_to_le16(token->seq_id);

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		short_input.req_type = ctx->req->req_type;
		short_input.signature =
			cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr = cpu_to_le64(ctx->dma_handle);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Ensure any associated DMA buffers are written before the doorbell */
	wmb();

	/* Write the request to the HWRM communication channel */
	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + bar_offset + i);

	/* Ring the channel doorbell */
	writel(1, bp->bar0 + doorbell_offset);

	hwrm_req_dbg(bp, ctx->req);

	if (!pci_is_enabled(bp->pdev)) {
		rc = -ENODEV;
		goto exit;
	}

	/* Limit the timeout to an upper bound */
	timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
	/* convert the timeout to microseconds */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for the remaining timeout.
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);

	if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
		/* Wait until the response completion interrupt is processed */
		while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
		       i++ < tmo_count) {
			/* Abort the wait if the firmware health check has
			 * failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;
			/* on the first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
						 req_type, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
			hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
				 req_type);
			goto exit;
		}
		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
		valid = ((u8 *)ctx->resp) + len - 1;
	} else {
		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never match an out of seq response */
		int j;

		/* Poll until the response length is updated */
		for (i = 0; i < tmo_count; i++) {
			/* Abort the wait if the firmware health check has
			 * failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;

			if (token &&
			    READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
				__hwrm_release_token(bp, token);
				token = NULL;
			}

			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
			if (len) {
				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

				if (resp_seq == ctx->req->seq_id)
					break;
				if (resp_seq != seen_out_of_seq) {
					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
						    le16_to_cpu(resp_seq),
						    req_type,
						    le16_to_cpu(ctx->req->seq_id));
					seen_out_of_seq = resp_seq;
				}
			}

			/* on the first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
						 req_type,
						 le16_to_cpu(ctx->req->seq_id),
						 len, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (i >= tmo_count) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
				 hwrm_total_timeout(i), req_type,
				 le16_to_cpu(ctx->req->seq_id), len);
			goto exit;
		}

		/* The last byte of the response contains the valid bit */
		valid = ((u8 *)ctx->resp) + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			if (j < 10) {
				udelay(1);
				j++;
			} else {
				usleep_range(20, 30);
				j += 20;
			}
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
				 hwrm_total_timeout(i) + j, req_type,
				 le16_to_cpu(ctx->req->seq_id), len, *valid);
			goto exit;
		}
	}

	/* Zero the valid bit for compatibility. The valid bit in an older
	 * spec may become a new field in a newer spec. Make sure that a
	 * field not implemented by the older spec reads back as zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(ctx->resp->error_code);
	if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
		netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
			    req_type);
	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			 req_type, token->seq_id, rc);
	rc = __hwrm_to_stderr(rc);
exit:
	if (token)
		__hwrm_release_token(bp, token);
	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
	else
		__hwrm_ctx_drop(bp, ctx);
	return rc;
}

/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: The request to send. Unless the request has been held via
 *	hwrm_req_hold(), its DMA resources are released (the request is
 *	consumed) before this function returns.
 *
 * Send the request to the device and block until a response arrives or the
 * command times out.
 *
 * Return: zero on success, otherwise a negative error code.
 */
int hwrm_req_send(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (!ctx)
		return -EINVAL;

	return __hwrm_send(bp, ctx);
}

/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send without logging.
 *
 * The same as hwrm_req_send(), except that error messages are emitted at
 * debug level instead of error level.
 *
 * Return: same as hwrm_req_send().
 */
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}
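
/* Illustrative usage of the request API by callers elsewhere in the driver.
 * This is only a sketch of the pattern; the request type and field shown
 * (HWRM_FUNC_QCAPS, fid) are examples, not code from this file.
 *
 *	struct hwrm_func_qcaps_output *resp;
 *	struct hwrm_func_qcaps_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
 *	if (rc)
 *		return rc;
 *	req->fid = cpu_to_le16(0xffff);
 *	resp = hwrm_req_hold(bp, req);	// hold to read the response later
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		... consume resp fields ...
 *	hwrm_req_drop(bp, req);		// always release the held request
 *	return rc;
 */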

/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request to attach indirect data to.
 * @size: The size of the allocation in bytes.
 * @dma_handle: Returns the bus address of the slice. The caller is expected
 *	to pass this address to the firmware in the appropriate request field.
 *
 * Allocate DMA memory whose lifetime is bound to the request: it is released
 * automatically when the request is consumed by hwrm_req_send() or dropped
 * by hwrm_req_drop(). Small allocations are suballocated from the request
 * buffer itself; larger ones fall back to dma_alloc_coherent(), and only one
 * such coherent slice may be attached to a request at a time.
 *
 * Return: A pointer to the slice, or NULL on failure.
 */
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
	struct input *input = req;
	u8 *addr, *req_addr = req;
	u32 max_offset, offset;

	if (!ctx)
		return NULL;

	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
	offset = max_offset - size;
	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
	addr = req_addr + offset;

	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
		ctx->allocated = end - addr;
		*dma_handle = ctx->dma_handle + offset;
		return addr;
	}

	/* could not suballocate from the request buffer, create a new mapping */
	if (ctx->slice_addr) {
		/* a second mapping can only be due to a software bug, be loud */
		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
	if (!addr)
		return NULL;

	ctx->slice_addr = addr;
	ctx->slice_size = size;
	ctx->slice_handle = *dma_handle;

	return addr;
}
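
/* Illustrative use of hwrm_req_dma_slice(); a sketch of the pattern only,
 * with an assumed request type and field name (HWRM_NVM_GET_DIR_ENTRIES,
 * host_dest_addr), not code from this file.
 *
 *	struct hwrm_nvm_get_dir_entries_input *req;
 *	dma_addr_t dma_handle;
 *	u8 *buf;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
 *	if (rc)
 *		return rc;
 *	buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 *	if (!buf) {
 *		hwrm_req_drop(bp, req);
 *		return -ENOMEM;
 *	}
 *	req->host_dest_addr = cpu_to_le64(dma_handle);
 *	hwrm_req_hold(bp, req);		// keep buf valid across the send
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		memcpy(dest, buf, buf_len);
 *	hwrm_req_drop(bp, req);		// also frees the slice
 *	return rc;
 */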