#include "efa_com.h"
#include "efa_regs_defs.h"

#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */

#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
#define EFA_MMIO_READ_INVALID 0xffffffff

#define EFA_POLL_INTERVAL_MS 100 /* msecs */

#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32

#define EFA_CTRL_MAJOR 0
#define EFA_CTRL_MINOR 0
#define EFA_CTRL_SUB_MINOR 1

enum efa_cmd_status {
	EFA_CMD_SUBMITTED,
	EFA_CMD_COMPLETED,
};

struct efa_comp_ctx {
	struct completion wait_event;
	struct efa_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum efa_cmd_status status;
	u8 cmd_opcode;
	u8 occupied;
};

static const char *efa_com_cmd_str(u8 cmd)
{
#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd

	switch (cmd) {
	EFA_CMD_STR_CASE(CREATE_QP);
	EFA_CMD_STR_CASE(MODIFY_QP);
	EFA_CMD_STR_CASE(QUERY_QP);
	EFA_CMD_STR_CASE(DESTROY_QP);
	EFA_CMD_STR_CASE(CREATE_AH);
	EFA_CMD_STR_CASE(DESTROY_AH);
	EFA_CMD_STR_CASE(REG_MR);
	EFA_CMD_STR_CASE(DEREG_MR);
	EFA_CMD_STR_CASE(CREATE_CQ);
	EFA_CMD_STR_CASE(DESTROY_CQ);
	EFA_CMD_STR_CASE(GET_FEATURE);
	EFA_CMD_STR_CASE(SET_FEATURE);
	EFA_CMD_STR_CASE(GET_STATS);
	EFA_CMD_STR_CASE(ALLOC_PD);
	EFA_CMD_STR_CASE(DEALLOC_PD);
	EFA_CMD_STR_CASE(ALLOC_UAR);
	EFA_CMD_STR_CASE(DEALLOC_UAR);
	EFA_CMD_STR_CASE(CREATE_EQ);
	EFA_CMD_STR_CASE(DESTROY_EQ);
	default: return "unknown command opcode";
	}
#undef EFA_CMD_STR_CASE
}

void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
{
	*addr_low = lower_32_bits(addr);
	*addr_high = upper_32_bits(addr);
}

static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg = 0;
	u32 err;

	read_resp = mmio_read->read_resp;

	spin_lock(&mmio_read->lock);
	mmio_read->seq_num++;

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
		mmio_read->seq_num);

	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
	do {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;
		udelay(1);
	} while (time_is_after_jiffies(exp_time));

	if (read_resp->req_id != mmio_read->seq_num) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
			mmio_read->seq_num, offset, read_resp->req_id,
			read_resp->reg_off);
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	if (read_resp->reg_off != offset) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register failed: wrong offset provided\n");
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	err = read_resp->reg_val;
out:
	spin_unlock(&mmio_read->lock);
	return err;
}

static int efa_com_admin_init_sq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 aq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
	if (!sq->entries)
		return -ENOMEM;

	spin_lock_init(&sq->lock);

	sq->cc = 0;
	sq->pc = 0;
	sq->phase = 1;

	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);

	addr_high = upper_32_bits(sq->dma_addr);
	addr_low = lower_32_bits(sq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aq_entry));

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

	return 0;
}

static int efa_com_admin_init_cq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 acq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->cc = 0;
	cq->phase = 1;

	addr_high = upper_32_bits(cq->dma_addr);
	addr_low = lower_32_bits(cq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
		sizeof(struct efa_admin_acq_entry));
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
		aq->msix_vector_idx);

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

	return 0;
}

static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high;
	u32 aenq_caps = 0;
	u16 size;

	if (!aenq_handlers) {
		ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries)
		return -ENOMEM;

	aenq->aenq_handlers = aenq_handlers;
	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
	aenq->cc = 0;
	aenq->phase = 1;

	addr_low = lower_32_bits(aenq->dma_addr);
	addr_high = upper_32_bits(aenq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aenq_entry));
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
		aenq->msix_vector_idx);
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
	 * Init cons_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);

	return 0;
}

/* ID to be used with efa_com_get_comp_ctx */
static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
{
	u16 ctx_id;

	spin_lock(&aq->comp_ctx_lock);
	ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
	aq->comp_ctx_pool_next++;
	spin_unlock(&aq->comp_ctx_lock);

	return ctx_id;
}

static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
				   u16 ctx_id)
{
	spin_lock(&aq->comp_ctx_lock);
	aq->comp_ctx_pool_next--;
	aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
	spin_unlock(&aq->comp_ctx_lock);
}

static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
					struct efa_comp_ctx *comp_ctx)
{
	u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
			     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
	u16 ctx_id = cmd_id & (aq->depth - 1);

	ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
	comp_ctx->occupied = 0;
	efa_com_dealloc_ctx_id(aq, ctx_id);
}

static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
						 u16 cmd_id, bool capture)
{
	u16 ctx_id = cmd_id & (aq->depth - 1);

	if (aq->comp_ctx[ctx_id].occupied && capture) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Completion context for command_id %#x is occupied\n",
			cmd_id);
		return NULL;
	}

	if (capture) {
		aq->comp_ctx[ctx_id].occupied = 1;
		ibdev_dbg(aq->efa_dev,
			  "Take completion ctxt for command_id %#x\n", cmd_id);
	}

	return &aq->comp_ctx[ctx_id];
}

static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						       struct efa_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct efa_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct efa_admin_aq_entry *aqe;
	struct efa_comp_ctx *comp_ctx;
	u16 queue_size_mask;
	u16 cmd_id;
	u16 ctx_id;
	u16 pi;

	queue_size_mask = aq->depth - 1;
	pi = aq->sq.pc & queue_size_mask;

	ctx_id = efa_com_alloc_ctx_id(aq);

	/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= aq->sq.pc & ~queue_size_mask;
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	EFA_SET(&cmd->aq_common_descriptor.flags,
		EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
	if (!comp_ctx) {
		efa_com_dealloc_ctx_id(aq, ctx_id);
		return ERR_PTR(-EINVAL);
	}

	comp_ctx->status = EFA_CMD_SUBMITTED;
	comp_ctx->comp_size = comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	aqe = &aq->sq.entries[pi];
	memset(aqe, 0, sizeof(*aqe));
	memcpy(aqe, cmd, cmd_size_in_bytes);

	aq->sq.pc++;
	atomic64_inc(&aq->stats.submitted_cmd);

	if ((aq->sq.pc & queue_size_mask) == 0)
		aq->sq.phase = !aq->sq.phase;

	/* barrier not needed in case of writel */
	writel(aq->sq.pc, aq->sq.db_addr);

	return comp_ctx;
}

static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
{
	size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
	size_t size = aq->depth * sizeof(struct efa_comp_ctx);
	struct efa_comp_ctx *comp_ctx;
	u16 i;

	aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
	aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
	if (!aq->comp_ctx || !aq->comp_ctx_pool) {
		devm_kfree(aq->dmadev, aq->comp_ctx_pool);
		devm_kfree(aq->dmadev, aq->comp_ctx);
		return -ENOMEM;
	}

	for (i = 0; i < aq->depth; i++) {
		comp_ctx = efa_com_get_comp_ctx(aq, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);

		aq->comp_ctx_pool[i] = i;
	}

	spin_lock_init(&aq->comp_ctx_lock);

	aq->comp_ctx_pool_next = 0;

	return 0;
}

static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						     struct efa_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct efa_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	struct efa_comp_ctx *comp_ctx;

	spin_lock(&aq->sq.lock);
	if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
		ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
		spin_unlock(&aq->sq.lock);
		return ERR_PTR(-ENODEV);
	}

	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
					      comp_size_in_bytes);
	spin_unlock(&aq->sq.lock);
	if (IS_ERR(comp_ctx))
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return comp_ctx;
}

static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
						   struct efa_admin_acq_entry *cqe)
{
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
	if (!comp_ctx) {
		ibdev_err(aq->efa_dev,
			  "comp_ctx is NULL. Changing the admin queue running state\n");
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		return;
	}

	comp_ctx->status = EFA_CMD_COMPLETED;
	memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);

	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		complete(&comp_ctx->wait_event);
}

static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
{
	struct efa_admin_acq_entry *cqe;
	u16 queue_size_mask;
	u16 comp_num = 0;
	u8 phase;
	u16 ci;

	queue_size_mask = aq->depth - 1;

	ci = aq->cq.cc & queue_size_mask;
	phase = aq->cq.phase;

	cqe = &aq->cq.entries[ci];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		efa_com_handle_single_admin_completion(aq, cqe);

		ci++;
		comp_num++;
		if (ci == aq->depth) {
			ci = 0;
			phase = !phase;
		}

		cqe = &aq->cq.entries[ci];
	}

	aq->cq.cc += comp_num;
	aq->cq.phase = phase;
	aq->sq.cc += comp_num;
	atomic64_add(comp_num, &aq->stats.completed_cmd);
}

static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case EFA_ADMIN_BAD_OPCODE:
	case EFA_ADMIN_MALFORMED_REQUEST:
	case EFA_ADMIN_ILLEGAL_PARAMETER:
	case EFA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
						     struct efa_com_admin_queue *aq)
{
	unsigned long timeout;
	unsigned long flags;
	int err;

	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);

	while (1) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		if (comp_ctx->status != EFA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			ibdev_err_ratelimited(
				aq->efa_dev,
				"Wait for completion (polling) timeout\n");

			atomic64_inc(&aq->stats.no_completion);

			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
			err = -ETIME;
			goto out;
		}

		msleep(aq->poll_interval);
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}

static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
							struct efa_com_admin_queue *aq)
{
	unsigned long flags;
	int err;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(aq->completion_timeout));

	/*
	 * In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		atomic64_inc(&aq->stats.no_completion);

		if (comp_ctx->status == EFA_CMD_COMPLETED)
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
		else
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);

		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		err = -ETIME;
		goto out;
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}
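
/*
 * There are two ways to wait for an admin command completion:
 * Polling mode - poll the completion queue until the command status changes.
 * Interrupt mode - sleep on the command's completion object and rely on
 *                  efa_com_admin_q_comp_intr_handler() to mark completions.
 */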
static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
					     struct efa_com_admin_queue *aq)
{
	if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);

	return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
}
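
/**
 * efa_com_cmd_exec - Execute admin command
 * @aq: admin queue.
 * @cmd: the command to execute.
 * @cmd_size: the command size.
 * @comp: command completion return entry.
 * @comp_size: command completion size.
 *
 * Submit an admin command and then wait until the device returns a
 * completion. The completion is copied into @comp.
 *
 * @return - 0 on success, negative value on failure.
 */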
int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
		     struct efa_admin_aq_entry *cmd,
		     size_t cmd_size,
		     struct efa_admin_acq_entry *comp,
		     size_t comp_size)
{
	struct efa_comp_ctx *comp_ctx;
	int err;

	might_sleep();

	/* In case of queue FULL */
	down(&aq->avail_cmds);

	ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
		  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
		  cmd->aq_common_descriptor.opcode);
	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to submit command %s (opcode %u) err %ld\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));

		up(&aq->avail_cmds);
		atomic64_inc(&aq->stats.cmd_err);
		return PTR_ERR(comp_ctx);
	}

	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
	if (err) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to process command %s (opcode %u) comp_status %d err %d\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode,
			comp_ctx->user_cqe->acq_common_descriptor.status, err);
		atomic64_inc(&aq->stats.cmd_err);
	}

	up(&aq->avail_cmds);

	return err;
}
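
/**
 * efa_com_admin_destroy - Destroy the admin and the async events queues.
 * @edev: EFA communication layer struct
 */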
void efa_com_admin_destroy(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_com_admin_cq *cq = &aq->cq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size;

	clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	devm_kfree(edev->dmadev, aq->comp_ctx_pool);
	devm_kfree(edev->dmadev, aq->comp_ctx);

	size = aq->depth * sizeof(*sq->entries);
	dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);

	size = aq->depth * sizeof(*cq->entries);
	dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);

	size = aenq->depth * sizeof(*aenq->entries);
	dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
}
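
/**
 * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @edev: EFA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Mask or unmask the admin interrupt and mark the admin queue state
 * accordingly.
 */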
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);

	writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
	if (polling)
		set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
	else
		clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
}

static void efa_com_stats_init(struct efa_com_dev *edev)
{
	atomic64_t *s = (atomic64_t *)&edev->aq.stats;
	int i;

	for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}
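
/**
 * efa_com_admin_init - Init the admin and the async queues
 * @edev: EFA communication layer struct
 * @aenq_handlers: Those handlers to be called upon event.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */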
int efa_com_admin_init(struct efa_com_dev *edev,
		       struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	u32 timeout;
	u32 dev_sts;
	u32 cap;
	int err;

	dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, abort com init %#x\n", dev_sts);
		return -ENODEV;
	}

	aq->depth = EFA_ADMIN_QUEUE_DEPTH;

	aq->dmadev = edev->dmadev;
	aq->efa_dev = edev->efa_dev;
	set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);

	sema_init(&aq->avail_cmds, aq->depth);

	efa_com_stats_init(edev);

	err = efa_com_init_comp_ctxt(aq);
	if (err)
		return err;

	err = efa_com_admin_init_sq(edev);
	if (err)
		goto err_destroy_comp_ctxt;

	err = efa_com_admin_init_cq(edev);
	if (err)
		goto err_destroy_sq;

	efa_com_set_admin_polling_mode(edev, false);

	err = efa_com_admin_init_aenq(edev, aenq_handlers);
	if (err)
		goto err_destroy_cq;

	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		aq->completion_timeout = timeout * 100000;
	else
		aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;

	aq->poll_interval = EFA_POLL_INTERVAL_MS;

	set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
			  aq->cq.entries, aq->cq.dma_addr);
err_destroy_sq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
			  aq->sq.entries, aq->sq.dma_addr);
err_destroy_comp_ctxt:
	devm_kfree(edev->dmadev, aq->comp_ctx);

	return err;
}
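
/**
 * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @edev: EFA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up
 * all the pending threads that wait on the commands wait event.
 *
 * Note: Should be called after MSI-X interrupt.
 */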
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->aq.cq.lock, flags);
	efa_com_handle_admin_completion(&edev->aq);
	spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
}
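
/*
 * efa_com_get_specific_aenq_cb:
 * return the handler registered for the given event group, or the
 * unimplemented handler if none was registered.
 */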
static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
						     u16 group)
{
	struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;

	if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
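
/**
 * efa_com_aenq_intr_handler - AENQ interrupt handler
 * @edev: EFA communication layer struct
 * @data: Data of interrupt handler.
 *
 * Go over the async event notification queue and call the proper aenq handler.
 */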
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
{
	struct efa_admin_aenq_common_desc *aenq_common;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_admin_aenq_entry *aenq_e;
	efa_aenq_handler handler_cb;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = aenq->cc & (aenq->depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[ci];
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();

		/* Handle specific event */
		handler_cb = efa_com_get_specific_aenq_cb(edev,
							  aenq_common->group);
		handler_cb(data, aenq_e);

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == aenq->depth) {
			ci = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[ci];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->cc += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* barrier not needed in case of writel */
	writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
}

static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	u32 addr_high;
	u32 addr_low;

	/* dma_addr_bits is unknown at this point */
	addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
	addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);

	writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
	writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
}

int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (!mmio_read->read_resp)
		return -ENOMEM;

	efa_com_mmio_reg_read_resp_addr_init(edev);

	mmio_read->read_resp->req_id = 0;
	mmio_read->seq_num = 0;
	mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;

	return 0;
}

void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
}

int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 min_ctrl_ver = 0;
	u32 ctrl_ver_masked;
	u32 min_ver = 0;
	u32 ctrl_ver;
	u32 ver;

	/*
	 * Make sure the device version and controller version are at
	 * least the minimum the driver supports
	 */
	ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
	ctrl_ver = efa_com_reg_read32(edev,
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
		  EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));

	EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
		EFA_ADMIN_API_VERSION_MAJOR);
	EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
		EFA_ADMIN_API_VERSION_MINOR);
	if (ver < min_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(
		edev->efa_dev,
		"efa controller version: %d.%d.%d implementation version %d\n",
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));

	ctrl_ver_masked =
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);

	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
		EFA_CTRL_MAJOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
		EFA_CTRL_MINOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
		EFA_CTRL_SUB_MINOR);

	if (ctrl_ver_masked < min_ctrl_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
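
/**
 * efa_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @edev: EFA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on Success and negative value otherwise.
 */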
int efa_com_get_dma_width(struct efa_com_dev *edev)
{
	u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	int width;

	width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);

	ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);

	if (width < 32 || width > 64) {
		ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	edev->dma_addr_bits = width;

	return width;
}

static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);

		if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
		msleep(EFA_POLL_INTERVAL_MS);
	}

	return -ETIME;
}
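
/**
 * efa_com_dev_reset - Perform device FLR to the device.
 * @edev: EFA communication layer struct
 * @reset_reason: Specify what is the trigger for the reset in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */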
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap;
	u32 reset_val = 0;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	err = wait_for_reset_state(edev, timeout, 1);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
	}

	/* reset done */
	writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
	err = wait_for_reset_state(edev, timeout, 0);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
		return err;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
	else
		edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int efa_com_create_eq(struct efa_com_dev *edev,
			     struct efa_com_create_eq_params *params,
			     struct efa_com_create_eq_result *result)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_create_eq_resp resp = {};
	struct efa_admin_create_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
	EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
		params->entry_size_in_bytes / 4);
	cmd.depth = params->depth;
	cmd.event_bitmask = params->event_bitmask;
	cmd.msix_vec = params->msix_vec;

	efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
			     &cmd.ba.mem_addr_low);

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err) {
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to create eq[%d]\n", err);
		return err;
	}

	result->eqn = resp.eqn;

	return 0;
}

static void efa_com_destroy_eq(struct efa_com_dev *edev,
			       struct efa_com_destroy_eq_params *params)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_destroy_eq_resp resp = {};
	struct efa_admin_destroy_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
	cmd.eqn = params->eqn;

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err)
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
				      err);
}

static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	u32 val = 0;

	EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
	EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);

	writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
}

void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
				  struct efa_com_eq *eeq)
{
	struct efa_admin_eqe *eqe;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = eeq->cc & (eeq->depth - 1);
	phase = eeq->phase;
	eqe = &eeq->eqes[ci];

	/* Scan the event queue until there are no fresh entries left */
	while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the entry before the phase bit has
		 * been validated
		 */
		dma_rmb();

		eeq->cb(eeq, eqe);

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == eeq->depth) {
			ci = 0;
			phase = !phase;
		}

		eqe = &eeq->eqes[ci];
	}

	eeq->cc += processed;
	eeq->phase = phase;
	efa_com_arm_eq(eeq->edev, eeq);
}

void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	struct efa_com_destroy_eq_params params = {
		.eqn = eeq->eqn,
	};

	efa_com_destroy_eq(edev, &params);
	dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
			  eeq->eqes, eeq->dma_addr);
}

int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
		    efa_eqe_handler cb, u16 depth, u8 msix_vec)
{
	struct efa_com_create_eq_params params = {};
	struct efa_com_create_eq_result result = {};
	int err;

	params.depth = depth;
	params.entry_size_in_bytes = sizeof(*eeq->eqes);
	EFA_SET(&params.event_bitmask,
		EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
	params.msix_vec = msix_vec;

	eeq->eqes = dma_alloc_coherent(edev->dmadev,
				       params.depth * sizeof(*eeq->eqes),
				       &params.dma_addr, GFP_KERNEL);
	if (!eeq->eqes)
		return -ENOMEM;

	err = efa_com_create_eq(edev, &params, &result);
	if (err)
		goto err_free_coherent;

	eeq->eqn = result.eqn;
	eeq->edev = edev;
	eeq->dma_addr = params.dma_addr;
	eeq->phase = 1;
	eeq->depth = params.depth;
	eeq->cb = cb;
	efa_com_arm_eq(edev, eeq);

	return 0;

err_free_coherent:
	dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
			  eeq->eqes, params.dma_addr);
	return err;
}