// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include "gdma.h"
#include "hw_channel.h"

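/* Allocate a free message ID for a new in-flight HWC request. The counting
 * semaphore blocks the caller while all IDs are in use; once past it, the
 * spinlock-protected bitmap is guaranteed to contain a free slot.
 */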
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
    struct gdma_resource *r = &hwc->inflight_msg_res;
    unsigned long flags;
    u32 index;

    down(&hwc->sema);

    spin_lock_irqsave(&r->lock, flags);

    index = find_first_zero_bit(hwc->inflight_msg_res.map,
                    hwc->inflight_msg_res.size);

    bitmap_set(hwc->inflight_msg_res.map, index, 1);

    spin_unlock_irqrestore(&r->lock, flags);

    *msg_id = index;

    return 0;
}

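/* Return a message ID to the bitmap and wake up one waiter, if any. */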
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
    struct gdma_resource *r = &hwc->inflight_msg_res;
    unsigned long flags;

    spin_lock_irqsave(&r->lock, flags);
    bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
    spin_unlock_irqrestore(&r->lock, flags);

    up(&hwc->sema);
}

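/* A response must carry at least a complete gdma_resp_hdr and must fit in
 * the caller's output buffer.
 */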
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
                    const struct gdma_resp_hdr *resp_msg,
                    u32 resp_len)
{
    if (resp_len < sizeof(*resp_msg))
        return -EPROTO;

    if (resp_len > caller_ctx->output_buflen)
        return -EPROTO;

    return 0;
}

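/* Match a received response to its waiting caller by message ID, copy the
 * payload into the caller's output buffer, and wake the caller up.
 */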
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
                 const struct gdma_resp_hdr *resp_msg)
{
    struct hwc_caller_ctx *ctx;
    int err;

    if (!test_bit(resp_msg->response.hwc_msg_id,
              hwc->inflight_msg_res.map)) {
        dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
            resp_msg->response.hwc_msg_id);
        return;
    }

    ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
    err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
    if (err)
        goto out;

    ctx->status_code = resp_msg->status;

    memcpy(ctx->output_buf, resp_msg, resp_len);
out:
    ctx->error = err;
    complete(&ctx->comp_event);
}

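/* Post a single-SGE receive WQE so the HW can place the next response into
 * this work request's buffer.
 */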
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
                struct hwc_work_request *req)
{
    struct device *dev = hwc_rxq->hwc->dev;
    struct gdma_sge *sge;
    int err;

    sge = &req->sge;
    sge->address = (u64)req->buf_sge_addr;
    sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
    sge->size = req->buf_len;

    memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
    req->wqe_req.sgl = sge;
    req->wqe_req.num_sge = 1;
    req->wqe_req.client_data_unit = 0;

    err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
    if (err)
        dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
    return err;
}

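/* EQ callback used while the channel is being established: the HW reports
 * queue IDs, size limits and the doorbell through a series of init EQEs,
 * then signals GDMA_EQE_HWC_INIT_DONE.
 */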
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
                    struct gdma_event *event)
{
    struct hw_channel_context *hwc = ctx;
    struct gdma_dev *gd = hwc->gdma_dev;
    union hwc_init_type_data type_data;
    union hwc_init_eq_id_db eq_db;
    u32 type, val;

    switch (event->type) {
    case GDMA_EQE_HWC_INIT_EQ_ID_DB:
        eq_db.as_uint32 = event->details[0];
        hwc->cq->gdma_eq->id = eq_db.eq_id;
        gd->doorbell = eq_db.doorbell;
        break;

    case GDMA_EQE_HWC_INIT_DATA:
        type_data.as_uint32 = event->details[0];
        type = type_data.type;
        val = type_data.value;

        switch (type) {
        case HWC_INIT_DATA_CQID:
            hwc->cq->gdma_cq->id = val;
            break;

        case HWC_INIT_DATA_RQID:
            hwc->rxq->gdma_wq->id = val;
            break;

        case HWC_INIT_DATA_SQID:
            hwc->txq->gdma_wq->id = val;
            break;

        case HWC_INIT_DATA_QUEUE_DEPTH:
            hwc->hwc_init_q_depth_max = (u16)val;
            break;

        case HWC_INIT_DATA_MAX_REQUEST:
            hwc->hwc_init_max_req_msg_size = val;
            break;

        case HWC_INIT_DATA_MAX_RESPONSE:
            hwc->hwc_init_max_resp_msg_size = val;
            break;

        case HWC_INIT_DATA_MAX_NUM_CQS:
            gd->gdma_context->max_num_cqs = val;
            break;

        case HWC_INIT_DATA_PDID:
            hwc->gdma_dev->pdid = val;
            break;

        case HWC_INIT_DATA_GPA_MKEY:
            hwc->rxq->msg_buf->gpa_mkey = val;
            hwc->txq->msg_buf->gpa_mkey = val;
            break;

        case HWC_INIT_DATA_PF_DEST_RQ_ID:
            hwc->pf_dest_vrq_id = val;
            break;

        case HWC_INIT_DATA_PF_DEST_CQ_ID:
            hwc->pf_dest_vrcq_id = val;
            break;
        }

        break;

    case GDMA_EQE_HWC_INIT_DONE:
        complete(&hwc->hwc_init_eqe_comp);
        break;

    default:
        /* Ignore unknown events, which should never happen. */
        break;
    }
}

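/* RX completion path: locate the work request that owns the completed
 * buffer, hand the response to the waiting caller, then repost the buffer
 * to the HW.
 */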
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
                      const struct hwc_rx_oob *rx_oob)
{
    struct hw_channel_context *hwc = ctx;
    struct hwc_wq *hwc_rxq = hwc->rxq;
    struct hwc_work_request *rx_req;
    struct gdma_resp_hdr *resp;
    struct gdma_wqe *dma_oob;
    struct gdma_queue *rq;
    struct gdma_sge *sge;
    u64 rq_base_addr;
    u64 rx_req_idx;
    u8 *wqe;

    if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
        return;

    rq = hwc_rxq->gdma_wq;
    wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
    dma_oob = (struct gdma_wqe *)wqe;

    sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

    /* Select the RX work request for virtual address and for reposting. */
    rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
    rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

    rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
    resp = (struct gdma_resp_hdr *)rx_req->buf_va;

    if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
        dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
            resp->response.hwc_msg_id);
        return;
    }

    mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

    /* Stop using 'resp' from here on: its buffer is reposted to the HW
     * by mana_hwc_post_rx_wqe() below.
     */
    resp = NULL;

    mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

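/* TX completion path: HWC send completions carry no payload the driver
 * needs, so just sanity-check that the completion is for our queue.
 */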
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
                      const struct hwc_rx_oob *rx_oob)
{
    struct hw_channel_context *hwc = ctx;
    struct hwc_wq *hwc_txq = hwc->txq;

    WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

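/* The next three helpers are thin wrappers that fill in a gdma_queue_spec
 * and delegate queue creation to mana_gd_create_hwc_queue().
 */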
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
                   enum gdma_queue_type type, u64 queue_size,
                   struct gdma_queue **queue)
{
    struct gdma_queue_spec spec = {};

    if (type != GDMA_SQ && type != GDMA_RQ)
        return -EINVAL;

    spec.type = type;
    spec.monitor_avl_buf = false;
    spec.queue_size = queue_size;

    return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
                   u64 queue_size,
                   void *ctx, gdma_cq_callback *cb,
                   struct gdma_queue *parent_eq,
                   struct gdma_queue **queue)
{
    struct gdma_queue_spec spec = {};

    spec.type = GDMA_CQ;
    spec.monitor_avl_buf = false;
    spec.queue_size = queue_size;
    spec.cq.context = ctx;
    spec.cq.callback = cb;
    spec.cq.parent_eq = parent_eq;

    return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
                   u64 queue_size,
                   void *ctx, gdma_eq_callback *cb,
                   struct gdma_queue **queue)
{
    struct gdma_queue_spec spec = {};

    spec.type = GDMA_EQ;
    spec.monitor_avl_buf = false;
    spec.queue_size = queue_size;
    spec.eq.context = ctx;
    spec.eq.callback = cb;
    spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

    return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

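/* Shared CQ callback: drain up to queue_depth completions, dispatch each to
 * the RX or TX handler based on which WQ produced it, then re-arm the CQ.
 */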
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
    struct hwc_rx_oob comp_data = {};
    struct gdma_comp *completions;
    struct hwc_cq *hwc_cq = ctx;
    int comp_read, i;

    WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

    completions = hwc_cq->comp_buf;
    comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
    WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

    for (i = 0; i < comp_read; ++i) {
        comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

        if (completions[i].is_sq)
            hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
                        completions[i].wq_num,
                        &comp_data);
        else
            hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
                        completions[i].wq_num,
                        &comp_data);
    }

    mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
    kfree(hwc_cq->comp_buf);

    if (hwc_cq->gdma_cq)
        mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

    if (hwc_cq->gdma_eq)
        mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

    kfree(hwc_cq);
}

static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
                  gdma_eq_callback *callback, void *ctx,
                  hwc_rx_event_handler_t *rx_ev_hdlr,
                  void *rx_ev_ctx,
                  hwc_tx_event_handler_t *tx_ev_hdlr,
                  void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
    struct gdma_queue *eq, *cq;
    struct gdma_comp *comp_buf;
    struct hwc_cq *hwc_cq;
    u32 eq_size, cq_size;
    int err;

    eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
    if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
        eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

    cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
    if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
        cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

    hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
    if (!hwc_cq)
        return -ENOMEM;

    err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
    if (err) {
        dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
        goto out;
    }
    hwc_cq->gdma_eq = eq;

    err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
                      eq, &cq);
    if (err) {
        dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
        goto out;
    }
    hwc_cq->gdma_cq = cq;

    comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
    if (!comp_buf) {
        err = -ENOMEM;
        goto out;
    }

    hwc_cq->hwc = hwc;
    hwc_cq->comp_buf = comp_buf;
    hwc_cq->queue_depth = q_depth;
    hwc_cq->rx_event_handler = rx_ev_hdlr;
    hwc_cq->rx_event_ctx = rx_ev_ctx;
    hwc_cq->tx_event_handler = tx_ev_hdlr;
    hwc_cq->tx_event_ctx = tx_ev_ctx;

    *hwc_cq_ptr = hwc_cq;
    return 0;
out:
    mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
    return err;
}

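/* Carve one contiguous DMA region into q_depth fixed-size slots, one per
 * work request, recording each slot's virtual and DMA addresses.
 */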
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
                  u32 max_msg_size,
                  struct hwc_dma_buf **dma_buf_ptr)
{
    struct gdma_context *gc = hwc->gdma_dev->gdma_context;
    struct hwc_work_request *hwc_wr;
    struct hwc_dma_buf *dma_buf;
    struct gdma_mem_info *gmi;
    void *virt_addr;
    u32 buf_size;
    u8 *base_pa;
    int err;
    u16 i;

    dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
    if (!dma_buf)
        return -ENOMEM;

    dma_buf->num_reqs = q_depth;

    buf_size = PAGE_ALIGN(q_depth * max_msg_size);

    gmi = &dma_buf->mem_info;
    err = mana_gd_alloc_memory(gc, buf_size, gmi);
    if (err) {
        dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
        goto out;
    }

    virt_addr = dma_buf->mem_info.virt_addr;
    base_pa = (u8 *)dma_buf->mem_info.dma_handle;

    for (i = 0; i < q_depth; i++) {
        hwc_wr = &dma_buf->reqs[i];

        hwc_wr->buf_va = virt_addr + i * max_msg_size;
        hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

        hwc_wr->buf_len = max_msg_size;
    }

    *dma_buf_ptr = dma_buf;
    return 0;
out:
    kfree(dma_buf);
    return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
                     struct hwc_dma_buf *dma_buf)
{
    if (!dma_buf)
        return;

    mana_gd_free_memory(&dma_buf->mem_info);

    kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
                struct hwc_wq *hwc_wq)
{
    mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

    if (hwc_wq->gdma_wq)
        mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
                      hwc_wq->gdma_wq);

    kfree(hwc_wq);
}

static int mana_hwc_create_wq(struct hw_channel_context *hwc,
                  enum gdma_queue_type q_type, u16 q_depth,
                  u32 max_msg_size, struct hwc_cq *hwc_cq,
                  struct hwc_wq **hwc_wq_ptr)
{
    struct gdma_queue *queue;
    struct hwc_wq *hwc_wq;
    u32 queue_size;
    int err;

    WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

    if (q_type == GDMA_RQ)
        queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
    else
        queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

    if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
        queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

    hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
    if (!hwc_wq)
        return -ENOMEM;

    err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
    if (err)
        goto out;

    hwc_wq->hwc = hwc;
    hwc_wq->gdma_wq = queue;
    hwc_wq->queue_depth = q_depth;
    hwc_wq->hwc_cq = hwc_cq;

    err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
                     &hwc_wq->msg_buf);
    if (err)
        goto out;

    *hwc_wq_ptr = hwc_wq;
    return 0;
out:
    if (err)
        mana_hwc_destroy_wq(hwc, hwc_wq);
    return err;
}

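/* Build and post a send WQE: the hwc_tx_oob travels as inline OOB data and
 * tells the HW which virtual RQ/RCQ on the other end should receive it.
 */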
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
                struct hwc_work_request *req,
                u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
                bool dest_pf)
{
    struct device *dev = hwc_txq->hwc->dev;
    struct hwc_tx_oob *tx_oob;
    struct gdma_sge *sge;
    int err;

    if (req->msg_size == 0 || req->msg_size > req->buf_len) {
        dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
            req->msg_size, req->buf_len);
        return -EINVAL;
    }

    tx_oob = &req->tx_oob;

    tx_oob->vrq_id = dest_virt_rq_id;
    tx_oob->dest_vfid = 0;
    tx_oob->vrcq_id = dest_virt_rcq_id;
    tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
    tx_oob->loopback = false;
    tx_oob->lso_override = false;
    tx_oob->dest_pf = dest_pf;
    tx_oob->vsq_id = hwc_txq->gdma_wq->id;

    sge = &req->sge;
    sge->address = (u64)req->buf_sge_addr;
    sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
    sge->size = req->msg_size;

    memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
    req->wqe_req.sgl = sge;
    req->wqe_req.num_sge = 1;
    req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
    req->wqe_req.inline_oob_data = tx_oob;
    req->wqe_req.client_data_unit = 0;

    err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
    if (err)
        dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
    return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
                      u16 num_msg)
{
    int err;

    sema_init(&hwc->sema, num_msg);

    err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
    if (err)
        dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
    return err;
}

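/* Prime the RQ with receive WQEs, allocate the per-message caller contexts,
 * and verify that the EQ actually delivers interrupts.
 */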
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
                 u32 max_req_msg_size, u32 max_resp_msg_size)
{
    struct gdma_context *gc = hwc->gdma_dev->gdma_context;
    struct hwc_wq *hwc_rxq = hwc->rxq;
    struct hwc_work_request *req;
    struct hwc_caller_ctx *ctx;
    int err;
    int i;

    /* Post all WQEs on the RQ */
    for (i = 0; i < q_depth; i++) {
        req = &hwc_rxq->msg_buf->reqs[i];
        err = mana_hwc_post_rx_wqe(hwc_rxq, req);
        if (err)
            return err;
    }

    ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    for (i = 0; i < q_depth; ++i)
        init_completion(&ctx[i].comp_event);

    hwc->caller_ctx = ctx;

    return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

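/* Hand the queues' DMA addresses to the HW over the shared-memory channel,
 * then wait for the init EQEs to report the negotiated limits.
 */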
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
                      u32 *max_req_msg_size,
                      u32 *max_resp_msg_size)
{
    struct hw_channel_context *hwc = gc->hwc.driver_data;
    struct gdma_queue *rq = hwc->rxq->gdma_wq;
    struct gdma_queue *sq = hwc->txq->gdma_wq;
    struct gdma_queue *eq = hwc->cq->gdma_eq;
    struct gdma_queue *cq = hwc->cq->gdma_cq;
    int err;

    init_completion(&hwc->hwc_init_eqe_comp);

    err = mana_smc_setup_hwc(&gc->shm_channel, false,
                 eq->mem_info.dma_handle,
                 cq->mem_info.dma_handle,
                 rq->mem_info.dma_handle,
                 sq->mem_info.dma_handle,
                 eq->eq.msix_index);
    if (err)
        return err;

    if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
        return -ETIMEDOUT;

    *q_depth = hwc->hwc_init_q_depth_max;
    *max_req_msg_size = hwc->hwc_init_max_req_msg_size;
    *max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

    /* Both cq->id and gc->max_num_cqs were set in
     * mana_hwc_init_event_handler().
     */
    if (WARN_ON(cq->id >= gc->max_num_cqs))
        return -EPROTO;

    gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
    if (!gc->cq_table)
        return -ENOMEM;

    gc->cq_table[cq->id] = cq;

    return 0;
}

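/* Create the EQ/CQ pair and both work queues. The bootstrap sizes are fixed
 * constants; the real limits are learned later from the init EQEs.
 */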
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
                u32 max_req_msg_size, u32 max_resp_msg_size)
{
    int err;

    err = mana_hwc_init_inflight_msg(hwc, q_depth);
    if (err)
        return err;

    /* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
     * queue depth and RQ queue depth.
     */
    err = mana_hwc_create_cq(hwc, q_depth * 2,
                 mana_hwc_init_event_handler, hwc,
                 mana_hwc_rx_event_handler, hwc,
                 mana_hwc_tx_event_handler, hwc, &hwc->cq);
    if (err) {
        dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
        goto out;
    }

    err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
                 hwc->cq, &hwc->rxq);
    if (err) {
        dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
        goto out;
    }

    err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
                 hwc->cq, &hwc->txq);
    if (err) {
        dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
        goto out;
    }

    hwc->num_inflight_msg = q_depth;
    hwc->max_req_msg_size = max_req_msg_size;

    return 0;
out:
    /* mana_hwc_create_channel() will do the cleanup. */
    return err;
}

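/* Bring up the HW channel: build the queues in memory, hand them to the HW
 * over the shared-memory channel, then exercise the channel once.
 */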
int mana_hwc_create_channel(struct gdma_context *gc)
{
    u32 max_req_msg_size, max_resp_msg_size;
    struct gdma_dev *gd = &gc->hwc;
    struct hw_channel_context *hwc;
    u16 q_depth_max;
    int err;

    hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
    if (!hwc)
        return -ENOMEM;

    gd->gdma_context = gc;
    gd->driver_data = hwc;
    hwc->gdma_dev = gd;
    hwc->dev = gc->dev;

    /* HWC's instance number is always 0. */
    gd->dev_id.as_uint32 = 0;
    gd->dev_id.type = GDMA_DEVICE_HWC;

    gd->pdid = INVALID_PDID;
    gd->doorbell = INVALID_DOORBELL;

    /* mana_hwc_init_queues() only creates the required data structures,
     * and doesn't touch the HWC device.
     */
    err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
                   HW_CHANNEL_MAX_REQUEST_SIZE,
                   HW_CHANNEL_MAX_RESPONSE_SIZE);
    if (err) {
        dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
        goto out;
    }

    err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
                     &max_resp_msg_size);
    if (err) {
        dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
        goto out;
    }

    err = mana_hwc_test_channel(gc->hwc.driver_data,
                    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
                    max_req_msg_size, max_resp_msg_size);
    if (err) {
        dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
        goto out;
    }

    return 0;
out:
    mana_hwc_destroy_channel(gc);
    return err;
}

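/* Tear everything down in reverse order of creation. Safe to call on a
 * partially constructed channel.
 */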
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
    struct hw_channel_context *hwc = gc->hwc.driver_data;

    if (!hwc)
        return;

    /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
     * non-zero, the HWC worked and we should tear down the HWC here.
     */
    if (gc->max_num_cqs > 0) {
        mana_smc_teardown_hwc(&gc->shm_channel, false);
        gc->max_num_cqs = 0;
    }

    kfree(hwc->caller_ctx);
    hwc->caller_ctx = NULL;

    if (hwc->txq)
        mana_hwc_destroy_wq(hwc, hwc->txq);

    if (hwc->rxq)
        mana_hwc_destroy_wq(hwc, hwc->rxq);

    if (hwc->cq)
        mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

    mana_gd_free_res_map(&hwc->inflight_msg_res);

    hwc->num_inflight_msg = 0;

    hwc->gdma_dev->doorbell = INVALID_DOORBELL;
    hwc->gdma_dev->pdid = INVALID_PDID;

    kfree(hwc);
    gc->hwc.driver_data = NULL;
    gc->hwc.gdma_context = NULL;

    vfree(gc->cq_table);
    gc->cq_table = NULL;
}

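/* Synchronously send a request to the HW and wait (up to 30 seconds) for
 * the response, which mana_hwc_handle_resp() copies into 'resp'.
 */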
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
              const void *req, u32 resp_len, void *resp)
{
    struct gdma_context *gc = hwc->gdma_dev->gdma_context;
    struct hwc_work_request *tx_wr;
    struct hwc_wq *txq = hwc->txq;
    struct gdma_req_hdr *req_msg;
    struct hwc_caller_ctx *ctx;
    u32 dest_vrcq = 0;
    u32 dest_vrq = 0;
    u16 msg_id;
    int err;

    mana_hwc_get_msg_index(hwc, &msg_id);

    tx_wr = &txq->msg_buf->reqs[msg_id];

    if (req_len > tx_wr->buf_len) {
        dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
            tx_wr->buf_len);
        err = -EINVAL;
        goto out;
    }

    ctx = hwc->caller_ctx + msg_id;
    ctx->output_buf = resp;
    ctx->output_buflen = resp_len;

    req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
    if (req)
        memcpy(req_msg, req, req_len);

    req_msg->req.hwc_msg_id = msg_id;

    tx_wr->msg_size = req_len;

    if (gc->is_pf) {
        dest_vrq = hwc->pf_dest_vrq_id;
        dest_vrcq = hwc->pf_dest_vrcq_id;
    }

    err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
    if (err) {
        dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
        goto out;
    }

    if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
        dev_err(hwc->dev, "HWC: Request timed out!\n");
        err = -ETIMEDOUT;
        goto out;
    }

    if (ctx->error) {
        err = ctx->error;
        goto out;
    }

    if (ctx->status_code) {
        dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
            ctx->status_code);
        err = -EPROTO;
        goto out;
    }
out:
    mana_hwc_put_msg_index(hwc, msg_id);
    return err;
}
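
/* Illustrative only: a minimal sketch of a GDMA client issuing one
 * request/response exchange over the HWC. It mirrors the pattern used by
 * callers in gdma_main.c, but the function itself is hypothetical and not
 * part of the driver. It assumes mana_gd_init_req_hdr(),
 * GDMA_QUERY_MAX_RESOURCES and the request/response structs from gdma.h.
 */
static int __maybe_unused example_hwc_query_max_resources(struct gdma_context *gc)
{
    struct gdma_query_max_resources_resp resp = {};
    struct gdma_general_req req = {};
    struct hw_channel_context *hwc = gc->hwc.driver_data;
    int err;

    /* Fill in the common request header: message type and sizes. */
    mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
                 sizeof(req), sizeof(resp));

    /* Blocks until mana_hwc_handle_resp() completes ctx->comp_event,
     * or until the 30-second timeout in mana_hwc_send_request() hits.
     */
    err = mana_hwc_send_request(hwc, sizeof(req), &req,
                    sizeof(resp), &resp);
    if (err)
        return err;

    if (resp.hdr.status)
        return -EPROTO;

    /* On success, resp now carries the HW resource limits. */
    return 0;
}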