// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"

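/*
 * Create the driver's queue topology: one EQ per configured interrupt
 * vector, a single MQ on the first EQ's CQ, one WQ per EQ, plus a CQ
 * set and an RQ set spanning all the EQs. Any allocation failure tears
 * down everything created so far.
 */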
int
efct_hw_init_queues(struct efct_hw *hw)
{
    struct hw_eq *eq = NULL;
    struct hw_cq *cq = NULL;
    struct hw_wq *wq = NULL;
    struct hw_mq *mq = NULL;

    struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
    struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
    struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
    u32 i = 0, j;

    hw->eq_count = 0;
    hw->cq_count = 0;
    hw->mq_count = 0;
    hw->wq_count = 0;
    hw->rq_count = 0;
    hw->hw_rq_count = 0;
    INIT_LIST_HEAD(&hw->eq_list);

    for (i = 0; i < hw->config.n_eq; i++) {
        /* Create EQ */
        eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
        if (!eq) {
            efct_hw_queue_teardown(hw);
            return -ENOMEM;
        }

        eqs[i] = eq;

        /* Create one MQ */
        if (!i) {
            cq = efct_hw_new_cq(eq,
                        hw->num_qentries[SLI4_QTYPE_CQ]);
            if (!cq) {
                efct_hw_queue_teardown(hw);
                return -ENOMEM;
            }

            mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
            if (!mq) {
                efct_hw_queue_teardown(hw);
                return -ENOMEM;
            }
        }

        /* Create WQ */
        cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
        if (!cq) {
            efct_hw_queue_teardown(hw);
            return -ENOMEM;
        }

        wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
        if (!wq) {
            efct_hw_queue_teardown(hw);
            return -ENOMEM;
        }
    }

    /* Create CQ set */
    if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
        efct_hw_queue_teardown(hw);
        return -EIO;
    }

    /* Create RQ set */
    if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
        efct_hw_queue_teardown(hw);
        return -EIO;
    }

    for (j = 0; j < i; j++) {
        rqs[j]->filter_mask = 0;
        rqs[j]->is_mrq = true;
        rqs[j]->base_mrq_id = rqs[0]->hdr->id;
    }

    hw->hw_mrq_count = i;

    return 0;
}

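/*
 * Build the per-CPU WQ lookup table from the PCI IRQ affinity masks so
 * that work submitted on a given CPU uses the WQ whose EQ vector is
 * affinitized to that CPU.
 */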
int
efct_hw_map_wq_cpu(struct efct_hw *hw)
{
    struct efct *efct = hw->os;
    u32 cpu = 0, i;

    /* Init cpu_map array */
    hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
                   GFP_KERNEL);
    if (!hw->wq_cpu_array)
        return -ENOMEM;

    for (i = 0; i < hw->config.n_eq; i++) {
        const struct cpumask *maskp;

        /* Get a CPU mask for all CPUs affinitized to this vector */
        maskp = pci_irq_get_affinity(efct->pci, i);
        if (!maskp) {
            efc_log_debug(efct, "maskp null for vector:%d\n", i);
            continue;
        }

        /* Loop through all CPUs associated with vector idx */
        for_each_cpu_and(cpu, maskp, cpu_present_mask) {
            efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
            hw->wq_cpu_array[cpu] = hw->hw_wq[i];
        }
    }

    return 0;
}

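/* Allocate and register a single EQ and configure its interrupt delay. */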
struct hw_eq *
efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
{
    struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

    if (!eq)
        return NULL;

    eq->type = SLI4_QTYPE_EQ;
    eq->hw = hw;
    eq->entry_count = entry_count;
    eq->instance = hw->eq_count++;
    eq->queue = &hw->eq[eq->instance];
    INIT_LIST_HEAD(&eq->cq_list);

    if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
                NULL)) {
        efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
        kfree(eq);
        return NULL;
    }

    sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
    hw->hw_eq[eq->instance] = eq;
    INIT_LIST_HEAD(&eq->list_entry);
    list_add_tail(&eq->list_entry, &hw->eq_list);
    efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
              eq->queue->id, eq->entry_count);
    return eq;
}

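/* Allocate a single CQ and attach it to the given EQ. */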
struct hw_cq *
efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
{
    struct efct_hw *hw = eq->hw;
    struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);

    if (!cq)
        return NULL;

    cq->eq = eq;
    cq->type = SLI4_QTYPE_CQ;
    cq->instance = eq->hw->cq_count++;
    cq->entry_count = entry_count;
    cq->queue = &hw->cq[cq->instance];

    INIT_LIST_HEAD(&cq->q_list);

    if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
                cq->entry_count, eq->queue)) {
        efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
                eq->instance, eq->entry_count);
        kfree(cq);
        return NULL;
    }

    hw->hw_cq[cq->instance] = cq;
    INIT_LIST_HEAD(&cq->list_entry);
    list_add_tail(&cq->list_entry, &eq->cq_list);
    efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
              cq->queue->id, cq->entry_count);
    return cq;
}

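/*
 * Allocate num_cqs CQs as a set in one SLI4 request, pairing cqs[i]
 * with eqs[i]. On failure every CQ object allocated so far is freed
 * and the cqs[] entries are reset to NULL.
 */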
u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
           u32 num_cqs, u32 entry_count)
{
    u32 i;
    struct efct_hw *hw = eqs[0]->hw;
    struct sli4 *sli4 = &hw->sli;
    struct hw_cq *cq = NULL;
    struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
    struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT];

    /* Initialise CQS pointers to NULL */
    for (i = 0; i < num_cqs; i++)
        cqs[i] = NULL;

    for (i = 0; i < num_cqs; i++) {
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
            goto error;

        cqs[i]          = cq;
        cq->eq          = eqs[i];
        cq->type        = SLI4_QTYPE_CQ;
        cq->instance    = hw->cq_count++;
        cq->entry_count = entry_count;
        cq->queue       = &hw->cq[cq->instance];
        qs[i]           = cq->queue;
        assefct[i]      = eqs[i]->queue;
        INIT_LIST_HEAD(&cq->q_list);
    }

    if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {
        efc_log_err(hw->os, "Failed to create CQ Set.\n");
        goto error;
    }

    for (i = 0; i < num_cqs; i++) {
        hw->hw_cq[cqs[i]->instance] = cqs[i];
        INIT_LIST_HEAD(&cqs[i]->list_entry);
        list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
    }

    return 0;

error:
    for (i = 0; i < num_cqs; i++) {
        kfree(cqs[i]);
        cqs[i] = NULL;
    }
    return -EIO;
}

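/* Allocate the MQ (mailbox queue) and attach it to the given CQ. */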
struct hw_mq *
efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
{
    struct efct_hw *hw = cq->eq->hw;
    struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

    if (!mq)
        return NULL;

    mq->cq = cq;
    mq->type = SLI4_QTYPE_MQ;
    mq->instance = cq->eq->hw->mq_count++;
    mq->entry_count = entry_count;
    mq->entry_size = EFCT_HW_MQ_DEPTH;
    mq->queue = &hw->mq[mq->instance];

    if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
                cq->queue)) {
        efc_log_err(hw->os, "MQ allocation failure\n");
        kfree(mq);
        return NULL;
    }

    hw->hw_mq[mq->instance] = mq;
    INIT_LIST_HEAD(&mq->list_entry);
    list_add_tail(&mq->list_entry, &cq->q_list);
    efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
              mq->queue->id, mq->entry_count);
    return mq;
}

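/*
 * Allocate a WQ and attach it to the given CQ. One ring entry stays
 * reserved, so free_count starts at entry_count - 1.
 */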
struct hw_wq *
efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
{
    struct efct_hw *hw = cq->eq->hw;
    struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);

    if (!wq)
        return NULL;

    wq->hw = cq->eq->hw;
    wq->cq = cq;
    wq->type = SLI4_QTYPE_WQ;
    wq->instance = cq->eq->hw->wq_count++;
    wq->entry_count = entry_count;
    wq->queue = &hw->wq[wq->instance];
    wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
    wq->wqec_count = wq->wqec_set_count;
    wq->free_count = wq->entry_count - 1;
    INIT_LIST_HEAD(&wq->pending_list);

    if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
                wq->entry_count, cq->queue)) {
        efc_log_err(hw->os, "WQ allocation failure\n");
        kfree(wq);
        return NULL;
    }

    hw->hw_wq[wq->instance] = wq;
    INIT_LIST_HEAD(&wq->list_entry);
    list_add_tail(&wq->list_entry, &cq->q_list);
    efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
              wq->instance, wq->queue->id, wq->entry_count, wq->class);
    return wq;
}

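/*
 * Allocate num_rq_pairs receive queue pairs as a set. Each hw_rq wraps
 * two SLI4 queues, a header RQ and a data RQ, that are posted to the
 * hardware together; rq_tracker records the buffer posted at each ring
 * index.
 */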
u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
           u32 num_rq_pairs, u32 entry_count)
{
    struct efct_hw *hw = cqs[0]->eq->hw;
    struct hw_rq *rq = NULL;
    struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
    u32 i, q_count, size;

    /* Initialise RQS pointers */
    for (i = 0; i < num_rq_pairs; i++)
        rqs[i] = NULL;

    /*
     * Allocate an RQ object SET, where each element in set
     * encapsulates 2 SLI queues (for rq pair)
     */
    for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
        rq = kzalloc(sizeof(*rq), GFP_KERNEL);
        if (!rq)
            goto error;

        rqs[i] = rq;
        rq->instance = hw->hw_rq_count++;
        rq->cq = cqs[i];
        rq->type = SLI4_QTYPE_RQ;
        rq->entry_count = entry_count;

        /* Header RQ */
        rq->hdr = &hw->rq[hw->rq_count];
        rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
        hw->hw_rq_lookup[hw->rq_count] = rq->instance;
        hw->rq_count++;
        qs[q_count] = rq->hdr;

        /* Data RQ */
        rq->data = &hw->rq[hw->rq_count];
        rq->data_entry_size = hw->config.rq_default_buffer_size;
        hw->hw_rq_lookup[hw->rq_count] = rq->instance;
        hw->rq_count++;
        qs[q_count + 1] = rq->data;

        rq->rq_tracker = NULL;
    }

    if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
                cqs[0]->queue->id,
                rqs[0]->entry_count,
                rqs[0]->hdr_entry_size,
                rqs[0]->data_entry_size)) {
        efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
                cqs[0]->queue->id);
        goto error;
    }

    for (i = 0; i < num_rq_pairs; i++) {
        hw->hw_rq[rqs[i]->instance] = rqs[i];
        INIT_LIST_HEAD(&rqs[i]->list_entry);
        list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
        size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
        rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
        if (!rqs[i]->rq_tracker)
            goto error;
    }

    return 0;

error:
    for (i = 0; i < num_rq_pairs; i++) {
        if (rqs[i]) {
            kfree(rqs[i]->rq_tracker);
            kfree(rqs[i]);
        }
    }

    return -EIO;
}

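/*
 * The helpers below unwind a queue and everything attached to it:
 * deleting an EQ deletes its CQs, and deleting a CQ deletes the MQ,
 * WQ, or RQ objects on its q_list.
 */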
void
efct_hw_del_eq(struct hw_eq *eq)
{
    struct hw_cq *cq;
    struct hw_cq *cq_next;

    if (!eq)
        return;

    list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
        efct_hw_del_cq(cq);
    list_del(&eq->list_entry);
    eq->hw->hw_eq[eq->instance] = NULL;
    kfree(eq);
}

void
efct_hw_del_cq(struct hw_cq *cq)
{
    struct hw_q *q;
    struct hw_q *q_next;

    if (!cq)
        return;

    list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
        switch (q->type) {
        case SLI4_QTYPE_MQ:
            efct_hw_del_mq((struct hw_mq *)q);
            break;
        case SLI4_QTYPE_WQ:
            efct_hw_del_wq((struct hw_wq *)q);
            break;
        case SLI4_QTYPE_RQ:
            efct_hw_del_rq((struct hw_rq *)q);
            break;
        default:
            break;
        }
    }
    list_del(&cq->list_entry);
    cq->eq->hw->hw_cq[cq->instance] = NULL;
    kfree(cq);
}

void
efct_hw_del_mq(struct hw_mq *mq)
{
    if (!mq)
        return;

    list_del(&mq->list_entry);
    mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
    kfree(mq);
}

void
efct_hw_del_wq(struct hw_wq *wq)
{
    if (!wq)
        return;

    list_del(&wq->list_entry);
    wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
    kfree(wq);
}

void
efct_hw_del_rq(struct hw_rq *rq)
{
    struct efct_hw *hw = NULL;

    if (!rq)
        return;
    /* Free RQ tracker */
    kfree(rq->rq_tracker);
    rq->rq_tracker = NULL;
    list_del(&rq->list_entry);
    hw = rq->cq->eq->hw;
    hw->hw_rq[rq->instance] = NULL;
    kfree(rq);
}

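/*
 * Tear down every queue by walking the EQ list; the eq_list.next check
 * guards against being called before the list head was initialized.
 */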
void
efct_hw_queue_teardown(struct efct_hw *hw)
{
    struct hw_eq *eq;
    struct hw_eq *eq_next;

    if (!hw->eq_list.next)
        return;

    list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
        efct_hw_del_eq(eq);
}

static inline int
efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
{
    return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
}

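/*
 * Look up and claim the buffer posted at a given RQ ring index. Only
 * the header queue's lock is taken; it covers both queues of the pair.
 */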
static struct efc_hw_sequence *
efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
{
    struct sli4_queue *rq_hdr = &hw->rq[rqindex];
    struct efc_hw_sequence *seq = NULL;
    struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
    unsigned long flags = 0;

    if (bufindex >= rq_hdr->length) {
        efc_log_err(hw->os,
                "RQidx %d bufidx %d exceed ring len %d for id %d\n",
                rqindex, bufindex, rq_hdr->length, rq_hdr->id);
        return NULL;
    }

    /* rq_hdr lock also covers rqindex+1 queue */
    spin_lock_irqsave(&rq_hdr->lock, flags);

    seq = rq->rq_tracker[bufindex];
    rq->rq_tracker[bufindex] = NULL;

    if (!seq) {
        efc_log_err(hw->os,
                "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
                rqindex, bufindex, rq_hdr->index);
    }

    spin_unlock_irqrestore(&rq_hdr->lock, flags);
    return seq;
}

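/*
 * Handle an async receive CQE: on RQ error status, return the buffer
 * to the chip where possible; otherwise fill in the received sequence
 * (header/payload lengths, FCFI) and pass it to the unsolicited-frame
 * handler.
 */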
int
efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
              u8 *cqe)
{
    u16 rq_id;
    u32 index;
    int rqindex;
    int rq_status;
    u32 h_len;
    u32 p_len;
    struct efc_hw_sequence *seq;
    struct hw_rq *rq;

    rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
                          &rq_id, &index);
    if (rq_status != 0) {
        switch (rq_status) {
        case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
        case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
            /* just get RQ buffer then return to chip */
            rqindex = efct_hw_rqpair_find(hw, rq_id);
            if (rqindex < 0) {
                efc_log_debug(hw->os,
                          "status=%#x: lookup fail id=%#x\n",
                          rq_status, rq_id);
                break;
            }

            /* get RQ buffer */
            seq = efct_hw_rqpair_get(hw, rqindex, index);

            /* return to chip */
            if (efct_hw_rqpair_sequence_free(hw, seq)) {
                efc_log_debug(hw->os,
                          "status=%#x,fail rtrn buf to RQ\n",
                          rq_status);
                break;
            }
            break;
        case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
        case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
            /*
             * since RQ buffers were not consumed, cannot return
             * them to chip
             */
            efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
                      rq_status);
            fallthrough;
        default:
            break;
        }
        return -EIO;
    }

    rqindex = efct_hw_rqpair_find(hw, rq_id);
    if (rqindex < 0) {
        efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
                  rq_id);
        return -EIO;
    }

    rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
    rq->use_count++;

    seq = efct_hw_rqpair_get(hw, rqindex, index);
    if (WARN_ON(!seq))
        return -EIO;

    seq->hw = hw;

    sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
    seq->header->dma.len = h_len;
    seq->payload->dma.len = p_len;
    seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
    seq->hw_priv = cq->eq;

    efct_unsolicited_cb(hw->os, seq);

    return 0;
}

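/*
 * Repost a sequence's header and payload buffers to their RQ pair and
 * record the sequence in rq_tracker at the returned ring index.
 */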
static int
efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
    struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
    struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
    u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
    struct hw_rq *rq = hw->hw_rq[hw_rq_index];
    u32 phys_hdr[2];
    u32 phys_payload[2];
    int qindex_hdr;
    int qindex_payload;
    unsigned long flags = 0;

    /* Update the RQ verification lookup tables */
    phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
    phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
    phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
    phys_payload[1] = lower_32_bits(seq->payload->dma.phys);

    /* rq_hdr lock also covers payload / header->rqindex+1 queue */
    spin_lock_irqsave(&rq_hdr->lock, flags);

    /*
     * Note: The header must be posted last for buffer pair mode because
     *       posting on the header queue posts the payload queue as well.
     *       We do not ring the payload queue independently in RQ pair mode.
     */
    qindex_payload = sli_rq_write(&hw->sli, rq_payload,
                      (void *)phys_payload);
    qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
    if (qindex_hdr < 0 ||
        qindex_payload < 0) {
        efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
        spin_unlock_irqrestore(&rq_hdr->lock, flags);
        return -EIO;
    }

    /* ensure the indexes are the same */
    WARN_ON(qindex_hdr != qindex_payload);

    /* Update the lookup table */
    if (!rq->rq_tracker[qindex_hdr]) {
        rq->rq_tracker[qindex_hdr] = seq;
    } else {
        efc_log_debug(hw->os,
                  "expected rq_tracker[%d][%d] buffer to be NULL\n",
                  hw_rq_index, qindex_hdr);
    }

    spin_unlock_irqrestore(&rq_hdr->lock, flags);
    return 0;
}

int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
    int rc = 0;

    /*
     * Post the data buffer first. Because in RQ pair mode, ringing the
     * doorbell of the header ring will post the data buffer as well.
     */
    if (efct_hw_rqpair_put(hw, seq)) {
        efc_log_err(hw->os, "error writing buffers\n");
        return -EIO;
    }

    return rc;
}

int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
{
    struct efct *efct = efc->base;

    return efct_hw_rqpair_sequence_free(&efct->hw, seq);
}