0001 /**********************************************************************
0002  * Author: Cavium, Inc.
0003  *
0004  * Contact: support@cavium.com
0005  *          Please include "LiquidIO" in the subject.
0006  *
0007  * Copyright (c) 2003-2016 Cavium, Inc.
0008  *
0009  * This file is free software; you can redistribute it and/or modify
0010  * it under the terms of the GNU General Public License, Version 2, as
0011  * published by the Free Software Foundation.
0012  *
0013  * This file is distributed in the hope that it will be useful, but
0014  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
0015  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
0016  * NONINFRINGEMENT.  See the GNU General Public License for more
0017  * details.
0018  **********************************************************************/
0019 #include <linux/pci.h>
0020 #include <linux/netdevice.h>
0021 #include <linux/vmalloc.h>
0022 #include "liquidio_common.h"
0023 #include "octeon_droq.h"
0024 #include "octeon_iq.h"
0025 #include "response_manager.h"
0026 #include "octeon_device.h"
0027 #include "octeon_main.h"
0028 #include "octeon_network.h"
0029 #include "cn66xx_device.h"
0030 #include "cn23xx_pf_device.h"
0031 #include "cn23xx_vf_device.h"
0032 
0033 struct iq_post_status {
0034     int status;
0035     int index;
0036 };
0037 
0038 static void check_db_timeout(struct work_struct *work);
0039 static void  __check_db_timeout(struct octeon_device *oct, u64 iq_no);
0040 
0041 static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
0042 
0043 static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
0044 {
0045     struct octeon_instr_queue *iq =
0046         (struct octeon_instr_queue *)oct->instr_queue[iq_no];
0047     return iq->iqcmd_64B;
0048 }
0049 
0050 #define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
0051 
0052 /* Define this to return the request status compatible with old code */
0053 /*#define OCTEON_USE_OLD_REQ_STATUS*/
0054 
0055 /* Return 0 on success, 1 on failure */
0056 int octeon_init_instr_queue(struct octeon_device *oct,
0057                 union oct_txpciq txpciq,
0058                 u32 num_descs)
0059 {
0060     struct octeon_instr_queue *iq;
0061     struct octeon_iq_config *conf = NULL;
0062     u32 iq_no = (u32)txpciq.s.q_no;
0063     u32 q_size;
0064     struct cavium_wq *db_wq;
0065     int numa_node = dev_to_node(&oct->pci_dev->dev);
0066 
0067     if (OCTEON_CN6XXX(oct))
0068         conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
0069     else if (OCTEON_CN23XX_PF(oct))
0070         conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
0071     else if (OCTEON_CN23XX_VF(oct))
0072         conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));
0073 
0074     if (!conf) {
0075         dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
0076             oct->chip_id);
0077         return 1;
0078     }
0079 
0080     q_size = (u32)conf->instr_type * num_descs;
0081 
0082     iq = oct->instr_queue[iq_no];
0083 
0084     iq->oct_dev = oct;
0085 
0086     iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
0087     if (!iq->base_addr) {
0088         dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
0089             iq_no);
0090         return 1;
0091     }
0092 
0093     iq->max_count = num_descs;
0094 
0095     /* Initialize a list to hold requests that have been posted to Octeon
0096      * but have yet to be fetched by Octeon.
0097      */
0098     iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
0099                     numa_node);
0100     if (!iq->request_list)
0101         iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
0102     if (!iq->request_list) {
0103         lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
0104         dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
0105             iq_no);
0106         return 1;
0107     }
0108 
0109     dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
0110         iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
0111 
0112     iq->txpciq.u64 = txpciq.u64;
0113     iq->fill_threshold = (u32)conf->db_min;
0114     iq->fill_cnt = 0;
0115     iq->host_write_index = 0;
0116     iq->octeon_read_index = 0;
0117     iq->flush_index = 0;
0118     iq->last_db_time = 0;
0119     iq->do_auto_flush = 1;
0120     iq->db_timeout = (u32)conf->db_timeout;
0121     atomic_set(&iq->instr_pending, 0);
0122     iq->pkts_processed = 0;
0123 
0124     /* Initialize the spinlock for this instruction queue */
0125     spin_lock_init(&iq->lock);
0126     if (iq_no == 0) {
0127         iq->allow_soft_cmds = true;
0128         spin_lock_init(&iq->post_lock);
0129     } else {
0130         iq->allow_soft_cmds = false;
0131     }
0132 
0133     spin_lock_init(&iq->iq_flush_running_lock);
0134 
0135     oct->io_qmask.iq |= BIT_ULL(iq_no);
0136 
0137     /* Set the 32B/64B mode for each input queue */
0138     oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
0139     iq->iqcmd_64B = (conf->instr_type == 64);
0140 
0141     oct->fn_list.setup_iq_regs(oct, iq_no);
0142 
0143     oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
0144                              WQ_MEM_RECLAIM,
0145                              0);
0146     if (!oct->check_db_wq[iq_no].wq) {
0147         vfree(iq->request_list);
0148         iq->request_list = NULL;
0149         lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
0150         dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
0151             iq_no);
0152         return 1;
0153     }
0154 
0155     db_wq = &oct->check_db_wq[iq_no];
0156 
0157     INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
0158     db_wq->wk.ctxptr = oct;
0159     db_wq->wk.ctxul = iq_no;
0160     queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
0161 
0162     return 0;
0163 }
0164 
0165 int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
0166 {
0167     u64 desc_size = 0, q_size;
0168     struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
0169 
0170     cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
0171     destroy_workqueue(oct->check_db_wq[iq_no].wq);
0172 
0173     if (OCTEON_CN6XXX(oct))
0174         desc_size =
0175             CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
0176     else if (OCTEON_CN23XX_PF(oct))
0177         desc_size =
0178             CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
0179     else if (OCTEON_CN23XX_VF(oct))
0180         desc_size =
0181             CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));
0182 
0183     vfree(iq->request_list);
0184 
0185     if (iq->base_addr) {
0186         q_size = iq->max_count * desc_size;
0187         lio_dma_free(oct, (u32)q_size, iq->base_addr,
0188                  iq->base_addr_dma);
0189         oct->io_qmask.iq &= ~(1ULL << iq_no);
0190         vfree(oct->instr_queue[iq_no]);
0191         oct->instr_queue[iq_no] = NULL;
0192         oct->num_iqs--;
0193         return 0;
0194     }
0195     return 1;
0196 }
0197 
0198 /* Return 0 on success, 1 on failure */
0199 int octeon_setup_iq(struct octeon_device *oct,
0200             int ifidx,
0201             int q_index,
0202             union oct_txpciq txpciq,
0203             u32 num_descs,
0204             void *app_ctx)
0205 {
0206     u32 iq_no = (u32)txpciq.s.q_no;
0207     int numa_node = dev_to_node(&oct->pci_dev->dev);
0208 
0209     if (oct->instr_queue[iq_no]) {
0210         dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
0211             iq_no);
0212         oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
0213         oct->instr_queue[iq_no]->app_ctx = app_ctx;
0214         return 0;
0215     }
0216     oct->instr_queue[iq_no] =
0217         vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
0218     if (!oct->instr_queue[iq_no])
0219         oct->instr_queue[iq_no] =
0220             vzalloc(sizeof(struct octeon_instr_queue));
0221     if (!oct->instr_queue[iq_no])
0222         return 1;
0223 
0224 
0225     oct->instr_queue[iq_no]->q_index = q_index;
0226     oct->instr_queue[iq_no]->app_ctx = app_ctx;
0227     oct->instr_queue[iq_no]->ifidx = ifidx;
0228 
0229     if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
0230         vfree(oct->instr_queue[iq_no]);
0231         oct->instr_queue[iq_no] = NULL;
0232         return 1;
0233     }
0234 
0235     oct->num_iqs++;
0236     if (oct->fn_list.enable_io_queues(oct)) {
0237         octeon_delete_instr_queue(oct, iq_no);
0238         return 1;
0239     }
0240 
0241     return 0;
0242 }
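
/* Illustrative sketch, not part of this file: per-interface setup code is
 * expected to call octeon_setup_iq() once per TX queue, passing the queue
 * geometry and an opaque app_ctx that later comes back as iq->app_ctx.
 * The ring size and error mapping below are assumptions, not values taken
 * from the driver.
 */
static int example_setup_tx_queue(struct octeon_device *oct, int ifidx,
                                  int q_index, union oct_txpciq txpciq,
                                  void *app_ctx)
{
    u32 num_descs = 512;    /* hypothetical ring size */

    /* octeon_setup_iq() returns 0 on success (including the case where the
     * IQ already exists) and 1 on failure.
     */
    if (octeon_setup_iq(oct, ifidx, q_index, txpciq, num_descs, app_ctx))
        return -ENOMEM;

    return 0;
}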
0243 
0244 int lio_wait_for_instr_fetch(struct octeon_device *oct)
0245 {
0246     int i, retry = 1000, pending, instr_cnt = 0;
0247 
0248     do {
0249         instr_cnt = 0;
0250 
0251         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
0252             if (!(oct->io_qmask.iq & BIT_ULL(i)))
0253                 continue;
0254             pending =
0255                 atomic_read(&oct->instr_queue[i]->instr_pending);
0256             if (pending)
0257                 __check_db_timeout(oct, i);
0258             instr_cnt += pending;
0259         }
0260 
0261         if (instr_cnt == 0)
0262             break;
0263 
0264         schedule_timeout_uninterruptible(1);
0265 
0266     } while (retry-- && instr_cnt);
0267 
0268     return instr_cnt;
0269 }
0270 
0271 static inline void
0272 ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
0273 {
0274     if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
0275         writel(iq->fill_cnt, iq->doorbell_reg);
0276         /* make sure doorbell write goes through */
0277         iq->fill_cnt = 0;
0278         iq->last_db_time = jiffies;
0279         return;
0280     }
0281 }
0282 
0283 void
0284 octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
0285 {
0286     struct octeon_instr_queue *iq;
0287 
0288     iq = oct->instr_queue[iq_no];
0289     spin_lock(&iq->post_lock);
0290     if (iq->fill_cnt)
0291         ring_doorbell(oct, iq);
0292     spin_unlock(&iq->post_lock);
0293 }
0294 
0295 static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
0296                       u8 *cmd)
0297 {
0298     u8 *iqptr, cmdsize;
0299 
0300     cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
0301     iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
0302 
0303     memcpy(iqptr, cmd, cmdsize);
0304 }
0305 
0306 static inline struct iq_post_status
0307 __post_command2(struct octeon_instr_queue *iq, u8 *cmd)
0308 {
0309     struct iq_post_status st;
0310 
0311     st.status = IQ_SEND_OK;
0312 
0313     /* This ensures that the read index does not wrap around to the same
0314      * position if queue gets full before Octeon could fetch any instr.
0315      */
0316     if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
0317         st.status = IQ_SEND_FAILED;
0318         st.index = -1;
0319         return st;
0320     }
0321 
0322     if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
0323         st.status = IQ_SEND_STOP;
0324 
0325     __copy_cmd_into_iq(iq, cmd);
0326 
0327     /* "index" is returned, host_write_index is modified. */
0328     st.index = iq->host_write_index;
0329     iq->host_write_index = incr_index(iq->host_write_index, 1,
0330                       iq->max_count);
0331     iq->fill_cnt++;
0332 
0333     /* Flush the command into memory. We need to be sure the data is in
0334      * memory before indicating that the instruction is pending.
0335      */
0336     wmb();
0337 
0338     atomic_inc(&iq->instr_pending);
0339 
0340     return st;
0341 }
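
/* Illustrative sketch, not part of this file: host_write_index,
 * octeon_read_index and flush_index all advance with incr_index() from
 * octeon_iq.h, which wraps at max_count.  A minimal stand-alone equivalent
 * of that wrap arithmetic, assuming the step never exceeds the ring size:
 */
static inline unsigned int example_incr_index(unsigned int index,
                                              unsigned int count,
                                              unsigned int max)
{
    /* e.g. index = 1022, count = 4, max = 1024 yields 2 */
    if ((index + count) >= max)
        return index + count - max;
    return index + count;
}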
0342 
0343 int
0344 octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
0345                 void (*fn)(void *))
0346 {
0347     if (reqtype > REQTYPE_LAST) {
0348         dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
0349             __func__, reqtype);
0350         return -EINVAL;
0351     }
0352 
0353     reqtype_free_fn[oct->octeon_id][reqtype] = fn;
0354 
0355     return 0;
0356 }
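
/* Illustrative sketch, not part of this file: a hypothetical user of this
 * API registers one destructor per request type at init time so that
 * lio_process_iq_request_list() below can release buffers once Octeon has
 * fetched the corresponding commands.  The callback body is an assumption;
 * the real NIC module uses its own buffer wrappers.
 */
static void example_free_noresp_buf(void *buf)
{
    vfree(buf);    /* assumes buf was vmalloc()ed by the sender */
}

static int example_register_free_fns(struct octeon_device *oct)
{
    return octeon_register_reqtype_free_fn(oct, REQTYPE_NORESP_NET,
                                           example_free_noresp_buf);
}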
0357 
0358 static inline void
0359 __add_to_request_list(struct octeon_instr_queue *iq,
0360               int idx, void *buf, int reqtype)
0361 {
0362     iq->request_list[idx].buf = buf;
0363     iq->request_list[idx].reqtype = reqtype;
0364 }
0365 
0366 /* Can only run in process context */
0367 int
0368 lio_process_iq_request_list(struct octeon_device *oct,
0369                 struct octeon_instr_queue *iq, u32 napi_budget)
0370 {
0371     struct cavium_wq *cwq = &oct->dma_comp_wq;
0372     int reqtype;
0373     void *buf;
0374     u32 old = iq->flush_index;
0375     u32 inst_count = 0;
0376     unsigned int pkts_compl = 0, bytes_compl = 0;
0377     struct octeon_soft_command *sc;
0378     unsigned long flags;
0379 
0380     while (old != iq->octeon_read_index) {
0381         reqtype = iq->request_list[old].reqtype;
0382         buf     = iq->request_list[old].buf;
0383 
0384         if (reqtype == REQTYPE_NONE)
0385             goto skip_this;
0386 
0387         octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
0388                              &bytes_compl);
0389 
0390         switch (reqtype) {
0391         case REQTYPE_NORESP_NET:
0392         case REQTYPE_NORESP_NET_SG:
0393         case REQTYPE_RESP_NET_SG:
0394             reqtype_free_fn[oct->octeon_id][reqtype](buf);
0395             break;
0396         case REQTYPE_RESP_NET:
0397         case REQTYPE_SOFT_COMMAND:
0398             sc = buf;
0399             /* We're expecting a response from Octeon for
0400              * this soft command, so add sc to the ordered
0401              * soft command response list; it's then up to
0402              * lio_process_ordered_list() to process sc
0403              * when the response arrives.
0404              */
0405             spin_lock_irqsave(&oct->response_list
0406                       [OCTEON_ORDERED_SC_LIST].lock, flags);
0407             atomic_inc(&oct->response_list
0408                    [OCTEON_ORDERED_SC_LIST].pending_req_count);
0409             list_add_tail(&sc->node, &oct->response_list
0410                 [OCTEON_ORDERED_SC_LIST].head);
0411             spin_unlock_irqrestore(&oct->response_list
0412                            [OCTEON_ORDERED_SC_LIST].lock,
0413                            flags);
0414             break;
0415         default:
0416             dev_err(&oct->pci_dev->dev,
0417                 "%s Unknown reqtype: %d buf: %p at idx %d\n",
0418                 __func__, reqtype, buf, old);
0419         }
0420 
0421         iq->request_list[old].buf = NULL;
0422         iq->request_list[old].reqtype = 0;
0423 
0424  skip_this:
0425         inst_count++;
0426         old = incr_index(old, 1, iq->max_count);
0427 
0428         if ((napi_budget) && (inst_count >= napi_budget))
0429             break;
0430     }
0431     if (bytes_compl)
0432         octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
0433                            bytes_compl);
0434     iq->flush_index = old;
0435 
0436     if (atomic_read(&oct->response_list
0437             [OCTEON_ORDERED_SC_LIST].pending_req_count))
0438         queue_work(cwq->wq, &cwq->wk.work.work);
0439 
0440     return inst_count;
0441 }
0442 
0443 /* Can only be called from process context */
0444 int
0445 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
0446         u32 napi_budget)
0447 {
0448     u32 inst_processed = 0;
0449     u32 tot_inst_processed = 0;
0450     int tx_done = 1;
0451 
0452     if (!spin_trylock(&iq->iq_flush_running_lock))
0453         return tx_done;
0454 
0455     spin_lock_bh(&iq->lock);
0456 
0457     iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
0458 
0459     do {
0460         /* Process any outstanding IQ packets. */
0461         if (iq->flush_index == iq->octeon_read_index)
0462             break;
0463 
0464         if (napi_budget)
0465             inst_processed =
0466                 lio_process_iq_request_list(oct, iq,
0467                                 napi_budget -
0468                                 tot_inst_processed);
0469         else
0470             inst_processed =
0471                 lio_process_iq_request_list(oct, iq, 0);
0472 
0473         if (inst_processed) {
0474             iq->pkts_processed += inst_processed;
0475             atomic_sub(inst_processed, &iq->instr_pending);
0476             iq->stats.instr_processed += inst_processed;
0477         }
0478 
0479         tot_inst_processed += inst_processed;
0480     } while (tot_inst_processed < napi_budget);
0481 
0482     if (napi_budget && (tot_inst_processed >= napi_budget))
0483         tx_done = 0;
0484 
0485     iq->last_db_time = jiffies;
0486 
0487     spin_unlock_bh(&iq->lock);
0488 
0489     spin_unlock(&iq->iq_flush_running_lock);
0490 
0491     return tx_done;
0492 }
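
/* Illustrative sketch, not part of this file: how a hypothetical poll
 * routine consumes the budget/tx_done contract of octeon_flush_iq().
 * A return of 1 means the flush finished within the budget (or another
 * flush was already running); 0 means the budget was exhausted and the
 * caller should poll again.
 */
static bool example_drain_tx_completions(struct octeon_device *oct,
                                         struct octeon_instr_queue *iq,
                                         u32 budget)
{
    /* nothing to do if no commands are outstanding */
    if (!atomic_read(&iq->instr_pending))
        return true;

    return octeon_flush_iq(oct, iq, budget) != 0;
}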
0493 
0494 /* Process instruction queue after timeout.
0495  * This routine gets called from a workqueue or when removing the module.
0496  */
0497 static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
0498 {
0499     struct octeon_instr_queue *iq;
0500     u64 next_time;
0501 
0502     if (!oct)
0503         return;
0504 
0505     iq = oct->instr_queue[iq_no];
0506     if (!iq)
0507         return;
0508 
0509     /* return immediately if no work is pending */
0510     if (!atomic_read(&iq->instr_pending))
0511         return;
0512     /* If jiffies - last_db_time < db_timeout, do nothing */
0513     next_time = iq->last_db_time + iq->db_timeout;
0514     if (!time_after(jiffies, (unsigned long)next_time))
0515         return;
0516     iq->last_db_time = jiffies;
0517 
0518     /* Flush the instruction queue */
0519     octeon_flush_iq(oct, iq, 0);
0520 
0521     lio_enable_irq(NULL, iq);
0522 }
0523 
0524 /* Called from the per-IQ check_iq_db workqueue at regular intervals to flush
0525  * instructions that Octeon has already fetched, once the doorbell timeout expires.
0526  */
0527 static void check_db_timeout(struct work_struct *work)
0528 {
0529     struct cavium_wk *wk = (struct cavium_wk *)work;
0530     struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
0531     u64 iq_no = wk->ctxul;
0532     struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
0533     u32 delay = 10;
0534 
0535     __check_db_timeout(oct, iq_no);
0536     queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
0537 }
0538 
0539 int
0540 octeon_send_command(struct octeon_device *oct, u32 iq_no,
0541             u32 force_db, void *cmd, void *buf,
0542             u32 datasize, u32 reqtype)
0543 {
0544     int xmit_stopped;
0545     struct iq_post_status st;
0546     struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
0547 
0548     /* Get the lock and prevent other tasks and tx interrupt handler from
0549      * running.
0550      */
0551     if (iq->allow_soft_cmds)
0552         spin_lock_bh(&iq->post_lock);
0553 
0554     st = __post_command2(iq, cmd);
0555 
0556     if (st.status != IQ_SEND_FAILED) {
0557         xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
0558         __add_to_request_list(iq, st.index, buf, reqtype);
0559         INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
0560         INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
0561 
0562         if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
0563             xmit_stopped || st.status == IQ_SEND_STOP)
0564             ring_doorbell(oct, iq);
0565     } else {
0566         INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
0567     }
0568 
0569     if (iq->allow_soft_cmds)
0570         spin_unlock_bh(&iq->post_lock);
0571 
0572     /* This is only done here to expedite packets being flushed
0573      * for cases where there are no IQ completion interrupts.
0574      */
0575 
0576     return st.status;
0577 }
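
/* Illustrative sketch, not part of this file: how a hypothetical transmit
 * path reacts to the three octeon_send_command() outcomes.  The reqtype
 * and throttling policy are assumptions.
 */
static int example_post_and_throttle(struct octeon_device *oct, u32 iq_no,
                                     struct netdev_queue *txq,
                                     void *cmd, void *buf, u32 datasize)
{
    int status = octeon_send_command(oct, iq_no, 0, cmd, buf, datasize,
                                     REQTYPE_NORESP_NET);

    if (status == IQ_SEND_FAILED) {
        /* nothing was queued: the caller still owns buf and must free it;
         * the drop has already been counted in instr_dropped.
         */
        return -EBUSY;
    }

    if (status == IQ_SEND_STOP) {
        /* the command was queued but the ring is almost full: throttle
         * the stack until completions free up space.
         */
        netif_tx_stop_queue(txq);
    }

    return 0;
}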
0578 
0579 void
0580 octeon_prepare_soft_command(struct octeon_device *oct,
0581                 struct octeon_soft_command *sc,
0582                 u8 opcode,
0583                 u8 subcode,
0584                 u32 irh_ossp,
0585                 u64 ossp0,
0586                 u64 ossp1)
0587 {
0588     struct octeon_config *oct_cfg;
0589     struct octeon_instr_ih2 *ih2;
0590     struct octeon_instr_ih3 *ih3;
0591     struct octeon_instr_pki_ih3 *pki_ih3;
0592     struct octeon_instr_irh *irh;
0593     struct octeon_instr_rdp *rdp;
0594 
0595     WARN_ON(opcode > 15);
0596     WARN_ON(subcode > 127);
0597 
0598     oct_cfg = octeon_get_conf(oct);
0599 
0600     if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
0601         ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
0602 
0603         ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
0604 
0605         pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
0606 
0607         pki_ih3->w           = 1;
0608         pki_ih3->raw         = 1;
0609         pki_ih3->utag        = 1;
0610         pki_ih3->uqpg        =
0611             oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
0612         pki_ih3->utt         = 1;
0613         pki_ih3->tag     = LIO_CONTROL;
0614         pki_ih3->tagtype = ATOMIC_TAG;
0615         pki_ih3->qpg         =
0616             oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;
0617 
0618         pki_ih3->pm          = 0x7;
0619         pki_ih3->sl          = 8;
0620 
0621         if (sc->datasize)
0622             ih3->dlengsz = sc->datasize;
0623 
0624         irh            = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
0625         irh->opcode    = opcode;
0626         irh->subcode   = subcode;
0627 
0628         /* opcode/subcode specific parameters (ossp) */
0629         irh->ossp       = irh_ossp;
0630         sc->cmd.cmd3.ossp[0] = ossp0;
0631         sc->cmd.cmd3.ossp[1] = ossp1;
0632 
0633         if (sc->rdatasize) {
0634             rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
0635             rdp->pcie_port = oct->pcie_port;
0636             rdp->rlen      = sc->rdatasize;
0637 
0638             irh->rflag =  1;
0639             /*PKI IH3*/
0640             /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
0641             ih3->fsz    = LIO_SOFTCMDRESP_IH3;
0642         } else {
0643             irh->rflag =  0;
0644             /*PKI IH3*/
0645             /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
0646             ih3->fsz    = LIO_PCICMD_O3;
0647         }
0648 
0649     } else {
0650         ih2          = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
0651         ih2->tagtype = ATOMIC_TAG;
0652         ih2->tag     = LIO_CONTROL;
0653         ih2->raw     = 1;
0654         ih2->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);
0655 
0656         if (sc->datasize) {
0657             ih2->dlengsz = sc->datasize;
0658             ih2->rs = 1;
0659         }
0660 
0661         irh            = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
0662         irh->opcode    = opcode;
0663         irh->subcode   = subcode;
0664 
0665         /* opcode/subcode specific parameters (ossp) */
0666         irh->ossp       = irh_ossp;
0667         sc->cmd.cmd2.ossp[0] = ossp0;
0668         sc->cmd.cmd2.ossp[1] = ossp1;
0669 
0670         if (sc->rdatasize) {
0671             rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
0672             rdp->pcie_port = oct->pcie_port;
0673             rdp->rlen      = sc->rdatasize;
0674 
0675             irh->rflag =  1;
0676             /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
0677             ih2->fsz   = LIO_SOFTCMDRESP_IH2;
0678         } else {
0679             irh->rflag =  0;
0680             /* irh + ossp[0] + ossp[1] = 24 bytes */
0681             ih2->fsz   = LIO_PCICMD_O2;
0682         }
0683     }
0684 }
0685 
0686 int octeon_send_soft_command(struct octeon_device *oct,
0687                  struct octeon_soft_command *sc)
0688 {
0689     struct octeon_instr_queue *iq;
0690     struct octeon_instr_ih2 *ih2;
0691     struct octeon_instr_ih3 *ih3;
0692     struct octeon_instr_irh *irh;
0693     u32 len;
0694 
0695     iq = oct->instr_queue[sc->iq_no];
0696     if (!iq->allow_soft_cmds) {
0697         dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
0698             sc->iq_no);
0699         INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
0700         return IQ_SEND_FAILED;
0701     }
0702 
0703     if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
0704         ih3 =  (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
0705         if (ih3->dlengsz) {
0706             WARN_ON(!sc->dmadptr);
0707             sc->cmd.cmd3.dptr = sc->dmadptr;
0708         }
0709         irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
0710         if (irh->rflag) {
0711             WARN_ON(!sc->dmarptr);
0712             WARN_ON(!sc->status_word);
0713             *sc->status_word = COMPLETION_WORD_INIT;
0714             sc->cmd.cmd3.rptr = sc->dmarptr;
0715         }
0716         len = (u32)ih3->dlengsz;
0717     } else {
0718         ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
0719         if (ih2->dlengsz) {
0720             WARN_ON(!sc->dmadptr);
0721             sc->cmd.cmd2.dptr = sc->dmadptr;
0722         }
0723         irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
0724         if (irh->rflag) {
0725             WARN_ON(!sc->dmarptr);
0726             WARN_ON(!sc->status_word);
0727             *sc->status_word = COMPLETION_WORD_INIT;
0728             sc->cmd.cmd2.rptr = sc->dmarptr;
0729         }
0730         len = (u32)ih2->dlengsz;
0731     }
0732 
0733     sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
0734 
0735     return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
0736                     len, REQTYPE_SOFT_COMMAND));
0737 }
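
/* Illustrative sketch, not part of this file: the soft-command life cycle
 * as seen from a hypothetical caller.  The opcode/subcode and payload
 * sizes are placeholders.  IQ 0 is used implicitly (sc->iq_no is zeroed by
 * the allocator) and is the only queue with allow_soft_cmds set.
 */
static int example_send_soft_command(struct octeon_device *oct)
{
    struct octeon_soft_command *sc;
    int status;

    /* 64 bytes of outbound data, 16 bytes reserved for the response */
    sc = octeon_alloc_soft_command(oct, 64, 16, 0);
    if (!sc)
        return -ENOMEM;

    /* ... fill sc->virtdptr with the command payload here ... */

    octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
                                0, 0, 0);

    status = octeon_send_soft_command(oct, sc);
    if (status == IQ_SEND_FAILED) {
        octeon_free_soft_command(oct, sc);
        return -EIO;
    }

    /* On success the response manager owns sc: the caller waits for
     * completion, consumes the response, then sets sc->caller_is_done so
     * that octeon_free_sc_done_list() can reclaim the buffer.
     */
    return 0;
}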
0738 
0739 int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
0740 {
0741     int i;
0742     u64 dma_addr;
0743     struct octeon_soft_command *sc;
0744 
0745     INIT_LIST_HEAD(&oct->sc_buf_pool.head);
0746     spin_lock_init(&oct->sc_buf_pool.lock);
0747     atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
0748 
0749     for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
0750         sc = (struct octeon_soft_command *)
0751             lio_dma_alloc(oct,
0752                       SOFT_COMMAND_BUFFER_SIZE,
0753                       (dma_addr_t *)&dma_addr);
0754         if (!sc) {
0755             octeon_free_sc_buffer_pool(oct);
0756             return 1;
0757         }
0758 
0759         sc->dma_addr = dma_addr;
0760         sc->size = SOFT_COMMAND_BUFFER_SIZE;
0761 
0762         list_add_tail(&sc->node, &oct->sc_buf_pool.head);
0763     }
0764 
0765     return 0;
0766 }
0767 
0768 int octeon_free_sc_done_list(struct octeon_device *oct)
0769 {
0770     struct octeon_response_list *done_sc_list, *zombie_sc_list;
0771     struct octeon_soft_command *sc;
0772     struct list_head *tmp, *tmp2;
0773     spinlock_t *sc_lists_lock; /* lock for response_list */
0774 
0775     done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
0776     zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
0777 
0778     if (!atomic_read(&done_sc_list->pending_req_count))
0779         return 0;
0780 
0781     sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
0782 
0783     spin_lock_bh(sc_lists_lock);
0784 
0785     list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
0786         sc = list_entry(tmp, struct octeon_soft_command, node);
0787 
0788         if (READ_ONCE(sc->caller_is_done)) {
0789             list_del(&sc->node);
0790             atomic_dec(&done_sc_list->pending_req_count);
0791 
0792             if (*sc->status_word == COMPLETION_WORD_INIT) {
0793                 /* timeout; move sc to zombie list */
0794                 list_add_tail(&sc->node, &zombie_sc_list->head);
0795                 atomic_inc(&zombie_sc_list->pending_req_count);
0796             } else {
0797                 octeon_free_soft_command(oct, sc);
0798             }
0799         }
0800     }
0801 
0802     spin_unlock_bh(sc_lists_lock);
0803 
0804     return 0;
0805 }
0806 
0807 int octeon_free_sc_zombie_list(struct octeon_device *oct)
0808 {
0809     struct octeon_response_list *zombie_sc_list;
0810     struct octeon_soft_command *sc;
0811     struct list_head *tmp, *tmp2;
0812     spinlock_t *sc_lists_lock; /* lock for response_list */
0813 
0814     zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
0815     sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
0816 
0817     spin_lock_bh(sc_lists_lock);
0818 
0819     list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
0820         list_del(tmp);
0821         atomic_dec(&zombie_sc_list->pending_req_count);
0822         sc = list_entry(tmp, struct octeon_soft_command, node);
0823         octeon_free_soft_command(oct, sc);
0824     }
0825 
0826     spin_unlock_bh(sc_lists_lock);
0827 
0828     return 0;
0829 }
0830 
0831 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
0832 {
0833     struct list_head *tmp, *tmp2;
0834     struct octeon_soft_command *sc;
0835 
0836     octeon_free_sc_zombie_list(oct);
0837 
0838     spin_lock_bh(&oct->sc_buf_pool.lock);
0839 
0840     list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
0841         list_del(tmp);
0842 
0843         sc = (struct octeon_soft_command *)tmp;
0844 
0845         lio_dma_free(oct, sc->size, sc, sc->dma_addr);
0846     }
0847 
0848     INIT_LIST_HEAD(&oct->sc_buf_pool.head);
0849 
0850     spin_unlock_bh(&oct->sc_buf_pool.lock);
0851 
0852     return 0;
0853 }
0854 
0855 struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
0856                               u32 datasize,
0857                               u32 rdatasize,
0858                               u32 ctxsize)
0859 {
0860     u64 dma_addr;
0861     u32 size;
0862     u32 offset = sizeof(struct octeon_soft_command);
0863     struct octeon_soft_command *sc = NULL;
0864     struct list_head *tmp;
0865 
0866     if (!rdatasize)
0867         rdatasize = 16;
0868 
0869     WARN_ON((offset + datasize + rdatasize + ctxsize) >
0870            SOFT_COMMAND_BUFFER_SIZE);
0871 
0872     spin_lock_bh(&oct->sc_buf_pool.lock);
0873 
0874     if (list_empty(&oct->sc_buf_pool.head)) {
0875         spin_unlock_bh(&oct->sc_buf_pool.lock);
0876         return NULL;
0877     }
0878 
0879     list_for_each(tmp, &oct->sc_buf_pool.head)
0880         break;
0881 
0882     list_del(tmp);
0883 
0884     atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
0885 
0886     spin_unlock_bh(&oct->sc_buf_pool.lock);
0887 
0888     sc = (struct octeon_soft_command *)tmp;
0889 
0890     dma_addr = sc->dma_addr;
0891     size = sc->size;
0892 
0893     memset(sc, 0, sc->size);
0894 
0895     sc->dma_addr = dma_addr;
0896     sc->size = size;
0897 
0898     if (ctxsize) {
0899         sc->ctxptr = (u8 *)sc + offset;
0900         sc->ctxsize = ctxsize;
0901     }
0902 
0903     /* Start data at 128 byte boundary */
0904     offset = (offset + ctxsize + 127) & 0xffffff80;
0905 
0906     if (datasize) {
0907         sc->virtdptr = (u8 *)sc + offset;
0908         sc->dmadptr = dma_addr + offset;
0909         sc->datasize = datasize;
0910     }
0911 
0912     /* Start rdata at 128 byte boundary */
0913     offset = (offset + datasize + 127) & 0xffffff80;
0914 
0915     if (rdatasize) {
0916         WARN_ON(rdatasize < 16);
0917         sc->virtrptr = (u8 *)sc + offset;
0918         sc->dmarptr = dma_addr + offset;
0919         sc->rdatasize = rdatasize;
0920         sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
0921     }
0922 
0923     return sc;
0924 }
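
/* Illustrative layout sketch (hypothetical sizes, not taken from the
 * driver): with sizeof(struct octeon_soft_command) == 200, ctxsize == 24,
 * datasize == 64 and rdatasize == 16, the single DMA buffer is carved up as
 *
 *     offset   0: struct octeon_soft_command
 *     offset 200: ctxptr (24 bytes)
 *     offset 256: virtdptr/dmadptr  (224 rounded up to the next 128)
 *     offset 384: virtrptr/dmarptr  (320 rounded up to the next 128)
 *                 status_word = virtrptr + rdatasize - 8 = offset 392
 *
 * "(x + 127) & 0xffffff80" simply rounds x up to a 128-byte boundary, and
 * rdatasize is forced to at least 16 so the 8-byte status_word always fits.
 */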
0925 
0926 void octeon_free_soft_command(struct octeon_device *oct,
0927                   struct octeon_soft_command *sc)
0928 {
0929     spin_lock_bh(&oct->sc_buf_pool.lock);
0930 
0931     list_add_tail(&sc->node, &oct->sc_buf_pool.head);
0932 
0933     atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
0934 
0935     spin_unlock_bh(&oct->sc_buf_pool.lock);
0936 }