0001
0002 #include <linux/cpumask.h>
0003 #include <linux/dma-mapping.h>
0004 #include <linux/dmapool.h>
0005 #include <linux/delay.h>
0006 #include <linux/gfp.h>
0007 #include <linux/kernel.h>
0008 #include <linux/module.h>
0009 #include <linux/pci_regs.h>
0010 #include <linux/vmalloc.h>
0011 #include <linux/pci.h>
0012
0013 #include "nitrox_dev.h"
0014 #include "nitrox_common.h"
0015 #include "nitrox_req.h"
0016 #include "nitrox_csr.h"
0017
0018 #define CRYPTO_CTX_SIZE 256
0019
0020
0021 #define PKTIN_Q_ALIGN_BYTES 16
0022
0023 #define AQM_Q_ALIGN_BYTES 32
0024
/**
 * nitrox_cmdq_init - allocate and initialize one hardware command queue
 * @cmdq: command queue to set up; caller must have filled in cmdq->ndev
 *        and cmdq->instr_size before calling
 * @align_bytes: hardware-required alignment for the queue base address
 *
 * Allocates a coherent DMA buffer big enough for ndev->qlen instructions
 * plus @align_bytes of slack, then computes an aligned DMA address and the
 * matching CPU virtual address. Also initializes the queue's locks, lists,
 * backlog-flush work and counters.
 *
 * Return: 0 on success, -ENOMEM if the DMA allocation fails.
 */
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* over-allocate by align_bytes so the base can be aligned in software */
	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
						&cmdq->unalign_dma,
						GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	/* align the DMA address, then shift the CPU address by the same offset */
	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
0052
0053 static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
0054 {
0055 cmdq->write_idx = 0;
0056 atomic_set(&cmdq->pending_count, 0);
0057 atomic_set(&cmdq->backlog_count, 0);
0058 }
0059
0060 static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
0061 {
0062 struct nitrox_device *ndev;
0063
0064 if (!cmdq)
0065 return;
0066
0067 if (!cmdq->unalign_base)
0068 return;
0069
0070 ndev = cmdq->ndev;
0071 cancel_work_sync(&cmdq->backlog_qflush);
0072
0073 dma_free_coherent(DEV(ndev), cmdq->qsize,
0074 cmdq->unalign_base, cmdq->unalign_dma);
0075 nitrox_cmdq_reset(cmdq);
0076
0077 cmdq->dbell_csr_addr = NULL;
0078 cmdq->compl_cnt_csr_addr = NULL;
0079 cmdq->unalign_base = NULL;
0080 cmdq->base = NULL;
0081 cmdq->unalign_dma = 0;
0082 cmdq->dma = 0;
0083 cmdq->qsize = 0;
0084 cmdq->instr_size = 0;
0085 }
0086
0087 static void nitrox_free_aqm_queues(struct nitrox_device *ndev)
0088 {
0089 int i;
0090
0091 for (i = 0; i < ndev->nr_queues; i++) {
0092 nitrox_cmdq_cleanup(ndev->aqmq[i]);
0093 kfree_sensitive(ndev->aqmq[i]);
0094 ndev->aqmq[i] = NULL;
0095 }
0096 }
0097
/**
 * nitrox_alloc_aqm_queues - allocate one AQM command queue per device queue
 * @ndev: nitrox device
 *
 * Each queue descriptor is allocated individually (node-local), its doorbell
 * and command-count CSR addresses are resolved, and its DMA ring is set up
 * via nitrox_cmdq_init(). On any failure all queues allocated so far are
 * released before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev)
{
	int i, err;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
		if (!cmdq) {
			err = -ENOMEM;
			goto aqmq_fail;
		}

		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct aqmq_command_s);

		/* AQM doorbell register for this queue */
		offset = AQMQ_DRBLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		/* AQM command completion count register for this queue */
		offset = AQMQ_CMD_CNTX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
		if (err) {
			/* not yet published in ndev->aqmq[], free it here */
			kfree_sensitive(cmdq);
			goto aqmq_fail;
		}
		ndev->aqmq[i] = cmdq;
	}

	return 0;

aqmq_fail:
	/* release every queue published so far */
	nitrox_free_aqm_queues(ndev);
	return err;
}
0137
0138 static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
0139 {
0140 int i;
0141
0142 for (i = 0; i < ndev->nr_queues; i++) {
0143 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
0144
0145 nitrox_cmdq_cleanup(cmdq);
0146 }
0147 kfree(ndev->pkt_inq);
0148 ndev->pkt_inq = NULL;
0149 }
0150
/**
 * nitrox_alloc_pktin_queues - allocate the packet-input command queues
 * @ndev: nitrox device
 *
 * Allocates one contiguous node-local array of nr_queues queue descriptors,
 * resolves each queue's doorbell and SLC-count CSR addresses, and sets up
 * each DMA ring via nitrox_cmdq_init(). On any failure every queue created
 * so far, plus the array itself, is freed before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
				     sizeof(struct nitrox_cmdq),
				     GFP_KERNEL, ndev->node);
	if (!ndev->pkt_inq)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_inq[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* packet-input doorbell register for this queue */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		/* solicited-completion count register for this queue */
		offset = NPS_PKT_SLC_CNTSX(i);
		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
		if (err)
			goto pktq_fail;
	}
	return 0;

pktq_fail:
	/* cleans up partially-initialized queues and frees the array */
	nitrox_free_pktin_queues(ndev);
	return err;
}
0187
0188 static int create_crypto_dma_pool(struct nitrox_device *ndev)
0189 {
0190 size_t size;
0191
0192
0193 size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
0194 ndev->ctx_pool = dma_pool_create("nitrox-context",
0195 DEV(ndev), size, 16, 0);
0196 if (!ndev->ctx_pool)
0197 return -ENOMEM;
0198
0199 return 0;
0200 }
0201
0202 static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
0203 {
0204 if (!ndev->ctx_pool)
0205 return;
0206
0207 dma_pool_destroy(ndev->ctx_pool);
0208 ndev->ctx_pool = NULL;
0209 }
0210
0211
0212
0213
0214
/**
 * crypto_alloc_context - allocate a crypto context from the device pool
 * @ndev: nitrox device to allocate from
 *
 * Allocates a zeroed buffer from ndev->ctx_pool. The buffer begins with a
 * struct ctx_hdr recording the pool and DMA address of the allocation;
 * ctx_dma points just past that header, at the area the hardware uses.
 * A separately kmalloc'ed struct crypto_ctx_hdr describing the allocation
 * is returned to the caller.
 *
 * Return: pointer to the crypto_ctx_hdr on success, NULL on failure.
 * Must be released with crypto_free_context().
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	struct crypto_ctx_hdr *chdr;
	void *vaddr;
	dma_addr_t dma;

	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
	if (!chdr)
		return NULL;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		kfree(chdr);
		return NULL;
	}

	/* fill the in-band header at the start of the pool buffer */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	chdr->pool = ndev->ctx_pool;
	chdr->dma = dma;
	chdr->vaddr = vaddr;

	return chdr;
}
0244
0245
0246
0247
0248
0249 void crypto_free_context(void *ctx)
0250 {
0251 struct crypto_ctx_hdr *ctxp;
0252
0253 if (!ctx)
0254 return;
0255
0256 ctxp = ctx;
0257 dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
0258 kfree(ctxp);
0259 }
0260
0261
0262
0263
0264
0265
0266
0267
0268
/**
 * nitrox_common_sw_init - allocate the device's software resources
 * @ndev: nitrox device
 *
 * Creates the crypto context DMA pool, then the packet-input queues, then
 * the AQM queues. On failure everything already allocated is unwound in
 * reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * Fix: the previous version destroyed the context pool when
 * nitrox_alloc_pktin_queues() failed but then fell through to
 * nitrox_alloc_aqm_queues(); if that succeeded, 0 was returned even though
 * pkt_inq was NULL and the pool was gone. Each step now unwinds and
 * returns immediately on failure.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err)
		goto destroy_ctx_pool;

	err = nitrox_alloc_aqm_queues(ndev);
	if (err)
		goto free_pktin_queues;

	return 0;

free_pktin_queues:
	nitrox_free_pktin_queues(ndev);
destroy_ctx_pool:
	destroy_crypto_dma_pool(ndev);
	return err;
}
0290
0291
0292
0293
0294
/**
 * nitrox_common_sw_cleanup - free the device's software resources
 * @ndev: nitrox device
 *
 * Reverse of nitrox_common_sw_init(): both queue sets are torn down before
 * the context pool they were created after.
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_aqm_queues(ndev);
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}