// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"

static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *)data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}

static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}

static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}

static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

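		/* Free single queue */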
		kfree_sensitive(queue->head);
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}

static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

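		/* Init queue spin lock */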
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}

static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}

static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}

static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

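	/* Clean up for each queue */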
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}

static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

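	/* Qsize in dwords, needed for SADDR config, 1-word free */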
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
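	/* Qsize in bytes to create space for alignment */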
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

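	/* Per queue initialization */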
	for (i = 0; i < cptvf->num_queues; i++) {
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
				 rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
					"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

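		/*
		 * Make the queue circular, tie back the last chunk entry
		 * to the head
		 */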
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}

static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

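	/* Setup command queues */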
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}

static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}

static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}

static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;

	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

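	/* Create worker tasklets to process completed requests */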
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}

static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}

void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
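	/* Doorbell count is in words: num of instructions * 8 words each */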
	vqx_dbell.s.dbell_cnt = val * 8;
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}

static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}

static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
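	/* Enable software error interrupts */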
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
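	/* Enable mailbox interrupts */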
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
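	/* Enable done interrupts */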
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}

static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
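	/* Write 1 to clear doorbell overflow interrupt */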
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
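	/* Write 1 to clear instruction NCB read error interrupt */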
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
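	/* Write 1 to clear NCB response write error interrupt */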
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
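	/* Write 1 to clear mailbox interrupt */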
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
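	/* Write 1 to clear software error interrupt */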
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);

	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
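		/* Clear doorbell count */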
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
			"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
			"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
			"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}

static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}

static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}

static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
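	/* Read the number of completions */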
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

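		/*
		 * Acknowledge the number of scheduled completions
		 * for processing
		 */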
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}

static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}

static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}

static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

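	/* Disable the VQ */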
	cptvf_write_vq_ctl(cptvf, 0);
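	/* Reset the doorbell */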
	otx_cptvf_write_vq_doorbell(cptvf, 0);
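	/* Clear inflight count */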
	cptvf_write_vq_inprog(cptvf, 0);
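	/* Write VQ SADDR */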
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
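	/* Configure timer and count thresholds for done coalescing */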
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
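	/* Enable the VQ */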
	cptvf_write_vq_ctl(cptvf, 1);
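	/* Flag the device as ready */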
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}

static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
}

static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
}

static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group must be less than maximum available groups %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}

static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_numwait(cptvf));
}

static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}

static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};

static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto release_regions;
	}

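	/* Map VF's configuration registers */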
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

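	/* Enable mailbox and software error interrupts */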
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

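	/* Check whether the CPT PF is up and ready */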
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

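	/* CPT VF software resources initialization */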
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}

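	/* Convey VQ SIZE to PF */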
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

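	/* CPT VF device initialization */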
	cptvf_device_init(cptvf);

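	/* Send msg to PF to assign this VF to the required group */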
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

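	/* Enable done interrupts */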
	cptvf_enable_done_interrupts(cptvf);

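	/* Set irq affinity masks */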
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

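	/* Initialize algorithms and set ops */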
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

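	/* Convey DOWN to PF */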
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

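/* Supported devices */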
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);