// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "fun_dev.h"
#include "fun_queue.h"

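/* Allocate DMA-coherent memory for a queue. This covers the HW descriptor
 * ring, an optional 64-bit head write-back area placed past the end of the
 * ring, and an optional SW state ring. Returns the VA of the HW ring and,
 * through the out parameters, its DMA address and the VAs of the SW ring and
 * the write-back area.
 */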
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va)
{
	int dev_node = dev_to_node(dma_dev);
	size_t dma_sz;
	void *va;

	if (numa_node == NUMA_NO_NODE)
		numa_node = dev_node;

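	/* The optional write-back slot immediately follows the HW ring. */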
	dma_sz = hw_desc_sz * depth;
	if (wb)
		dma_sz += sizeof(u64);

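	/* Temporarily adopt the requested NUMA node so dma_alloc_coherent()
	 * places the ring there.
	 */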
	set_dev_node(dma_dev, numa_node);
	va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
	set_dev_node(dma_dev, dev_node);
	if (!va)
		return NULL;

	if (sw_desc_sz) {
		*sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
				       numa_node);
		if (!*sw_va) {
			dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
			return NULL;
		}
	}

	if (wb)
		*wb_va = va + dma_sz - sizeof(u64);
	return va;
}
EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);

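/* Free queue memory obtained from fun_alloc_ring_mem(). */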
void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
		       bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
{
	if (hw_va) {
		size_t sz = depth * hw_desc_sz;

		if (wb)
			sz += sizeof(u64);
		dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
	}
	kvfree(sw_va);
}
EXPORT_SYMBOL_GPL(fun_free_ring_mem);

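/* Submit an admin command to create an SQ (or an RQ, with
 * FUN_ADMIN_EPSQ_CREATE_FLAG_RQ) on the device. On success the queue's
 * doorbell address is returned in *dbp and, if the device allocated the
 * queue ID, the ID is returned in *sqidp.
 */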
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
{
	union {
		struct fun_admin_epsq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	dma_addr_t wb_addr;
	u32 hw_qid;
	int rc;

	if (sq_depth > fdev->q_depth)
		return -EINVAL;
	if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
		sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));

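	/* The head write-back slot sits right after the last SQE. */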
	wb_addr = dma_addr + (sq_depth << sqe_size_log2);

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
						    sizeof(cmd.req));
	cmd.req.u.create =
		FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
					       sqid, cqid, sqe_size_log2,
					       sq_depth - 1, dma_addr, 0,
					       coal_nentries, coal_usec,
					       irq_num, scan_start_id,
					       scan_end_id, 0,
					       rq_buf_size_log2,
					       ilog2(sizeof(u64)), wb_addr);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
				       &cmd.rsp, sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	hw_qid = be32_to_cpu(cmd.rsp.id);
	*dbp = fun_sq_db_addr(fdev, hw_qid);
	if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
		*sqidp = hw_qid;
	return rc;
}
EXPORT_SYMBOL_GPL(fun_sq_create);

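/* Submit an admin command to create a CQ on the device. On success the
 * queue's doorbell address is returned in *dbp and, if the device allocated
 * the queue ID, the ID is returned in *cqidp.
 */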
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
		  u32 __iomem **dbp)
{
	union {
		struct fun_admin_epcq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	u32 hw_qid;
	int rc;

	if (cq_depth > fdev->q_depth)
		return -EINVAL;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
						    sizeof(cmd.req));
	cmd.req.u.create =
		FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
					       cqid, rqid, cqe_size_log2,
					       cq_depth - 1, dma_addr, tailroom,
					       headroom / 2, 0, coal_nentries,
					       coal_usec, irq_num,
					       scan_start_id, scan_end_id, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
				       &cmd.rsp, sizeof(cmd.rsp), 0);
	if (rc)
		return rc;

	hw_qid = be32_to_cpu(cmd.rsp.id);
	*dbp = fun_cq_db_addr(fdev, hw_qid);
	if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
		*cqidp = hw_qid;
	return rc;
}
EXPORT_SYMBOL_GPL(fun_cq_create);

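/* True if the SQ reports its head pointer through a DMA write-back area. */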
static bool fun_sq_is_head_wb(const struct fun_queue *funq)
{
	return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
}

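/* Unmap and release the pages posted to an RQ. */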
static void fun_clean_rq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	struct fun_rq_info *rqinfo;
	unsigned int i;

	for (i = 0; i < funq->rq_depth; i++) {
		rqinfo = &funq->rq_info[i];
		if (rqinfo->page) {
			dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(rqinfo->page);
			rqinfo->page = NULL;
		}
	}
}

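/* Allocate and DMA-map a page for each RQ descriptor and initialize the
 * corresponding RQEs. On failure the pages mapped so far are left for
 * fun_clean_rq() to release.
 */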
static int fun_fill_rq(struct fun_queue *funq)
{
	struct device *dev = funq->fdev->dev;
	int i, node = dev_to_node(dev);
	struct fun_rq_info *rqinfo;

	for (i = 0; i < funq->rq_depth; i++) {
		rqinfo = &funq->rq_info[i];
		rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
		if (unlikely(!rqinfo->page))
			return -ENOMEM;

		rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
			put_page(rqinfo->page);
			rqinfo->page = NULL;
			return -ENOMEM;
		}

		funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
	}

	funq->rq_tail = funq->rq_depth - 1;
	return 0;
}

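/* Advance the RQ read position for a newly arrived response at @buf_offset.
 * If the offset wrapped to a new buffer, the previous buffer is exhausted:
 * hand it back to the device and move on to the next RQ entry.
 */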
static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
{
	if (buf_offset <= funq->rq_buf_offset) {
		struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
		struct device *dev = funq->fdev->dev;

		dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
					   DMA_FROM_DEVICE);
		funq->num_rqe_to_fill++;
		if (++funq->rq_buf_idx == funq->rq_depth)
			funq->rq_buf_idx = 0;
	}
	funq->rq_buf_offset = buf_offset;
}

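/* Given a response that arrived in one or more RQ buffers, return a pointer
 * to its data as one contiguous chunk. If the data is in a single buffer the
 * address within that buffer is returned and *need_free remains false;
 * otherwise the fragments are gathered into a kmalloc'ed bounce buffer that
 * the caller must free.
 */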
static void *fun_data_from_rq(struct fun_queue *funq,
			      const struct fun_rsp_common *rsp, bool *need_free)
{
	u32 bufoff, total_len, remaining, fragsize, dataoff;
	struct device *dma_dev = funq->fdev->dev;
	const struct fun_dataop_rqbuf *databuf;
	const struct fun_dataop_hdr *dataop;
	const struct fun_rq_info *rqinfo;
	void *data;

	dataop = (void *)rsp + rsp->suboff8 * 8;
	total_len = be32_to_cpu(dataop->total_len);

	if (likely(dataop->nsgl == 1)) {
		databuf = (struct fun_dataop_rqbuf *)dataop->imm;
		bufoff = be32_to_cpu(databuf->bufoff);
		fun_rq_update_pos(funq, bufoff);
		rqinfo = &funq->rq_info[funq->rq_buf_idx];
		dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
					total_len, DMA_FROM_DEVICE);
		*need_free = false;
		return page_address(rqinfo->page) + bufoff;
	}

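	/* The data spans multiple RQ buffers; gather it into one bounce
	 * buffer.
	 */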
	data = kmalloc(total_len, GFP_ATOMIC);
	if (likely(data))
		*need_free = true;

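	/* Consume the RQ buffers holding the response even if the bounce
	 * buffer allocation failed, so they are resynced and reposted.
	 */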
	dataoff = 0;
	for (remaining = total_len; remaining; remaining -= fragsize) {
		fun_rq_update_pos(funq, 0);
		fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
		if (data) {
			rqinfo = &funq->rq_info[funq->rq_buf_idx];
			dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
						DMA_FROM_DEVICE);
			memcpy(data + dataoff, page_address(rqinfo->page),
			       fragsize);
			dataoff += fragsize;
		}
	}
	return data;
}

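/* Process up to @max new CQEs on a CQ (0 means up to one less than a full
 * ring). Hands each CQE to the queue's callback and frees any bounce buffer
 * used to gather scattered response data. Returns the number of CQEs
 * processed. Doorbells are left to the caller.
 */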
unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
{
	const struct fun_cqe_info *info;
	struct fun_rsp_common *rsp;
	unsigned int new_cqes;
	u16 sf_p, flags;
	bool need_free;
	void *cqe;

	if (!max)
		max = funq->cq_depth - 1;

	for (new_cqes = 0; new_cqes < max; new_cqes++) {
		cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
		info = funq_cqe_info(funq, cqe);
		sf_p = be16_to_cpu(info->sf_p);

		if ((sf_p & 1) != funq->cq_phase)
			break;

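		/* Ensure the phase bit is read before the rest of the CQE. */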
		dma_rmb();

		if (++funq->cq_head == funq->cq_depth) {
			funq->cq_head = 0;
			funq->cq_phase = !funq->cq_phase;
		}

		rsp = cqe;
		flags = be16_to_cpu(rsp->flags);

		need_free = false;
		if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
			rsp = fun_data_from_rq(funq, rsp, &need_free);
			if (!rsp) {
				rsp = cqe;
				rsp->len8 = 1;
				if (rsp->ret == 0)
					rsp->ret = ENOMEM;
			}
		}

		if (funq->cq_cb)
			funq->cq_cb(funq, funq->cb_data, rsp, info);
		if (need_free)
			kfree(rsp);
	}

	dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
		funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
	return new_cqes;
}

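/* Like __fun_process_cq() but also reposts any RQ buffers that were consumed
 * and rings the CQ doorbell with the IRQ-rearm flag.
 */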
unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
{
	unsigned int processed;
	u32 db;

	processed = __fun_process_cq(funq, max);

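	/* Advance the RQ tail over the reposted buffers and tell the device. */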
	if (funq->num_rqe_to_fill) {
		funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
				funq->rq_depth;
		funq->num_rqe_to_fill = 0;
		writel(funq->rq_tail, funq->rq_db);
	}

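	/* Update the CQ head and rearm the interrupt. */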
	db = funq->cq_head | FUN_DB_IRQ_ARM_F;
	writel(db, funq->cq_db);
	return processed;
}

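/* Allocate an SQ's descriptor ring, including its head write-back slot if
 * the SQ uses one.
 */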
static int fun_alloc_sqes(struct fun_queue *funq)
{
	funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
					   1 << funq->sqe_size_log2, 0,
					   fun_sq_is_head_wb(funq),
					   NUMA_NO_NODE, &funq->sq_dma_addr,
					   NULL, &funq->sq_head);
	return funq->sq_cmds ? 0 : -ENOMEM;
}

static int fun_alloc_cqes(struct fun_queue *funq)
{
	funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
					1 << funq->cqe_size_log2, 0, false,
					NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
					NULL);
	return funq->cqes ? 0 : -ENOMEM;
}

static int fun_alloc_rqes(struct fun_queue *funq)
{
	funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
					sizeof(*funq->rqes),
					sizeof(*funq->rq_info), false,
					NUMA_NO_NODE, &funq->rq_dma_addr,
					(void **)&funq->rq_info, NULL);
	return funq->rqes ? 0 : -ENOMEM;
}

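/* Free a queue's ring memory and its structure, returning any posted RQ
 * pages before freeing the RQ rings.
 */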
void fun_free_queue(struct fun_queue *funq)
{
	struct device *dev = funq->fdev->dev;

	fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
			  funq->cqes, funq->cq_dma_addr, NULL);
	fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
			  fun_sq_is_head_wb(funq), funq->sq_cmds,
			  funq->sq_dma_addr, NULL);

	if (funq->rqes) {
		fun_clean_rq(funq);
		fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
				  false, funq->rqes, funq->rq_dma_addr,
				  funq->rq_info);
	}

	kfree(funq);
}

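/* Allocate a funq structure and the queue memory for its component queues,
 * and assign the queue IDs. Doorbell addresses are set here only for the
 * implicitly created queues (SQ/CQ 0). Does not create the queues on the
 * device; see fun_create_queue() for that.
 */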
struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
				  const struct fun_queue_alloc_req *req)
{
	struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);

	if (!funq)
		return NULL;

	funq->fdev = fdev;
	spin_lock_init(&funq->sq_lock);

	funq->qid = qid;

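	/* Initial CQ/SQ/RQ IDs */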
	if (req->rq_depth) {
		funq->cqid = 2 * qid;
		if (funq->qid) {
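			/* I/O queues use adjacent IDs: rqid = cqid, sqid = rqid + 1 */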
			funq->rqid = funq->cqid;
			funq->sqid = funq->rqid + 1;
		} else {
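			/* Admin queue */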
			funq->sqid = 0;
			funq->rqid = 1;
		}
	} else {
		funq->cqid = qid;
		funq->sqid = qid;
	}

	funq->cq_flags = req->cq_flags;
	funq->sq_flags = req->sq_flags;

	funq->cqe_size_log2 = req->cqe_size_log2;
	funq->sqe_size_log2 = req->sqe_size_log2;

	funq->cq_depth = req->cq_depth;
	funq->sq_depth = req->sq_depth;

	funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
	funq->cq_intcoal_usec = req->cq_intcoal_usec;

	funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
	funq->sq_intcoal_usec = req->sq_intcoal_usec;

	if (fun_alloc_cqes(funq))
		goto free_funq;

	funq->cq_phase = 1;

	if (fun_alloc_sqes(funq))
		goto free_funq;

	if (req->rq_depth) {
		funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
		funq->rq_depth = req->rq_depth;
		funq->rq_buf_offset = -1;

		if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
			goto free_funq;
	}

	funq->cq_vector = -1;
	funq->cqe_info_offset = (1 << funq->cqe_size_log2) -
				sizeof(struct fun_cqe_info);

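	/* SQ/CQ 0 are implicitly created; assign their doorbells now.
	 * Other queues are assigned doorbells at their explicit creation.
	 */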
	if (funq->sqid == 0)
		funq->sq_db = fun_sq_db_addr(fdev, 0);
	if (funq->cqid == 0)
		funq->cq_db = fun_cq_db_addr(fdev, 0);

	return funq;

free_funq:
	fun_free_queue(funq);
	return NULL;
}

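/* Create a funq's CQ on the device. */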
static int fun_create_cq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	unsigned int rqid;
	int rc;

	rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
	       funq->rqid : FUN_HCI_ID_INVALID;
	rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
			   funq->cqe_size_log2, funq->cq_depth,
			   funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
			   funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
			   &funq->cqid, &funq->cq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);

	return rc;
}

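/* Create a funq's SQ on the device. */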
static int fun_create_sq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	int rc;

	rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
			   funq->sqe_size_log2, funq->sq_depth,
			   funq->sq_dma_addr, funq->sq_intcoal_nentries,
			   funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
			   0, &funq->sqid, &funq->sq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);

	return rc;
}

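/* Create a funq's RQ on the device. An RQ is an SQ of RQ buffer descriptors,
 * created through fun_sq_create() with the RQ flag.
 */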
int fun_create_rq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	int rc;

	rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
			   funq->rq_depth, funq->rq_dma_addr, 0, 0,
			   funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
			   &funq->rq_db);
	if (!rc)
		dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);

	return rc;
}

static unsigned int funq_irq(struct fun_queue *funq)
{
	return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
}

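/* Request the IRQ for a queue's CQ vector, naming it after the device and
 * queue ID.
 */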
int fun_request_irq(struct fun_queue *funq, const char *devname,
		    irq_handler_t handler, void *data)
{
	int rc;

	if (funq->cq_vector < 0)
		return -EINVAL;

	funq->irq_handler = handler;
	funq->irq_data = data;

	snprintf(funq->irqname, sizeof(funq->irqname),
		 funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);

	rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
	if (rc)
		funq->irq_handler = NULL;

	return rc;
}

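/* Create all component queues of a funq on the device. */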
int fun_create_queue(struct fun_queue *funq)
{
	int rc;

	rc = fun_create_cq(funq);
	if (rc)
		return rc;

	if (funq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto release_cq;
	}

	rc = fun_create_sq(funq);
	if (rc)
		goto release_rq;

	return 0;

release_rq:
	/* An RQ exists only if one was requested; don't destroy SQ ID 0. */
	if (funq->rq_depth)
		fun_destroy_sq(funq->fdev, funq->rqid);
release_cq:
	fun_destroy_cq(funq->fdev, funq->cqid);
	return rc;
}

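/* Release a queue's IRQ if one was requested. */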
void fun_free_irq(struct fun_queue *funq)
{
	if (funq->irq_handler) {
		unsigned int vector = funq_irq(funq);

		free_irq(vector, funq->irq_data);
		funq->irq_handler = NULL;
		funq->irq_data = NULL;
	}
}