// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#include "fun_queue.h"
#include "fun_dev.h"

#define FUN_ADMIN_CMD_TO_MS 3000

enum {
	AQA_ASQS_SHIFT = 0,
	AQA_ACQS_SHIFT = 16,
	AQA_MIN_QUEUE_SIZE = 2,
	AQA_MAX_QUEUE_SIZE = 4096
};

/* Context tracked for each outstanding admin command. */
struct fun_cmd_ctx {
	fun_admin_callback_t cb;  /* callback to invoke on completion */
	void *cb_data;            /* argument passed to the callback */
	int cpu;                  /* CPU hint for releasing the tag; -1 if the slot is free */
};

/* Context used to wait for a synchronous admin command. */
struct fun_sync_cmd_ctx {
	struct completion compl; /* signaled when the command completes */
	u8 *rsp_buf;             /* caller's response buffer, may be NULL */
	unsigned int rsp_len;    /* size of the response buffer */
	u8 rsp_status;           /* status returned by the device */
};

/* Wait for the device's CSTS.RDY bit to match @enabled, up to the timeout
 * advertised in CAP.TO.
 */
static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
{
	unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	unsigned long deadline;

	deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms units */

	for (;;) {
		u32 csts = readl(fdev->bar + NVME_REG_CSTS);

		if (csts == ~0) {
			dev_err(fdev->dev, "CSTS register read %#x\n", csts);
			return -EIO;
		}

		if ((csts & NVME_CSTS_RDY) == bit)
			return 0;

		if (time_is_before_jiffies(deadline))
			break;

		msleep(100);
	}

	dev_err(fdev->dev,
		"Timed out waiting for device to indicate RDY %u; aborting %s\n",
		enabled, enabled ? "initialization" : "reset");
	return -ETIMEDOUT;
}

/* Check CSTS and return an error if it is unreadable or its RDY bit does not
 * have the expected value.
 */
static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);
	u32 actual_rdy = csts & NVME_CSTS_RDY;

	if (csts == ~0) {
		dev_err(fdev->dev, "CSTS register read %#x\n", csts);
		return -EIO;
	}
	if (actual_rdy != expected_rdy) {
		dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
		return -EINVAL;
	}
	return 0;
}

/* Check that CSTS RDY has the expected value, then write the updated CC
 * register and wait for RDY to reflect the new enable state.
 */
static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
{
	int rc = fun_check_csts_rdy(fdev, initial_rdy);

	if (rc)
		return rc;
	writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
	return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
}

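/* Disable the controller and wait for it to report not ready. */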
static int fun_disable_ctrl(struct fun_dev *fdev)
{
	fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
	return fun_update_cc_enable(fdev, 1);
}

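/* Program the admin queue entry sizes and memory page size into CC, set the
 * enable bit, and wait for the controller to become ready.
 */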
static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
			   u32 admin_sqesz_log2)
{
	fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
		       (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
		       ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
		       NVME_CC_ENABLE;

	return fun_update_cc_enable(fdev, 0);
}

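/* Request the device's memory regions and map BAR 0. */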
static int fun_map_bars(struct fun_dev *fdev, const char *name)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	int err;

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev,
			"Couldn't get PCI memory resources, err %d\n", err);
		return err;
	}

	fdev->bar = pci_ioremap_bar(pdev, 0);
	if (!fdev->bar) {
		dev_err(&pdev->dev, "Couldn't map BAR 0\n");
		pci_release_mem_regions(pdev);
		return -ENOMEM;
	}

	return 0;
}

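/* Unmap BAR 0 and release the device's memory regions. */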
static void fun_unmap_bars(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	if (fdev->bar) {
		iounmap(fdev->bar);
		fdev->bar = NULL;
		pci_release_mem_regions(pdev);
	}
}

static int fun_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		dev_err(dev, "DMA mask configuration failed, err %d\n", err);
	return err;
}

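/* IRQ handler for the admin queue's interrupt vector. */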
static irqreturn_t fun_admin_irq(int irq, void *data)
{
	struct fun_queue *funq = data;

	return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
}

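/* Completion handler for admin CQEs. Asynchronous events are forwarded to the
 * adminq event callback; command responses are matched to their pending
 * command by CID, the issuer's callback is run, and the command tag is freed.
 */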
static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
				   void *entry, const struct fun_cqe_info *info)
{
	const struct fun_admin_rsp_common *rsp_common = entry;
	struct fun_dev *fdev = funq->fdev;
	struct fun_cmd_ctx *cmd_ctx;
	int cpu;
	u16 cid;

	if (info->sqhd == cpu_to_be16(0xffff)) {
		dev_dbg(fdev->dev, "adminq event");
		if (fdev->adminq_cb)
			fdev->adminq_cb(fdev, entry);
		return;
	}

	cid = be16_to_cpu(rsp_common->cid);
	dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
		rsp_common->op, rsp_common->ret);

	cmd_ctx = &fdev->cmd_ctx[cid];
	if (cmd_ctx->cpu < 0) {
		dev_err(fdev->dev,
			"admin CQE with CID=%u, op=%u does not match a pending command\n",
			cid, rsp_common->op);
		return;
	}

	if (cmd_ctx->cb)
		cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));

	cpu = cmd_ctx->cpu;
	cmd_ctx->cpu = -1;
	sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
}

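/* Allocate the array of per-tag command contexts and mark all entries free. */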
static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
{
	unsigned int i;

	fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
	if (!fdev->cmd_ctx)
		return -ENOMEM;

	for (i = 0; i < ntags; i++)
		fdev->cmd_ctx[i].cpu = -1;

	return 0;
}

/* Allocate and enable the admin queue and assign it the first IRQ vector. */
static int fun_enable_admin_queue(struct fun_dev *fdev,
				  const struct fun_dev_params *areq)
{
	struct fun_queue_alloc_req qreq = {
		.cqe_size_log2 = areq->cqe_size_log2,
		.sqe_size_log2 = areq->sqe_size_log2,
		.cq_depth = areq->cq_depth,
		.sq_depth = areq->sq_depth,
		.rq_depth = areq->rq_depth,
	};
	unsigned int ntags = areq->sq_depth - 1;
	struct fun_queue *funq;
	int rc;

	if (fdev->admin_q)
		return -EEXIST;

	if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
	    areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
	    areq->cq_depth > AQA_MAX_QUEUE_SIZE)
		return -EINVAL;

	fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
	if (!fdev->admin_q)
		return -ENOMEM;

	rc = fun_init_cmd_ctx(fdev, ntags);
	if (rc)
		goto free_q;

	rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
				     GFP_KERNEL, dev_to_node(fdev->dev));
	if (rc)
		goto free_cmd_ctx;

	funq = fdev->admin_q;
	funq->cq_vector = 0;
	rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
	if (rc)
		goto free_sbq;

	fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
	fdev->adminq_cb = areq->event_cb;

	writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
	       (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
	       fdev->bar + NVME_REG_AQA);

	writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
	writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);

	rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
	if (rc)
		goto free_irq;

	if (areq->rq_depth) {
		rc = fun_create_rq(funq);
		if (rc)
			goto disable_ctrl;

		funq_rq_post(funq);
	}

	return 0;

disable_ctrl:
	fun_disable_ctrl(fdev);
free_irq:
	fun_free_irq(funq);
free_sbq:
	sbitmap_queue_free(&fdev->admin_sbq);
free_cmd_ctx:
	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;
free_q:
	fun_free_queue(fdev->admin_q);
	fdev->admin_q = NULL;
	return rc;
}

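/* Tear down the admin queue: disable the controller, free its IRQ, drain any
 * remaining completions, and release the tag and context state.
 */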
static void fun_disable_admin_queue(struct fun_dev *fdev)
{
	struct fun_queue *admq = fdev->admin_q;

	if (!admq)
		return;

	fun_disable_ctrl(fdev);

	fun_free_irq(admq);
	__fun_process_cq(admq, 0);

	sbitmap_queue_free(&fdev->admin_sbq);

	kvfree(fdev->cmd_ctx);
	fdev->cmd_ctx = NULL;

	fun_free_queue(admq);
	fdev->admin_q = NULL;
}

/* Return whether the admin queue appears to have stopped servicing commands,
 * judging by the controller status register. This check isn't exhaustive and
 * may report false negatives.
 */
static bool fun_adminq_stopped(struct fun_dev *fdev)
{
	u32 csts = readl(fdev->bar + NVME_REG_CSTS);

	return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
}

static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
{
	struct sbitmap_queue *sbq = &fdev->admin_sbq;
	struct sbq_wait_state *ws = &sbq->ws[0];
	DEFINE_SBQ_WAIT(wait);
	int tag;

	for (;;) {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		if (fdev->suppress_cmds) {
			tag = -ESHUTDOWN;
			break;
		}
		tag = sbitmap_queue_get(sbq, cpup);
		if (tag >= 0)
			break;
		schedule();
	}

	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}

/* Submit an asynchronous admin command. The caller provides a callback that is
 * invoked when the command completes; any waiting or timeout handling is the
 * caller's responsibility. Returns 0 on success or a negative error.
 */
int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
			 fun_admin_callback_t cb, void *cb_data, bool wait_ok)
{
	struct fun_queue *funq = fdev->admin_q;
	unsigned int cmdsize = cmd->len8 * 8;
	struct fun_cmd_ctx *cmd_ctx;
	int tag, cpu, rc = 0;

	if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
		return -EMSGSIZE;

	tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
	if (tag < 0) {
		if (!wait_ok)
			return -EAGAIN;
		tag = fun_wait_for_tag(fdev, &cpu);
		if (tag < 0)
			return tag;
	}

	cmd->cid = cpu_to_be16(tag);

	cmd_ctx = &fdev->cmd_ctx[tag];
	cmd_ctx->cb = cb;
	cmd_ctx->cb_data = cb_data;

	spin_lock(&funq->sq_lock);

	if (unlikely(fdev->suppress_cmds)) {
		rc = -ESHUTDOWN;
		sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
	} else {
		cmd_ctx->cpu = cpu;
		memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);

		dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
			cmd);

		if (++funq->sq_tail == funq->sq_depth)
			funq->sq_tail = 0;
		writel(funq->sq_tail, funq->sq_db);
	}
	spin_unlock(&funq->sq_lock);
	return rc;
}

/* Abandon a pending admin command by clearing the issuer's callback data.
 * Failure indicates that the command either has already completed or its
 * completion is in progress.
 */
static bool fun_abandon_admin_cmd(struct fun_dev *fd,
				  const struct fun_admin_req_common *cmd,
				  void *cb_data)
{
	u16 cid = be16_to_cpu(cmd->cid);
	struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];

	return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
}

/* Stop submission of new admin commands and wake up any processes waiting for
 * command tags. Already submitted commands are left to complete or time out.
 */
static void fun_admin_stop(struct fun_dev *fdev)
{
	spin_lock(&fdev->admin_q->sq_lock);
	fdev->suppress_cmds = true;
	spin_unlock(&fdev->admin_q->sq_lock);
	sbitmap_queue_wake_all(&fdev->admin_sbq);
}

/* The callback for synchronous admin commands. It copies the response, if any,
 * into the issuer's buffer and signals completion.
 */
static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
{
	const struct fun_admin_rsp_common *rsp_common = rsp;
	struct fun_sync_cmd_ctx *ctx = cb_data;

	if (!ctx)
		return;		/* command was abandoned */
	if (ctx->rsp_buf) {
		unsigned int rsp_len = rsp_common->len8 * 8;

		if (unlikely(rsp_len > ctx->rsp_len)) {
			dev_err(fd->dev,
				"response for op %u is %uB > response buffer %uB\n",
				rsp_common->op, rsp_len, ctx->rsp_len);
			rsp_len = ctx->rsp_len;
		}
		memcpy(ctx->rsp_buf, rsp, rsp_len);
	}
	ctx->rsp_status = rsp_common->ret;
	complete(&ctx->compl);
}

/* Submit a synchronous admin command and wait for it to complete. The response,
 * if any, is copied into @rsp, which must be at least @rspsize bytes. A zero
 * @timeout selects the default of FUN_ADMIN_CMD_TO_MS milliseconds.
 */
int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
			      struct fun_admin_req_common *cmd, void *rsp,
			      size_t rspsize, unsigned int timeout)
{
	struct fun_sync_cmd_ctx ctx = {
		.compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
		.rsp_buf = rsp,
		.rsp_len = rspsize,
	};
	unsigned int cmdlen = cmd->len8 * 8;
	unsigned long jiffies_left;
	int ret;

	ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
				   true);
	if (ret)
		return ret;

	if (!timeout)
		timeout = FUN_ADMIN_CMD_TO_MS;

	jiffies_left = wait_for_completion_timeout(&ctx.compl,
						   msecs_to_jiffies(timeout));
	if (!jiffies_left) {
		/* The command timed out. Try to abandon it so its eventual
		 * completion, if any, is ignored. If abandoning fails the
		 * command is already completing, so wait for it instead.
		 */
		if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
			dev_err(fdev->dev, "admin command timed out: %*ph\n",
				cmdlen, cmd);
			fun_admin_stop(fdev);
			/* see if the timeout was due to a queue failure */
			if (fun_adminq_stopped(fdev))
				dev_err(fdev->dev,
					"device does not accept admin commands\n");

			return -ETIMEDOUT;
		}
		wait_for_completion(&ctx.compl);
	}

	if (ctx.rsp_status) {
		dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
			ctx.rsp_status, cmdlen, cmd);
	}

	return -ctx.rsp_status;
}
EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);

/* Return the number of device resources of the requested type. */
int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
{
	union {
		struct fun_admin_res_count_req req;
		struct fun_admin_res_count_rsp rsp;
	} cmd;
	int rc;

	cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
	cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
						    0, 0);

	rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
				       sizeof(cmd), 0);
	return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
}
EXPORT_SYMBOL_GPL(fun_get_res_count);

/* Request that the instance of resource @res with the given id be deleted. */
int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
		    unsigned int flags, u32 id)
{
	struct fun_admin_generic_destroy_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
		.destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
						       flags, id)
	};

	return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_res_destroy);

/* Bind two entities of the given types and IDs. */
int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
	     unsigned int id0, enum fun_admin_bind_type type1,
	     unsigned int id1)
{
	struct {
		struct fun_admin_bind_req req;
		struct fun_admin_bind_entry entry[2];
	} cmd = {
		.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
							 sizeof(cmd)),
		.entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
		.entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
	};

	return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(fun_bind);

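/* Query the device's CQ/SQ resource counts and the size of its doorbell area
 * to determine the range of queue IDs the driver may use.
 */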
static int fun_get_dev_limits(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);
	unsigned int cq_count, sq_count, num_dbs;
	int rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
	if (rc < 0)
		return rc;
	cq_count = rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
	if (rc < 0)
		return rc;
	sq_count = rc;

	/* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
	 * device must provide additional queues.
	 */
	if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
		return -EINVAL;

	/* Calculate the max QID based on SQ/CQ/doorbell counts.
	 * SQ/CQ doorbells alternate.
	 */
	num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
		  (2 + NVME_CAP_STRIDE(fdev->cap_reg));
	fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
	fdev->kern_end_qid = fdev->max_qid + 1;
	return 0;
}

/* Allocate MSI-X IRQ vectors: at least @min_vecs, at most as many as the
 * device supports.
 */
static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
{
	int vecs, num_msix = pci_msix_vec_count(pdev);

	if (num_msix < 0)
		return num_msix;
	if (min_vecs > num_msix)
		return -ERANGE;

	vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
	if (vecs > 0) {
		dev_info(&pdev->dev,
			 "Allocated %d IRQ vectors of %d requested\n",
			 vecs, num_msix);
	} else {
		dev_err(&pdev->dev,
			"Unable to allocate at least %u IRQ vectors\n",
			min_vecs);
	}
	return vecs;
}

/* Allocate the IRQ manager state: a bitmap of the allocated vectors, with
 * vector 0 reserved for the admin queue.
 */
static int fun_alloc_irq_mgr(struct fun_dev *fdev)
{
	fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
	if (!fdev->irq_map)
		return -ENOMEM;

	spin_lock_init(&fdev->irqmgr_lock);
	/* mark IRQ 0 allocated, it is used by the admin queue */
	__set_bit(0, fdev->irq_map);
	fdev->irqs_avail = fdev->num_irqs - 1;
	return 0;
}

/* Reserve @nirqs of the currently available IRQs and return their indices. */
int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
{
	unsigned int b, n = 0;
	int err = -ENOSPC;

	if (!nirqs)
		return 0;

	spin_lock(&fdev->irqmgr_lock);
	if (nirqs > fdev->irqs_avail)
		goto unlock;

	for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
		__set_bit(b, fdev->irq_map);
		irq_indices[n++] = b;
		if (n >= nirqs)
			break;
	}

	WARN_ON(n < nirqs);
	fdev->irqs_avail -= n;
	err = n;
unlock:
	spin_unlock(&fdev->irqmgr_lock);
	return err;
}
EXPORT_SYMBOL(fun_reserve_irqs);

/* Release IRQs previously allocated by fun_reserve_irqs(). */
void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
		      u16 *irq_indices)
{
	unsigned int i;

	spin_lock(&fdev->irqmgr_lock);
	for (i = 0; i < nirqs; i++)
		__clear_bit(irq_indices[i], fdev->irq_map);
	fdev->irqs_avail += nirqs;
	spin_unlock(&fdev->irqmgr_lock);
}
EXPORT_SYMBOL(fun_release_irqs);

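/* Service task work function; runs the driver-provided service callback unless
 * servicing has been disabled.
 */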
static void fun_serv_handler(struct work_struct *work)
{
	struct fun_dev *fd = container_of(work, struct fun_dev, service_task);

	if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		return;
	if (fd->serv_cb)
		fd->serv_cb(fd);
}

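/* Disable the service task and wait for any pending run to finish. */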
void fun_serv_stop(struct fun_dev *fd)
{
	set_bit(FUN_SERV_DISABLED, &fd->service_flags);
	cancel_work_sync(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_stop);

/* Re-enable the service task and reschedule it if any service flags are still
 * pending.
 */
void fun_serv_restart(struct fun_dev *fd)
{
	clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
	if (fd->service_flags)
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_restart);

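/* Schedule the service task unless servicing is disabled. */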
void fun_serv_sched(struct fun_dev *fd)
{
	if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
		schedule_work(&fd->service_task);
}
EXPORT_SYMBOL_GPL(fun_serv_sched);

/* Check and try to get the device into a state suitable for initialization,
 * i.e., with CC.EN and CSTS.RDY both clear.
 */
static int sanitize_dev(struct fun_dev *fdev)
{
	int rc;

	fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
	fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);

	/* First get CSTS.RDY to agree with the current CC.EN, giving the
	 * device a chance to complete a recent enable/disable transition.
	 */
	rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
	if (rc)
		return rc;

	/* Then disable the controller if it is currently enabled. */
	if (fdev->cc_reg & NVME_CC_ENABLE)
		rc = fun_disable_ctrl(fdev);

	return rc;
}

/* Undo the device initialization performed by fun_dev_enable(). */
void fun_dev_disable(struct fun_dev *fdev)
{
	struct pci_dev *pdev = to_pci_dev(fdev->dev);

	pci_set_drvdata(pdev, NULL);

	if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
		fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
				fdev->fw_handle);
		fdev->fw_handle = FUN_HCI_ID_INVALID;
	}

	fun_disable_admin_queue(fdev);

	bitmap_free(fdev->irq_map);
	pci_free_irq_vectors(pdev);

	pci_clear_master(pdev);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	fun_unmap_bars(fdev);
}
EXPORT_SYMBOL(fun_dev_disable);

/* Initialize a device: map its BARs, set its DMA masks, bring it to a clean
 * disabled state, allocate MSI-X vectors, enable its admin queue, and
 * determine its resource limits. On success the device is ready for use and
 * is torn down with fun_dev_disable().
 */
int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
		   const struct fun_dev_params *areq, const char *name)
{
	int rc;

	fdev->dev = &pdev->dev;
	rc = fun_map_bars(fdev, name);
	if (rc)
		return rc;

	rc = fun_set_dma_masks(fdev->dev);
	if (rc)
		goto unmap;

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
		goto unmap;
	}

	pci_enable_pcie_error_reporting(pdev);

	rc = sanitize_dev(fdev);
	if (rc)
		goto disable_dev;

	fdev->fw_handle = FUN_HCI_ID_INVALID;
	fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
	fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
	fdev->dbs = fdev->bar + NVME_REG_DBS;

	INIT_WORK(&fdev->service_task, fun_serv_handler);
	fdev->service_flags = FUN_SERV_DISABLED;
	fdev->serv_cb = areq->serv_cb;

	rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for the admin queue vector */
	if (rc < 0)
		goto disable_dev;
	fdev->num_irqs = rc;

	rc = fun_alloc_irq_mgr(fdev);
	if (rc)
		goto free_irqs;

	pci_set_master(pdev);
	rc = fun_enable_admin_queue(fdev, areq);
	if (rc)
		goto free_irq_mgr;

	rc = fun_get_dev_limits(fdev);
	if (rc < 0)
		goto disable_admin;

	pci_save_state(pdev);
	pci_set_drvdata(pdev, fdev);
	pcie_print_link_status(pdev);
	dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
		fdev->q_depth, fdev->db_stride, fdev->max_qid,
		fdev->kern_end_qid);
	return 0;

disable_admin:
	fun_disable_admin_queue(fdev);
free_irq_mgr:
	pci_clear_master(pdev);
	bitmap_free(fdev->irq_map);
free_irqs:
	pci_free_irq_vectors(pdev);
disable_dev:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
unmap:
	fun_unmap_bars(fdev);
	return rc;
}
EXPORT_SYMBOL(fun_dev_enable);

MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
MODULE_DESCRIPTION("Core services driver for Fungible devices");
MODULE_LICENSE("Dual BSD/GPL");