0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #include <linux/async.h>
0013 #include <linux/blkdev.h>
0014 #include <linux/blk-mq.h>
0015 #include <linux/device.h>
0016 #include <linux/dma-mapping.h>
0017 #include <linux/dmapool.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/io-64-nonatomic-lo-hi.h>
0020 #include <linux/io.h>
0021 #include <linux/iopoll.h>
0022 #include <linux/jiffies.h>
0023 #include <linux/mempool.h>
0024 #include <linux/module.h>
0025 #include <linux/of.h>
0026 #include <linux/of_platform.h>
0027 #include <linux/once.h>
0028 #include <linux/platform_device.h>
0029 #include <linux/pm_domain.h>
0030 #include <linux/soc/apple/rtkit.h>
0031 #include <linux/soc/apple/sart.h>
0032 #include <linux/reset.h>
0033 #include <linux/time64.h>
0034
0035 #include "nvme.h"
0036
0037 #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
0038 #define APPLE_ANS_MAX_QUEUE_DEPTH 64
0039
0040 #define APPLE_ANS_COPROC_CPU_CONTROL 0x44
0041 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
0042
0043 #define APPLE_ANS_ACQ_DB 0x1004
0044 #define APPLE_ANS_IOCQ_DB 0x100c
0045
0046 #define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
0047
0048 #define APPLE_ANS_BOOT_STATUS 0x1300
0049 #define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
0050
0051 #define APPLE_ANS_UNKNOWN_CTRL 0x24008
0052 #define APPLE_ANS_PRP_NULL_CHECK BIT(11)
0053
0054 #define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
0055 #define APPLE_ANS_LINEAR_SQ_EN BIT(0)
0056
0057 #define APPLE_ANS_LINEAR_ASQ_DB 0x2490c
0058 #define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
0059
0060 #define APPLE_NVMMU_NUM_TCBS 0x28100
0061 #define APPLE_NVMMU_ASQ_TCB_BASE 0x28108
0062 #define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
0063 #define APPLE_NVMMU_TCB_INVAL 0x28118
0064 #define APPLE_NVMMU_TCB_STAT 0x28120
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075 #define APPLE_NVME_AQ_DEPTH 2
0076 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
0077
0078
0079
0080
0081
0082 #define NVME_MAX_KB_SZ 4096
0083 #define NVME_MAX_SEGS 127
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
/*
 * Per-tag command descriptor consumed by the ANS NVMMU. One TCB is written
 * for every submitted command (see apple_nvme_submit_cmd()) and invalidated
 * on completion (see apple_nvmmu_inval()). Layout is fixed at 128 bytes
 * (enforced by the static_assert below). Field meanings beyond what the
 * driver writes are inferred from hardware behaviour — treat as assumptions.
 */
struct apple_nvmmu_tcb {
	/* NVMe opcode, duplicated from the command */
	u8 opcode;

#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
#define APPLE_ANS_TCB_DMA_TO_DEVICE BIT(1)
	/* DMA direction the NVMMU should allow for this command */
	u8 dma_flags;

	/* tag of the command this TCB describes */
	u8 command_id;
	u8 _unk0;
	/* duplicated from nvme_rw_command.length */
	__le16 length;
	u8 _unk1[18];
	/* PRP pointers, duplicated from the command's data pointer */
	__le64 prp1;
	__le64 prp2;
	u8 _unk2[16];
	/* not written by this driver; presumably used for inline AES */
	u8 aes_iv[8];
	u8 _aes_unk[64];
};
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132
/*
 * One submission/completion queue pair plus its NVMMU TCB array.
 * The driver uses exactly two of these: the admin queue and a single
 * I/O queue (see struct apple_nvme).
 */
struct apple_nvme_queue {
	/* host-resident SQ entries, CQ entries and TCBs (DMA coherent) */
	struct nvme_command *sqes;
	struct nvme_completion *cqes;
	struct apple_nvmmu_tcb *tcbs;

	/* device-visible addresses of the three arrays above */
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	dma_addr_t tcb_dma_addr;

	/* MMIO doorbells for this queue */
	u32 __iomem *sq_db;
	u32 __iomem *cq_db;

	/* CQ consumer index and expected phase bit */
	u16 cq_head;
	u8 cq_phase;

	/* true for the admin queue; selects container_of() target and depth */
	bool is_adminq;
	/* cleared on disable; checked before submitting/handling completions */
	bool enabled;
};
0151
0152
0153
0154
0155
0156
0157
/*
 * Per-request driver data (blk-mq PDU). Tracks the DMA mapping state of a
 * request so it can be unmapped on completion.
 */
struct apple_nvme_iod {
	struct nvme_request req;	/* must come first for nvme_req() */
	struct nvme_command cmd;	/* command built for this request */
	struct apple_nvme_queue *q;	/* queue the request was issued on */
	int npages;			/* PRP-list pages; 0 = small pool, -1 = none */
	int nents;			/* mapped scatterlist entries */
	dma_addr_t first_dma;		/* PRP2 value or single-segment mapping */
	unsigned int dma_len;		/* nonzero only for the simple bvec mapping */
	struct scatterlist *sg;		/* from iod_mempool; PRP list array follows */
};
0168
/* Driver state for one ANS NVMe controller instance. */
struct apple_nvme {
	struct device *dev;

	/* register windows: coprocessor control and NVMe/ANS registers */
	void __iomem *mmio_coproc;
	void __iomem *mmio_nvme;

	/* attached power domains and their device links */
	struct device **pd_dev;
	struct device_link **pd_link;
	int pd_count;

	struct apple_sart *sart;	/* DMA address filter in front of ANS */
	struct apple_rtkit *rtk;	/* RTKit mailbox to the coprocessor */
	struct reset_control *reset;

	/* PRP list allocations: full-page lists and small (<=256 byte) lists */
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	mempool_t *iod_mempool;		/* scatterlist + PRP-pointer arrays */

	struct nvme_ctrl ctrl;
	struct work_struct remove_work;	/* tears down a dead controller */

	/* the only two queues this controller uses */
	struct apple_nvme_queue adminq;
	struct apple_nvme_queue ioq;

	struct blk_mq_tag_set admin_tagset;
	struct blk_mq_tag_set tagset;

	int irq;
	spinlock_t lock;		/* serializes doorbells and CQ handling */
};
0199
/* The hardware mandates these exact sizes for SQ entries and TCBs. */
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
0202
/* Map an embedded nvme_ctrl back to its containing apple_nvme. */
static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct apple_nvme, ctrl);
}
0207
0208 static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
0209 {
0210 if (q->is_adminq)
0211 return container_of(q, struct apple_nvme, adminq);
0212 else
0213 return container_of(q, struct apple_nvme, ioq);
0214 }
0215
0216 static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
0217 {
0218 if (q->is_adminq)
0219 return APPLE_NVME_AQ_DEPTH;
0220 else
0221 return APPLE_ANS_MAX_QUEUE_DEPTH;
0222 }
0223
/*
 * RTKit crash callback: schedule a controller reset. The reset path itself
 * detects the crashed coprocessor and tears the controller down.
 */
static void apple_nvme_rtkit_crashed(void *cookie)
{
	struct apple_nvme *anv = cookie;

	dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
	nvme_reset_ctrl(&anv->ctrl);
}
0231
/*
 * RTKit shared-memory setup callback: allocate a coherent buffer for the
 * coprocessor and open a matching window in the SART so the ANS can
 * actually reach it.
 *
 * Returns 0 on success, -EINVAL if the firmware supplied an unexpected
 * descriptor (pre-set iova or zero size), -ENOMEM on allocation or SART
 * failure.
 */
static int apple_nvme_sart_dma_setup(void *cookie,
				     struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;
	int ret;

	/* we only support buffers we place ourselves */
	if (bfr->iova)
		return -EINVAL;
	if (!bfr->size)
		return -EINVAL;

	bfr->buffer =
		dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
	if (!bfr->buffer)
		return -ENOMEM;

	ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
	if (ret) {
		dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
		bfr->buffer = NULL;
		return -ENOMEM;
	}

	return 0;
}
0257
/*
 * RTKit shared-memory teardown: close the SART window first, then free the
 * buffer — the reverse of apple_nvme_sart_dma_setup().
 */
static void apple_nvme_sart_dma_destroy(void *cookie,
					struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;

	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
}
0266
/* Callbacks handed to the RTKit layer for this coprocessor. */
static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
	.crashed = apple_nvme_rtkit_crashed,
	.shmem_setup = apple_nvme_sart_dma_setup,
	.shmem_destroy = apple_nvme_sart_dma_destroy,
};
0272
/*
 * Invalidate the NVMMU TCB for a completed tag. The write triggers the
 * invalidation; a nonzero TCB_STAT readback indicates it failed. Only a
 * rate-limited warning is emitted — there is no recovery path here.
 */
static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
		dev_warn_ratelimited(anv->dev,
				     "NVMMU TCB invalidation failed\n");
}
0282
/*
 * Submit one command: fill in the NVMMU TCB for the tag, copy the command
 * into the linear SQ slot indexed by the tag, then ring the doorbell with
 * the tag value (linear SQ mode — no SQ tail index).
 */
static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];

	/* mirror the command's opcode, PRPs and length into the TCB */
	tcb->opcode = cmd->common.opcode;
	tcb->prp1 = cmd->common.dptr.prp1;
	tcb->prp2 = cmd->common.dptr.prp2;
	tcb->length = cmd->rw.length;
	tcb->command_id = tag;

	/* restrict DMA to the direction this command actually needs */
	if (nvme_is_write(cmd))
		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
	else
		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;

	/* tag doubles as the slot index in the linear SQ */
	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));

	/*
	 * The doorbell write is taken under anv->lock, serializing it against
	 * completion handling which holds the same lock.
	 * NOTE(review): spin_lock_irq (not irqsave) implies this path runs
	 * with interrupts enabled — confirm against callers.
	 */
	spin_lock_irq(&anv->lock);
	writel(tag, q->sq_db);
	spin_unlock_irq(&anv->lock);
}
0317
0318
0319
0320
0321
0322
0323
0324 static inline size_t apple_nvme_iod_alloc_size(void)
0325 {
0326 const unsigned int nprps = DIV_ROUND_UP(
0327 NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
0328 const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
0329 const size_t alloc_size = sizeof(__le64 *) * npages +
0330 sizeof(struct scatterlist) * NVME_MAX_SEGS;
0331
0332 return alloc_size;
0333 }
0334
0335 static void **apple_nvme_iod_list(struct request *req)
0336 {
0337 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
0338
0339 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
0340 }
0341
/*
 * Free all PRP-list pages of a request. The pages form a chain: the last
 * entry of each page holds the DMA address of the next page, so read the
 * link before freeing the page it lives in.
 */
static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = apple_nvme_iod_list(req)[i];
		/* fetch the chain link before the page goes away */
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}
0357
/*
 * Undo apple_nvme_map_data(): either the simple single-bvec mapping
 * (dma_len set) or the full scatterlist + PRP-list path.
 */
static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	/* simple path mapped one bvec directly; nothing else to free */
	if (iod->dma_len) {
		dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
	/* npages == 0 means a single small-pool list; > 0 a page chain */
	if (iod->npages == 0)
		dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
			      iod->first_dma);
	else
		apple_nvme_free_prps(anv, req);
	mempool_free(iod->sg, anv->iod_mempool);
}
0378
/* Dump a scatterlist for debugging an inconsistent mapping (see bad_sgl). */
static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);

		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}
0392
/*
 * Build the PRP entries for a mapped scatterlist and fill the command's
 * data pointer. PRP1 always points at the first segment; PRP2 is either
 * unused (payload fits the first page), a direct second-page pointer, or
 * the DMA address of a chained PRP list.
 *
 * Returns BLK_STS_OK, BLK_STS_RESOURCE on pool exhaustion, or
 * BLK_STS_IOERR when the scatterlist is inconsistent.
 */
static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
					  struct request *req,
					  struct nvme_rw_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = apple_nvme_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	/* consume what PRP1 covers; done if the payload fits one page */
	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	/* advance to the first byte PRP2 must describe */
	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	/* remainder fits one more page: PRP2 points at it directly */
	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	/* otherwise build a PRP list; small lists come from the 256B pool */
	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = anv->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = anv->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;	/* unmap_data: nothing to free */
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		/* page full: chain a new one via its last entry */
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->npages++] = prp_list;
			/* move the last PRP down, store the chain link */
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		/* segment boundary not page-aligned: broken scatterlist */
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	apple_nvme_free_prps(anv, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
	     iod->nents);
	return BLK_STS_IOERR;
}
0486
/*
 * Fast path for a single-bvec request that fits in at most two controller
 * pages: map the bvec directly and use PRP1 (and PRP2 if it crosses a
 * page boundary) with no scatterlist or PRP list.
 */
static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
						struct request *req,
						struct nvme_rw_command *cmnd,
						struct bio_vec *bv)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(anv->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	/* nonzero dma_len tells unmap_data to take the simple path */
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}
0506
/*
 * Map a request's data for DMA and fill in the command's data pointer.
 * Single small bvecs take the simple path; everything else goes through a
 * mempool scatterlist, dma_map_sg and the PRP builder.
 */
static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
					struct request *req,
					struct nvme_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		/* fits in two controller pages: no scatterlist needed */
		if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
			return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
							   &bv);
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
				     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out_free_sg;

	ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
	mempool_free(iod->sg, anv->iod_mempool);
	return ret;
}
0548
/* Unmap a request's data, if it had any; shared by single and batch completion. */
static __always_inline void apple_nvme_unmap_rq(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

	if (blk_rq_nr_phys_segments(req))
		apple_nvme_unmap_data(anv, req);
}
0557
/* blk-mq .complete: unmap DMA resources, then finish the request. */
static void apple_nvme_complete_rq(struct request *req)
{
	apple_nvme_unmap_rq(req);
	nvme_complete_rq(req);
}
0563
/* Complete a batch of requests collected during CQ processing. */
static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, apple_nvme_unmap_rq);
}
0568
/* True if the CQE at cq_head carries the phase bit we currently expect. */
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{
	struct nvme_completion *hcqe = &q->cqes[q->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
}
0575
0576 static inline struct blk_mq_tags *
0577 apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
0578 {
0579 if (q->is_adminq)
0580 return anv->admin_tagset.tags[0];
0581 else
0582 return anv->tagset.tags[0];
0583 }
0584
/*
 * Process one completion entry: invalidate its NVMMU TCB, look up the
 * request by command id and complete it (batched when possible).
 */
static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
					 struct io_comp_batch *iob, u16 idx)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct nvme_completion *cqe = &q->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/* TCB must be invalidated before the tag can be reused */
	apple_nvmmu_inval(q, command_id);

	req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
	if (unlikely(!req)) {
		dev_warn(anv->dev, "invalid id %d completed", command_id);
		return;
	}

	/* fall back to direct completion if batching declines the request */
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
				 apple_nvme_complete_batch))
		apple_nvme_complete_rq(req);
}
0606
0607 static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
0608 {
0609 u32 tmp = q->cq_head + 1;
0610
0611 if (tmp == apple_nvme_queue_depth(q)) {
0612 q->cq_head = 0;
0613 q->cq_phase ^= 1;
0614 } else {
0615 q->cq_head = tmp;
0616 }
0617 }
0618
/*
 * Drain all pending CQEs on a queue. Returns true if any completion was
 * handled; the CQ doorbell is only rung in that case.
 */
static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
			       struct io_comp_batch *iob)
{
	bool found = false;

	while (apple_nvme_cqe_pending(q)) {
		found = true;

		/*
		 * Ensure the CQE payload is read only after the phase bit
		 * check above observed it as valid.
		 */
		dma_rmb();
		apple_nvme_handle_cqe(q, iob, q->cq_head);
		apple_nvme_update_cq_head(q);
	}

	if (found)
		writel(q->cq_head, q->cq_db);

	return found;
}
0641
/*
 * Poll a queue's CQ and flush any batched completions. With force set the
 * queue is drained even when disabled (used during controller teardown).
 * Caller holds anv->lock.
 */
static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{
	bool found;
	DEFINE_IO_COMP_BATCH(iob);

	if (!READ_ONCE(q->enabled) && !force)
		return false;

	found = apple_nvme_poll_cq(q, &iob);

	if (!rq_list_empty(iob.req_list))
		apple_nvme_complete_batch(&iob);

	return found;
}
0657
0658 static irqreturn_t apple_nvme_irq(int irq, void *data)
0659 {
0660 struct apple_nvme *anv = data;
0661 bool handled = false;
0662 unsigned long flags;
0663
0664 spin_lock_irqsave(&anv->lock, flags);
0665 if (apple_nvme_handle_cq(&anv->ioq, false))
0666 handled = true;
0667 if (apple_nvme_handle_cq(&anv->adminq, false))
0668 handled = true;
0669 spin_unlock_irqrestore(&anv->lock, flags);
0670
0671 if (handled)
0672 return IRQ_HANDLED;
0673 return IRQ_NONE;
0674 }
0675
/*
 * Create the single I/O completion queue (cqid 1) via an admin command,
 * physically contiguous and interrupt-driven on vector 0.
 */
static int apple_nvme_create_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(1);
	/* qsize is zero-based per the NVMe spec */
	c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
	c.create_cq.irq_vector = cpu_to_le16(0);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
0693
/* Delete the I/O completion queue (cqid 1). */
static int apple_nvme_remove_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_cq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
0703
/*
 * Create the single I/O submission queue (sqid 1) bound to cqid 1.
 * Must be issued after apple_nvme_create_cq().
 */
static int apple_nvme_create_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(1);
	/* qsize is zero-based per the NVMe spec */
	c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
	c.create_sq.cqid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
0721
/* Delete the I/O submission queue (sqid 1). */
static int apple_nvme_remove_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_sq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}
0731
/*
 * blk-mq .queue_rq: build the NVMe command for a request, map its data and
 * submit it to the hardware queue.
 */
static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct request *req = bd->rq;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command *cmnd = &iod->cmd;
	blk_status_t ret;

	/* reset per-request mapping state for the unmap path */
	iod->npages = -1;
	iod->nents = 0;

	/*
	 * Queues may be disabled (e.g. across reset); fail fast rather than
	 * touching a dead queue.
	 */
	if (unlikely(!READ_ONCE(q->enabled)))
		return BLK_STS_IOERR;

	if (!nvme_check_ready(&anv->ctrl, req, true))
		return nvme_fail_nonready_command(&anv->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = apple_nvme_map_data(anv, req, cmnd);
		if (ret)
			goto out_free_cmd;
	}

	blk_mq_start_request(req);
	apple_nvme_submit_cmd(q, cmnd);
	return BLK_STS_OK;

out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}
0774
/* blk-mq .init_hctx: stash the queue pointer (tag set driver_data). */
static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	hctx->driver_data = data;
	return 0;
}
0781
/*
 * blk-mq .init_request: wire up the per-request PDU — back-pointers to the
 * queue and controller, and the nvme_request's command buffer.
 */
static int apple_nvme_init_request(struct blk_mq_tag_set *set,
				   struct request *req, unsigned int hctx_idx,
				   unsigned int numa_node)
{
	struct apple_nvme_queue *q = set->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_request *nreq = nvme_req(req);

	iod->q = q;
	nreq->ctrl = &anv->ctrl;
	nreq->cmd = &iod->cmd;

	return 0;
}
0797
/*
 * Quiesce and disable the controller, reaping every outstanding request.
 * With shutdown set a full NVMe shutdown is performed and the queues are
 * restarted afterwards so that later submissions fail cleanly instead of
 * hanging.
 */
static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
	bool dead = false, freeze = false;
	unsigned long flags;

	/* a crashed coprocessor or a non-ready/fatal CSTS means no commands work */
	if (apple_rtkit_is_crashed(anv->rtk))
		dead = true;
	if (!(csts & NVME_CSTS_RDY))
		dead = true;
	if (csts & NVME_CSTS_CFS)
		dead = true;

	if (anv->ctrl.state == NVME_CTRL_LIVE ||
	    anv->ctrl.state == NVME_CTRL_RESETTING) {
		freeze = true;
		nvme_start_freeze(&anv->ctrl);
	}

	/*
	 * Give outstanding I/O a chance to drain before shutdown, but only
	 * if the controller can still make progress.
	 */
	if (!dead && shutdown && freeze)
		nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);

	nvme_stop_queues(&anv->ctrl);

	if (!dead) {
		/* delete the I/O queues while the admin queue still works */
		if (READ_ONCE(anv->ioq.enabled)) {
			apple_nvme_remove_sq(anv);
			apple_nvme_remove_cq(anv);
		}

		if (shutdown)
			nvme_shutdown_ctrl(&anv->ctrl);
		nvme_disable_ctrl(&anv->ctrl);
	}

	WRITE_ONCE(anv->ioq.enabled, false);
	WRITE_ONCE(anv->adminq.enabled, false);
	mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
	nvme_stop_admin_queue(&anv->ctrl);

	/* last chance to complete any requests before we cancel the rest */
	spin_lock_irqsave(&anv->lock, flags);
	apple_nvme_handle_cq(&anv->ioq, true);
	apple_nvme_handle_cq(&anv->adminq, true);
	spin_unlock_irqrestore(&anv->lock, flags);

	nvme_cancel_tagset(&anv->ctrl);
	nvme_cancel_admin_tagset(&anv->ctrl);

	/*
	 * On shutdown the queues are restarted so that queued requests are
	 * failed by blk-mq instead of lingering forever; the queues stay
	 * marked disabled so nothing reaches the dead hardware.
	 */
	if (shutdown) {
		nvme_start_queues(&anv->ctrl);
		nvme_start_admin_queue(&anv->ctrl);
	}
}
0861
/*
 * blk-mq timeout handler. Three cases, in order: the controller is not
 * live (cancel the request directly), the completion was merely missed
 * (poll the CQ and recheck), or the hardware is genuinely stuck (disable
 * and reset the controller).
 */
static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme_queue *q = iod->q;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	unsigned long flags;
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);

	if (anv->ctrl.state != NVME_CTRL_LIVE) {
		/*
		 * Not live (e.g. mid-reset): the request will never be
		 * completed by hardware, so abort it here instead of letting
		 * the timeout rearm forever.
		 */
		dev_warn(anv->dev,
			 "I/O %d(aq:%d) timeout while not in live state\n",
			 req->tag, q->is_adminq);
		if (blk_mq_request_started(req) &&
		    !blk_mq_request_completed(req)) {
			nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
			nvme_req(req)->flags |= NVME_REQ_CANCELLED;
			blk_mq_complete_request(req);
		}
		return BLK_EH_DONE;
	}

	/* controller looks healthy: maybe we just missed an interrupt */
	if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
		spin_lock_irqsave(&anv->lock, flags);
		apple_nvme_handle_cq(q, false);
		spin_unlock_irqrestore(&anv->lock, flags);
		if (blk_mq_request_completed(req)) {
			dev_warn(anv->dev,
				 "I/O %d(aq:%d) timeout: completion polled\n",
				 req->tag, q->is_adminq);
			return BLK_EH_DONE;
		}
	}

	/*
	 * The command really is stuck: mark it cancelled and reset the
	 * controller, which will reap it along with everything else.
	 */
	dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
		 req->tag, q->is_adminq);
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	apple_nvme_disable(anv, false);
	nvme_reset_ctrl(&anv->ctrl);
	return BLK_EH_DONE;
}
0921
/* blk-mq .poll: drain the queue's CQ under the controller lock. */
static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
			   struct io_comp_batch *iob)
{
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&anv->lock, flags);
	found = apple_nvme_poll_cq(q, iob);
	spin_unlock_irqrestore(&anv->lock, flags);

	return found;
}
0936
/* blk-mq ops for the admin queue (no polling). */
static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
};
0944
/* blk-mq ops for the I/O queue; additionally supports polling. */
static const struct blk_mq_ops apple_nvme_mq_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
	.poll = apple_nvme_poll,
};
0953
/*
 * Reset a queue's software state and mark it enabled. TCBs are always
 * cleared to the maximum depth since the TCB array is sized for it on
 * both queues; CQEs are cleared so the phase-bit scan starts clean.
 */
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cq_head = 0;
	q->cq_phase = 1;
	memset(q->tcbs, 0,
	       APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
	memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
	WRITE_ONCE(q->enabled, true);
	wmb(); /* ensure the first nvme_queue_rq() sees the state above */
}
0966
/*
 * Controller reset/bring-up work. Performs the full sequence: shut down
 * any previous instance, reboot the ANS coprocessor via RTKit, program
 * the ANS-specific registers (linear SQ, NVMMU), enable the NVMe
 * controller, create the I/O queue pair and take the controller live.
 * On any failure the controller is marked DELETING and handed to
 * remove_work for teardown.
 */
static void apple_nvme_reset_work(struct work_struct *work)
{
	unsigned int nr_io_queues = 1;
	int ret;
	u32 boot_status, aqa;
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, ctrl.reset_work);

	if (anv->ctrl.state != NVME_CTRL_RESETTING) {
		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
			 anv->ctrl.state);
		ret = -ENODEV;
		goto out;
	}

	/* a crashed RTKit cannot be restarted from here */
	if (apple_rtkit_is_crashed(anv->rtk)) {
		dev_err(anv->dev,
			"RTKit has crashed without any way to recover.");
		ret = -EIO;
		goto out;
	}

	/* disable a previously-enabled controller before rebooting ANS */
	if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
		apple_nvme_disable(anv, false);

	/* cleanly stop the coprocessor so the reset below is safe */
	if (apple_rtkit_is_running(anv->rtk)) {
		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
		ret = apple_rtkit_shutdown(anv->rtk);
		if (ret)
			goto out;
	}

	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

	ret = reset_control_assert(anv->reset);
	if (ret)
		goto out;

	ret = apple_rtkit_reinit(anv->rtk);
	if (ret)
		goto out;

	ret = reset_control_deassert(anv->reset);
	if (ret)
		goto out;

	/* release the coprocessor CPU and boot its firmware */
	writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
	       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	ret = apple_rtkit_boot(anv->rtk);
	if (ret) {
		dev_err(anv->dev, "ANS did not boot");
		goto out;
	}

	/* wait (up to 1s) for the firmware to report it is ready */
	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
				 boot_status,
				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
	if (ret) {
		dev_err(anv->dev, "ANS did not initialize");
		goto out;
	}

	dev_dbg(anv->dev, "ANS booted successfully.");

	/*
	 * Limit transfers to what both the driver's PRP sizing and the DMA
	 * mapping layer can handle.
	 */
	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
					 dma_max_mapping_size(anv->dev) >> 9);
	anv->ctrl.max_segments = NVME_MAX_SEGS;

	/*
	 * ANS-specific setup: enable linear submission queues (the doorbell
	 * takes a tag instead of an SQ tail index).
	 */
	writel(APPLE_ANS_LINEAR_SQ_EN,
	       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);

	/* allow the maximum number of pending commands */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
	       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);

	/* tell the NVMMU how many TCBs each queue has (zero-based) */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
	       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);

	/*
	 * Disable the PRP-null check so commands without a data payload
	 * are accepted (see the register name: behaviour of this bit is
	 * otherwise undocumented).
	 */
	writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
		       ~APPLE_ANS_PRP_NULL_CHECK,
	       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);

	/* standard NVMe admin queue setup: AQA + ASQ/ACQ base addresses */
	aqa = APPLE_NVME_AQ_DEPTH - 1;
	aqa |= aqa << 16;
	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);

	/* point the NVMMU at both queues' TCB arrays */
	writeq(anv->adminq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
	writeq(anv->ioq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);

	anv->ctrl.sqsize =
		APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);

	dev_dbg(anv->dev, "Enabling controller now");
	ret = nvme_enable_ctrl(&anv->ctrl);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Starting admin queue");
	apple_nvme_init_queue(&anv->adminq);
	nvme_start_admin_queue(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		ret = -ENODEV;
		goto out;
	}

	ret = nvme_init_ctrl_finish(&anv->ctrl);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Creating IOCQ");
	ret = apple_nvme_create_cq(anv);
	if (ret)
		goto out;
	dev_dbg(anv->dev, "Creating IOSQ");
	ret = apple_nvme_create_sq(anv);
	if (ret)
		goto out_remove_cq;

	apple_nvme_init_queue(&anv->ioq);
	/* this hardware exposes exactly one I/O queue */
	nr_io_queues = 1;
	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
	if (ret)
		goto out_remove_sq;
	if (nr_io_queues != 1) {
		ret = -ENXIO;
		goto out_remove_sq;
	}

	anv->ctrl.queue_count = nr_io_queues + 1;

	/* unfreeze the queues frozen during disable and go live */
	nvme_start_queues(&anv->ctrl);
	nvme_wait_freeze(&anv->ctrl);
	blk_mq_update_nr_hw_queues(&anv->tagset, 1);
	nvme_unfreeze(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller live state\n");
		ret = -ENODEV;
		goto out_remove_sq;
	}

	nvme_start_ctrl(&anv->ctrl);

	dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
	return;

out_remove_sq:
	apple_nvme_remove_sq(anv);
out_remove_cq:
	apple_nvme_remove_cq(anv);
out:
	/* failed bring-up: mark dead and hand off to remove_work */
	dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	nvme_get_ctrl(&anv->ctrl);
	apple_nvme_disable(anv, false);
	nvme_kill_queues(&anv->ctrl);
	if (!queue_work(nvme_wq, &anv->remove_work))
		nvme_put_ctrl(&anv->ctrl);
}
1158
/*
 * Teardown work for a controller whose reset failed: drop the reference
 * taken by reset_work and unbind the driver.
 */
static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, remove_work);

	nvme_put_ctrl(&anv->ctrl);
	device_release_driver(anv->dev);
}
1167
1168 static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
1169 {
1170 *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1171 return 0;
1172 }
1173
1174 static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
1175 {
1176 writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1177 return 0;
1178 }
1179
1180 static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
1181 {
1182 *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
1183 return 0;
1184 }
1185
/* nvme_ctrl_ops: report the platform device name as the "address". */
static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;

	return snprintf(buf, size, "%s\n", dev_name(dev));
}
1192
/*
 * nvme_ctrl_ops .free_ctrl: release the admin queue reference and the
 * device reference; the apple_nvme itself is devm-managed elsewhere.
 */
static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	if (anv->ctrl.admin_q)
		blk_put_queue(anv->ctrl.admin_q);
	put_device(anv->dev);
}
1201
/* Controller ops registered with the NVMe core. */
static const struct nvme_ctrl_ops nvme_ctrl_ops = {
	.name = "apple-nvme",
	.module = THIS_MODULE,
	.flags = 0,
	.reg_read32 = apple_nvme_reg_read32,
	.reg_write32 = apple_nvme_reg_write32,
	.reg_read64 = apple_nvme_reg_read64,
	.free_ctrl = apple_nvme_free_ctrl,
	.get_address = apple_nvme_get_address,
};
1212
/*
 * Async probe tail: wait for the initial reset and namespace scan to
 * finish, then drop the extra controller reference held across probe.
 */
static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
{
	struct apple_nvme *anv = data;

	flush_work(&anv->ctrl.reset_work);
	flush_work(&anv->ctrl.scan_work);
	nvme_put_ctrl(&anv->ctrl);
}
1221
/* devm action: free a blk-mq tag set on device teardown. */
static void devm_apple_nvme_put_tag_set(void *data)
{
	blk_mq_free_tag_set(data);
}
1226
/*
 * Allocate the admin and I/O blk-mq tag sets (one hw queue each) and
 * register devm cleanup actions for both.
 *
 * Returns 0 on success or a negative errno.
 */
static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
	int ret;

	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
	anv->admin_tagset.nr_hw_queues = 1;
	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
	anv->admin_tagset.numa_node = NUMA_NO_NODE;
	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
	anv->admin_tagset.driver_data = &anv->adminq;

	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->admin_tagset);
	if (ret)
		return ret;

	anv->tagset.ops = &apple_nvme_mq_ops;
	anv->tagset.nr_hw_queues = 1;
	anv->tagset.nr_maps = 1;
	/*
	 * Tags are shared with the admin queue in the linear-SQ scheme, so
	 * reserve the admin-queue depth worth of tags and shrink the I/O
	 * queue depth accordingly (also zero-based below).
	 */
	anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
	anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
	anv->tagset.timeout = NVME_IO_TIMEOUT;
	anv->tagset.numa_node = NUMA_NO_NODE;
	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
	anv->tagset.driver_data = &anv->ioq;

	ret = blk_mq_alloc_tag_set(&anv->tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->tagset);
	if (ret)
		return ret;

	anv->ctrl.admin_tagset = &anv->admin_tagset;
	anv->ctrl.tagset = &anv->tagset;

	return 0;
}
1277
/*
 * Allocate DMA-coherent completion-queue, submission-queue and NVMMU TCB
 * memory for @q. All allocations are devm-managed, so the early -ENOMEM
 * returns need no explicit cleanup.
 */
static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_completion),
				      &q->cq_dma_addr, GFP_KERNEL);
	if (!q->cqes)
		return -ENOMEM;

	q->sqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_command),
				      &q->sq_dma_addr, GFP_KERNEL);
	if (!q->sqes)
		return -ENOMEM;

	/*
	 * The TCB table is always sized for the maximum queue depth rather
	 * than this queue's actual depth — presumably the NVMMU expects a
	 * fixed-size table (see APPLE_NVMMU_NUM_TCBS).
	 * NOTE(review): confirm against the NVMMU setup code.
	 */
	q->tcbs = dmam_alloc_coherent(anv->dev,
				      APPLE_ANS_MAX_QUEUE_DEPTH *
					      sizeof(struct apple_nvmmu_tcb),
				      &q->tcb_dma_addr, GFP_KERNEL);
	if (!q->tcbs)
		return -ENOMEM;

	/*
	 * Start with phase 1 so the freshly zeroed CQ memory does not look
	 * like it already holds valid completion entries.
	 */
	q->cq_phase = 1;
	return 0;
}
1313
1314 static void apple_nvme_detach_genpd(struct apple_nvme *anv)
1315 {
1316 int i;
1317
1318 if (anv->pd_count <= 1)
1319 return;
1320
1321 for (i = anv->pd_count - 1; i >= 0; i--) {
1322 if (anv->pd_link[i])
1323 device_link_del(anv->pd_link[i]);
1324 if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
1325 dev_pm_domain_detach(anv->pd_dev[i], true);
1326 }
1327 }
1328
/*
 * Attach every power domain listed in the device tree and link each one
 * to this device so they are powered whenever the device is active.
 *
 * With zero or one domain — or when counting fails (negative count) —
 * nothing is done here; a single domain is handled by the driver core.
 */
static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
	struct device *dev = anv->dev;
	int i;

	anv->pd_count = of_count_phandle_with_args(
		dev->of_node, "power-domains", "#power-domain-cells");
	if (anv->pd_count <= 1)
		return 0;

	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
				   GFP_KERNEL);
	if (!anv->pd_dev)
		return -ENOMEM;

	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
				    GFP_KERNEL);
	if (!anv->pd_link)
		return -ENOMEM;

	for (i = 0; i < anv->pd_count; i++) {
		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(anv->pd_dev[i])) {
			/* Tear down whatever was attached so far. */
			apple_nvme_detach_genpd(anv);
			return PTR_ERR(anv->pd_dev[i]);
		}

		/* Keep the domain powered while this device is active. */
		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME |
						  DL_FLAG_RPM_ACTIVE);
		if (!anv->pd_link[i]) {
			apple_nvme_detach_genpd(anv);
			return -EINVAL;
		}
	}

	return 0;
}
1368
1369 static void devm_apple_nvme_mempool_destroy(void *data)
1370 {
1371 mempool_destroy(data);
1372 }
1373
/*
 * Probe: map MMIO resources, allocate queues and DMA pools, register the
 * NVMe controller and kick off the first controller reset asynchronously.
 *
 * Nearly every resource is devm-managed; the explicit error path only
 * drops the device reference taken at the top.
 *
 * NOTE(review): error paths taken after nvme_init_ctrl() succeeds jump
 * straight to put_dev without uninitializing the ctrl — verify this does
 * not leak the references/state nvme_init_ctrl() set up.
 * NOTE(review): likewise, power domains attached by
 * apple_nvme_attach_genpd() are not detached on later probe failures —
 * confirm whether that is intentional.
 */
static int apple_nvme_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_nvme *anv;
	int ret;

	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
	if (!anv)
		return -ENOMEM;

	/* Hold a device reference; dropped in apple_nvme_free_ctrl(). */
	anv->dev = get_device(dev);
	anv->adminq.is_adminq = true;
	platform_set_drvdata(pdev, anv);

	ret = apple_nvme_attach_genpd(anv);
	if (ret < 0) {
		dev_err_probe(dev, ret, "Failed to attach power domains");
		goto put_dev;
	}
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->irq = platform_get_irq(pdev, 0);
	if (anv->irq < 0) {
		ret = anv->irq;
		goto put_dev;
	}
	/* IRQ 0 is treated as "no interrupt" here. */
	if (!anv->irq) {
		ret = -ENXIO;
		goto put_dev;
	}

	/* "ans" is the coprocessor control block, "nvme" the NVMe regs. */
	anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
	if (IS_ERR(anv->mmio_coproc)) {
		ret = PTR_ERR(anv->mmio_coproc);
		goto put_dev;
	}
	anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
	if (IS_ERR(anv->mmio_nvme)) {
		ret = PTR_ERR(anv->mmio_nvme);
		goto put_dev;
	}

	/* Doorbell registers for the linear SQs and the CQs. */
	anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
	anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
	anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
	anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;

	anv->sart = devm_apple_sart_get(dev);
	if (IS_ERR(anv->sart)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->sart),
				    "Failed to initialize SART");
		goto put_dev;
	}

	anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
	if (IS_ERR(anv->reset)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->reset),
				    "Failed to get reset control");
		goto put_dev;
	}

	INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
	INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
	spin_lock_init(&anv->lock);

	/* Per-queue CQ/SQ/TCB DMA memory (devm-managed). */
	ret = apple_nvme_queue_alloc(anv, &anv->adminq);
	if (ret)
		goto put_dev;
	ret = apple_nvme_queue_alloc(anv, &anv->ioq);
	if (ret)
		goto put_dev;

	/* DMA pools for PRP list pages (full page and small 256-byte). */
	anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
					      NVME_CTRL_PAGE_SIZE,
					      NVME_CTRL_PAGE_SIZE, 0);
	if (!anv->prp_page_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	anv->prp_small_pool =
		dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
	if (!anv->prp_small_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	/* Emergency reserve of one iod allocation for memory pressure. */
	WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
	anv->iod_mempool =
		mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
	if (!anv->iod_mempool) {
		ret = -ENOMEM;
		goto put_dev;
	}
	ret = devm_add_action_or_reset(anv->dev,
			devm_apple_nvme_mempool_destroy, anv->iod_mempool);
	if (ret)
		goto put_dev;

	ret = apple_nvme_alloc_tagsets(anv);
	if (ret)
		goto put_dev;

	ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
			       "nvme-apple", anv);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to request IRQ");
		goto put_dev;
	}

	anv->rtk =
		devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
	if (IS_ERR(anv->rtk)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
				    "Failed to initialize RTKit");
		goto put_dev;
	}

	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
			     NVME_QUIRK_SKIP_CID_GEN);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
		goto put_dev;
	}

	anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
	if (IS_ERR(anv->ctrl.admin_q)) {
		ret = -ENOMEM;
		goto put_dev;
	}

	/* Own reference on the admin queue; released in free_ctrl. */
	if (!blk_get_queue(anv->ctrl.admin_q)) {
		nvme_start_admin_queue(&anv->ctrl);
		blk_mq_destroy_queue(anv->ctrl.admin_q);
		anv->ctrl.admin_q = NULL;
		ret = -ENODEV;
		goto put_dev;
	}

	/* First bring-up runs in reset_work; waited on asynchronously. */
	nvme_reset_ctrl(&anv->ctrl);
	async_schedule(apple_nvme_async_probe, anv);

	return 0;

put_dev:
	put_device(anv->dev);
	return ret;
}
1525
/*
 * Driver detach: stop and unregister the NVMe controller, shut down the
 * RTKit coprocessor and detach the power domains. devm handles the rest.
 */
static int apple_nvme_remove(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	/* Block further resets/rescans before tearing anything down. */
	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	flush_work(&anv->ctrl.reset_work);
	nvme_stop_ctrl(&anv->ctrl);
	nvme_remove_namespaces(&anv->ctrl);
	apple_nvme_disable(anv, true);
	nvme_uninit_ctrl(&anv->ctrl);

	if (apple_rtkit_is_running(anv->rtk))
		apple_rtkit_shutdown(anv->rtk);

	apple_nvme_detach_genpd(anv);

	return 0;
}
1544
1545 static void apple_nvme_shutdown(struct platform_device *pdev)
1546 {
1547 struct apple_nvme *anv = platform_get_drvdata(pdev);
1548
1549 apple_nvme_disable(anv, true);
1550 if (apple_rtkit_is_running(anv->rtk))
1551 apple_rtkit_shutdown(anv->rtk);
1552 }
1553
1554 static int apple_nvme_resume(struct device *dev)
1555 {
1556 struct apple_nvme *anv = dev_get_drvdata(dev);
1557
1558 return nvme_reset_ctrl(&anv->ctrl);
1559 }
1560
/*
 * System suspend: quiesce the controller, shut down the RTKit
 * coprocessor and stop the ANS CPU. The controller is brought back by a
 * full reset in apple_nvme_resume().
 */
static int apple_nvme_suspend(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);
	int ret = 0;

	apple_nvme_disable(anv, true);

	if (apple_rtkit_is_running(anv->rtk))
		ret = apple_rtkit_shutdown(anv->rtk);

	/* Clear CPU_CONTROL (incl. the RUN bit) to stop the ANS CPU. */
	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

	return ret;
}
1575
/* System sleep (suspend/resume) callbacks; wired up via pm_sleep_ptr(). */
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);
1578
/* Device-tree match table: the ANS2 NVMe coprocessor. */
static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,nvme-ans2" },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
1584
/* Platform driver glue and module registration. */
static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_LICENSE("GPL");