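// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Copyright (C) 2015 IBM Corporation
 */
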
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;
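
/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */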
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}
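
/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */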
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}
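
/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */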
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}
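
/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */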
void cxlflash_free_errpage(void)
{

	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}
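
/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least
 * once to flush the reset_waitq; 'found' is initialized to true so the
 * first pass wakes any waiters before the drained check can return.
 */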
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}
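
/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */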
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}
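
/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */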
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}
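
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */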
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
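
/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */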
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Write all the LISNs */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ PASID as identifier for all contexts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
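
/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur
 * while in scsi_execute(), the EEH handler will attempt to recover. As part
 * of the recovery, the handler drains all currently running ioctls, waiting
 * until they have completed before proceeding with a reset. As this routine
 * is used on the ioctl path, this can create a condition where the EEH
 * handler becomes stuck, infinitely waiting for this ioctl thread. To avoid
 * this behavior, temporarily unmark this thread as an ioctl thread by
 * releasing the ioctl read semaphore. This will allow the EEH handler to
 * proceed with a recovery while this thread is still running. Once the
 * scsi_execute() returns, reacquire the ioctl read semaphore and check the
 * adapter state in case it changed while inside of scsi_execute(). The state
 * check will wait if the adapter is still being recovered or return a
 * failure if the recovery failed. In the event that the adapter reset failed,
 * simply return the failure as the ioctl would be unable to continue.
 *
 * Note that the above puts a requirement on this routine to only be called
 * on an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */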
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* READ CAP(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
			      0, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (result > 0 && scsi_sense_valid(&sshdr)) {
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					fallthrough;
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}
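
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */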
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}
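
/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */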
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}
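
/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */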
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}
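
/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */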
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must populate in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}
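
/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */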
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}
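
/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when not used).
 */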
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}
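
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */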
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNS by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}
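
/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */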
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}
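
/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */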
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}
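
/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context.
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with context.
 * @file:	Previously obtained file associated with context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */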
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 void *ctx, int ctxid, struct file *file, u32 perms,
			 u64 irqs)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->irqs = irqs;
	ctxi->pid = task_tgid_nr(current);
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list);
}
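
/**
 * remove_context() - context kref release handler
 * @kref:	Kref associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */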
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}
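
/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN from a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */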
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}
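
/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. Programmatically, the user is not required to perform a close
 * on the adapter file descriptor, but this usage model is not supported.
 *
 * When a context is detached via this path, all LUNs still associated
 * with the context are detached on its behalf before the underlying fd
 * release handler is invoked.
 *
 * Return: 0 on success
 */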
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	down_read(&cfg->ioctl_rwsem);
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
	up_read(&cfg->ioctl_rwsem);
out_release:
	cfg->ops->fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}
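
/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */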
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}
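
/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */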
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}
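
/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by the CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */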
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	vm_fault_t rc = 0;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}
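
/*
 * Local MMAP vmops to 'catch' faults
 */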
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};
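
/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */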
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cfg->ops->fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};
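
/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */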
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}
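
/*
 * Dummy NULL fops
 */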
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};
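
/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used on process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */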
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
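
/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */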
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 irqs = attach->num_interrupts;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	void *ctx = NULL;

	int fd = -1;

	if (irqs > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, irqs);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	rc = cfg->ops->start_work(ctx, irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context */
	if (!IS_ERR_OR_NULL(ctx)) {
		cfg->ops->stop_context(ctx);
		cfg->ops->release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine, we should try to fix this by adding a dummy fops.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}
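
/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state of a context that is in error.
 *
 * Return: 0 on success, -errno on failure
 */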
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	void *ctx;
	struct afu *afu = cfg->afu;

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cfg->ops->start_work(ctx, ctxi->irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can change this context at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cfg->ops->stop_context(ctx);
err1:
	cfg->ops->release_context(ctx);
	goto out;
}
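
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to perform a start context operation.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'noticed' by
 * the kernel. To avoid looping in this routine during that window, a
 * one second sleep is in place between the time the MMIO failure is
 * detected and the time a wait on the reset wait queue is attempted via
 * check_state().
 *
 * Return: 0 on success, -errno on failure
 */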
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	bool locked = true;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc) {
		locked = false;
		goto out;
	}

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc) {
					locked = false;
					goto out;
				}
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&hwq->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained
		 * with get_context() as it is no longer needed and sleep for
		 * a short period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	if (locked)
		mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}
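
/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */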
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			fallthrough;
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}
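
/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */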
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}
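
/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */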
static char *decode_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}
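
/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev:	SCSI device associated with LUN.
 * @arg:	UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */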
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_release rel = { { 0 }, 0 };

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}
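
/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */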
static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}
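
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev:	SCSI device associated with LUN.
 * @cmd:	IOCTL command.
 * @arg:	Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */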
int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		fallthrough;

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}