0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/interrupt.h>
0012 #include <linux/pci.h>
0013 #include <linux/syscalls.h>
0014 #include <asm/unaligned.h>
0015 #include <asm/bitsperlong.h>
0016
0017 #include <scsi/scsi_cmnd.h>
0018 #include <scsi/scsi_host.h>
0019 #include <uapi/scsi/cxlflash_ioctl.h>
0020
0021 #include "sislite.h"
0022 #include "common.h"
0023 #include "vlun.h"
0024 #include "superpipe.h"
0025
0026
0027
0028
0029
0030
0031 static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
0032 struct dk_cxlflash_resize *resize)
0033 {
0034 resize->hdr = virt->hdr;
0035 resize->context_id = virt->context_id;
0036 resize->rsrc_handle = virt->rsrc_handle;
0037 resize->req_size = virt->lun_size;
0038 resize->last_lba = virt->last_lba;
0039 }
0040
0041
0042
0043
0044
0045
0046 static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
0047 struct dk_cxlflash_release *release)
0048 {
0049 release->hdr = clone->hdr;
0050 release->context_id = clone->context_id_dst;
0051 }
0052
0053
0054
0055
0056
0057
0058
/**
 * ba_init() - initializes a block allocator
 * @ba_lun:	Block allocator to initialize.
 *
 * Carves the LUN into allocation units (AUs) of au_size blocks and builds
 * a free bitmap (bit set = AU free) plus a per-AU clone reference count
 * map. The resulting state is handed back via ba_lun->ba_lun_handle.
 *
 * Return: 0 on success, -errno on failure
 */
static int ba_init(struct ba_lun *ba_lun)
{
	struct ba_lun_info *bali = NULL;
	int lun_size_au = 0, i = 0;
	int last_word_underflow = 0;
	u64 *lam;

	pr_debug("%s: Initializing LUN: lun_id=%016llx "
		 "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
		 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);

	/* Calculate bit map size */
	lun_size_au = ba_lun->lsize / ba_lun->au_size;
	if (lun_size_au == 0) {
		pr_debug("%s: Requested LUN size of 0!\n", __func__);
		return -EINVAL;
	}

	/* Allocate lun information container */
	bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
	if (unlikely(!bali)) {
		pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		return -ENOMEM;
	}

	bali->total_aus = lun_size_au;
	bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;

	/* Round up for AUs that do not fill a complete bitmap word */
	if (lun_size_au % BITS_PER_LONG)
		bali->lun_bmap_size++;

	/* Allocate bitmap space */
	bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
				      GFP_KERNEL);
	if (unlikely(!bali->lun_alloc_map)) {
		pr_err("%s: Failed to allocate lun allocation map: "
		       "lun_id=%016llx\n", __func__, ba_lun->lun_id);
		kfree(bali);
		return -ENOMEM;
	}

	/* Initialize the bit map size and set all bits to '1' (free) */
	bali->free_aun_cnt = lun_size_au;

	for (i = 0; i < bali->lun_bmap_size; i++)
		bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;

	/*
	 * If the last word is not fully utilized, mark the excess bits as
	 * allocated so they can never be handed out.
	 * NOTE(review): assumes HIBIT == BITS_PER_LONG - 1 — confirm in the
	 * header that defines it.
	 */
	last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
	last_word_underflow -= bali->free_aun_cnt;
	if (last_word_underflow > 0) {
		lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
		for (i = (HIBIT - last_word_underflow + 1);
		     i < BITS_PER_LONG;
		     i++)
			clear_bit(i, (ulong *)lam);
	}

	/* Initialize high elevator index; low/curr are 0 from kzalloc() */
	bali->free_high_idx = bali->lun_bmap_size;

	/* Allocate clone map (one refcount byte per AU) */
	bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
				      GFP_KERNEL);
	if (unlikely(!bali->aun_clone_map)) {
		pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
		       __func__, ba_lun->lun_id);
		kfree(bali->lun_alloc_map);
		kfree(bali);
		return -ENOMEM;
	}

	/* Pass the allocated LUN info as a handle to the user */
	ba_lun->ba_lun_handle = bali;

	pr_debug("%s: Successfully initialized the LUN: "
		 "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
		 __func__, ba_lun->lun_id, bali->lun_bmap_size,
		 bali->free_aun_cnt);
	return 0;
}
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151 static int find_free_range(u32 low,
0152 u32 high,
0153 struct ba_lun_info *bali, int *bit_word)
0154 {
0155 int i;
0156 u64 bit_pos = -1;
0157 ulong *lam, num_bits;
0158
0159 for (i = low; i < high; i++)
0160 if (bali->lun_alloc_map[i] != 0) {
0161 lam = (ulong *)&bali->lun_alloc_map[i];
0162 num_bits = (sizeof(*lam) * BITS_PER_BYTE);
0163 bit_pos = find_first_bit(lam, num_bits);
0164
0165 pr_devel("%s: Found free bit %llu in LUN "
0166 "map entry %016llx at bitmap index = %d\n",
0167 __func__, bit_pos, bali->lun_alloc_map[i], i);
0168
0169 *bit_word = i;
0170 bali->free_aun_cnt--;
0171 clear_bit(bit_pos, lam);
0172 break;
0173 }
0174
0175 return bit_pos;
0176 }
0177
0178
0179
0180
0181
0182
0183
/**
 * ba_alloc() - allocates a block from the block allocator
 * @ba_lun:	Block allocator from which to allocate a block.
 *
 * Searches the free bitmap elevator-style: from the current index up to
 * the high index first, then wrapping around from the low index back to
 * the current index.
 *
 * Return: The allocated block, -1 on failure
 */
static u64 ba_alloc(struct ba_lun *ba_lun)
{
	u64 bit_pos = -1;
	int bit_word = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	pr_debug("%s: Received block allocation request: "
		 "lun_id=%016llx free_aun_cnt=%llx\n",
		 __func__, ba_lun->lun_id, bali->free_aun_cnt);

	if (bali->free_aun_cnt == 0) {
		pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
			 __func__, ba_lun->lun_id);
		return -1ULL;
	}

	/* Search to find a free entry, curr->high then low->curr */
	bit_pos = find_free_range(bali->free_curr_idx,
				  bali->free_high_idx, bali, &bit_word);
	if (bit_pos == -1) {
		bit_pos = find_free_range(bali->free_low_idx,
					  bali->free_curr_idx,
					  bali, &bit_word);
		if (bit_pos == -1) {
			pr_debug("%s: Could not find an allocation unit on LUN:"
				 " lun_id=%016llx\n", __func__, ba_lun->lun_id);
			return -1ULL;
		}
	}

	/*
	 * Update free_curr_idx: advance to the next word when the highest
	 * bit of this word was just consumed.
	 */
	if (bit_pos == HIBIT)
		bali->free_curr_idx = bit_word + 1;
	else
		bali->free_curr_idx = bit_word;

	pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__,
		 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
		 bali->free_aun_cnt);

	return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
}
0229
0230
0231
0232
0233
0234
0235
0236
0237 static int validate_alloc(struct ba_lun_info *bali, u64 aun)
0238 {
0239 int idx = 0, bit_pos = 0;
0240
0241 idx = aun / BITS_PER_LONG;
0242 bit_pos = aun % BITS_PER_LONG;
0243
0244 if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
0245 return -1;
0246
0247 return 0;
0248 }
0249
0250
0251
0252
0253
0254
0255
0256
/**
 * ba_free() - frees a block from the block allocator
 * @ba_lun:	Block allocator from which to free a block.
 * @to_free:	Block to free.
 *
 * A cloned block is not returned to the free pool; its clone reference
 * count is simply dropped by one.
 *
 * Return: 0 on success, -1 on failure
 */
static int ba_free(struct ba_lun *ba_lun, u64 to_free)
{
	int idx = 0, bit_pos = 0;
	struct ba_lun_info *bali = NULL;

	bali = ba_lun->ba_lun_handle;

	if (validate_alloc(bali, to_free)) {
		pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
			 __func__, to_free, ba_lun->lun_id);
		return -1;
	}

	pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
		 "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
		 bali->free_aun_cnt);

	if (bali->aun_clone_map[to_free] > 0) {
		/* AU has outstanding clones: drop a reference, keep the AU */
		pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
			 __func__, to_free, ba_lun->lun_id,
			 bali->aun_clone_map[to_free]);
		bali->aun_clone_map[to_free]--;
		return 0;
	}

	idx = to_free / BITS_PER_LONG;
	bit_pos = to_free % BITS_PER_LONG;

	/* Mark the AU free ('1') and widen the elevator window if needed */
	set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
	bali->free_aun_cnt++;

	if (idx < bali->free_low_idx)
		bali->free_low_idx = idx;
	else if (idx > bali->free_high_idx)
		bali->free_high_idx = idx;

	pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
		 "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
		 ba_lun->lun_id, bali->free_aun_cnt);

	return 0;
}
0299
0300
0301
0302
0303
0304
0305
0306
0307 static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
0308 {
0309 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0310
0311 if (validate_alloc(bali, to_clone)) {
0312 pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
0313 __func__, to_clone, ba_lun->lun_id);
0314 return -1;
0315 }
0316
0317 pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
0318 __func__, to_clone, ba_lun->lun_id);
0319
0320 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
0321 pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
0322 __func__, to_clone, ba_lun->lun_id);
0323 return -1;
0324 }
0325
0326 bali->aun_clone_map[to_clone]++;
0327
0328 return 0;
0329 }
0330
0331
0332
0333
0334
0335
0336
0337 static u64 ba_space(struct ba_lun *ba_lun)
0338 {
0339 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0340
0341 return bali->free_aun_cnt;
0342 }
0343
0344
0345
0346
0347
0348
0349
0350 void cxlflash_ba_terminate(struct ba_lun *ba_lun)
0351 {
0352 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0353
0354 if (bali) {
0355 kfree(bali->aun_clone_map);
0356 kfree(bali->lun_alloc_map);
0357 kfree(bali);
0358 ba_lun->ba_lun_handle = NULL;
0359 }
0360 }
0361
0362
0363
0364
0365
0366
0367
0368 static int init_vlun(struct llun_info *lli)
0369 {
0370 int rc = 0;
0371 struct glun_info *gli = lli->parent;
0372 struct blka *blka = &gli->blka;
0373
0374 memset(blka, 0, sizeof(*blka));
0375 mutex_init(&blka->mutex);
0376
0377
0378 blka->ba_lun.lun_id = lli->lun_index;
0379 blka->ba_lun.lsize = gli->max_lba + 1;
0380 blka->ba_lun.lba_size = gli->blk_len;
0381
0382 blka->ba_lun.au_size = MC_CHUNK_SIZE;
0383 blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
0384
0385 rc = ba_init(&blka->ba_lun);
0386 if (unlikely(rc))
0387 pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
0388
0389 pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
0390 return rc;
0391 }
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419 static int write_same16(struct scsi_device *sdev,
0420 u64 lba,
0421 u32 nblks)
0422 {
0423 u8 *cmd_buf = NULL;
0424 u8 *scsi_cmd = NULL;
0425 int rc = 0;
0426 int result = 0;
0427 u64 offset = lba;
0428 int left = nblks;
0429 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
0430 struct device *dev = &cfg->dev->dev;
0431 const u32 s = ilog2(sdev->sector_size) - 9;
0432 const u32 to = sdev->request_queue->rq_timeout;
0433 const u32 ws_limit =
0434 sdev->request_queue->limits.max_write_zeroes_sectors >> s;
0435
0436 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
0437 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
0438 if (unlikely(!cmd_buf || !scsi_cmd)) {
0439 rc = -ENOMEM;
0440 goto out;
0441 }
0442
0443 while (left > 0) {
0444
0445 scsi_cmd[0] = WRITE_SAME_16;
0446 scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
0447 put_unaligned_be64(offset, &scsi_cmd[2]);
0448 put_unaligned_be32(ws_limit < left ? ws_limit : left,
0449 &scsi_cmd[10]);
0450
0451
0452 up_read(&cfg->ioctl_rwsem);
0453 result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
0454 CMD_BUFSIZE, NULL, NULL, to,
0455 CMD_RETRIES, 0, 0, NULL);
0456 down_read(&cfg->ioctl_rwsem);
0457 rc = check_state(cfg);
0458 if (rc) {
0459 dev_err(dev, "%s: Failed state result=%08x\n",
0460 __func__, result);
0461 rc = -ENODEV;
0462 goto out;
0463 }
0464
0465 if (result) {
0466 dev_err_ratelimited(dev, "%s: command failed for "
0467 "offset=%lld result=%08x\n",
0468 __func__, offset, result);
0469 rc = -EIO;
0470 goto out;
0471 }
0472 left -= ws_limit;
0473 offset += ws_limit;
0474 }
0475
0476 out:
0477 kfree(cmd_buf);
0478 kfree(scsi_cmd);
0479 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
0480 return rc;
0481 }
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500 static int grow_lxt(struct afu *afu,
0501 struct scsi_device *sdev,
0502 ctx_hndl_t ctxid,
0503 res_hndl_t rhndl,
0504 struct sisl_rht_entry *rhte,
0505 u64 *new_size)
0506 {
0507 struct cxlflash_cfg *cfg = shost_priv(sdev->host);
0508 struct device *dev = &cfg->dev->dev;
0509 struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
0510 struct llun_info *lli = sdev->hostdata;
0511 struct glun_info *gli = lli->parent;
0512 struct blka *blka = &gli->blka;
0513 u32 av_size;
0514 u32 ngrps, ngrps_old;
0515 u64 aun;
0516 u64 delta = *new_size - rhte->lxt_cnt;
0517 u64 my_new_size;
0518 int i, rc = 0;
0519
0520
0521
0522
0523
0524
0525 mutex_lock(&blka->mutex);
0526 av_size = ba_space(&blka->ba_lun);
0527 if (unlikely(av_size <= 0)) {
0528 dev_dbg(dev, "%s: ba_space error av_size=%d\n",
0529 __func__, av_size);
0530 mutex_unlock(&blka->mutex);
0531 rc = -ENOSPC;
0532 goto out;
0533 }
0534
0535 if (av_size < delta)
0536 delta = av_size;
0537
0538 lxt_old = rhte->lxt_start;
0539 ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
0540 ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
0541
0542 if (ngrps != ngrps_old) {
0543
0544 lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
0545 GFP_KERNEL);
0546 if (unlikely(!lxt)) {
0547 mutex_unlock(&blka->mutex);
0548 rc = -ENOMEM;
0549 goto out;
0550 }
0551
0552
0553 memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
0554 } else
0555 lxt = lxt_old;
0556
0557
0558 my_new_size = rhte->lxt_cnt + delta;
0559
0560
0561 for (i = rhte->lxt_cnt; i < my_new_size; i++) {
0562
0563
0564
0565
0566
0567
0568 aun = ba_alloc(&blka->ba_lun);
0569 if ((aun == -1ULL) || (aun >= blka->nchunk))
0570 dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
0571 "max=%llu\n", __func__, aun, blka->nchunk - 1);
0572
0573
0574 lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
0575 (lli->lun_index << LXT_LUNIDX_SHIFT) |
0576 (RHT_PERM_RW << LXT_PERM_SHIFT |
0577 lli->port_sel));
0578 }
0579
0580 mutex_unlock(&blka->mutex);
0581
0582
0583
0584
0585
0586 dma_wmb();
0587
0588 rhte->lxt_start = lxt;
0589 dma_wmb();
0590
0591 rhte->lxt_cnt = my_new_size;
0592 dma_wmb();
0593
0594 rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
0595 if (unlikely(rc))
0596 rc = -EAGAIN;
0597
0598
0599 if (lxt != lxt_old)
0600 kfree(lxt_old);
0601 *new_size = my_new_size;
0602 out:
0603 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
0604 return rc;
0605 }
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
/*
 * shrink_lxt() - reduces translation table associated with the specified RHTE
 * @afu:	AFU associated with the host.
 * @sdev:	SCSI device associated with LUN.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Resource handle entry (RHTE) to shrink.
 * @ctxi:	Context owning resources. Its mutex is held on entry and is
 *		temporarily dropped around the (slow) write-same scrubbing.
 * @new_size:	In: requested number of translation entries. Out: number of
 *		entries remaining.
 *
 * Return: 0 on success, -errno on failure
 */
static int shrink_lxt(struct afu *afu,
		      struct scsi_device *sdev,
		      res_hndl_t rhndl,
		      struct sisl_rht_entry *rhte,
		      struct ctx_info *ctxi,
		      u64 *new_size)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt, *lxt_old;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
	bool needs_ws = ctxi->rht_needs_ws[rhndl];
	/* Skip the AFU sync while error recovery is in flight */
	bool needs_sync = !ctxi->err_recovery_active;
	u32 ngrps, ngrps_old;
	u64 aun;		/* chunk# from block allocator */
	u64 delta = rhte->lxt_cnt - *new_size;
	u64 my_new_size;
	int i, rc = 0;

	lxt_old = rhte->lxt_start;
	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);

	if (ngrps != ngrps_old) {
		/* Reallocate to fit new size; keep only surviving entries */
		if (ngrps) {
			lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
				      GFP_KERNEL);
			if (unlikely(!lxt)) {
				rc = -ENOMEM;
				goto out;
			}

			memcpy(lxt, lxt_old,
			       (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
		} else
			lxt = NULL;
	} else
		lxt = lxt_old;

	/* Nothing can fail from now on */
	my_new_size = rhte->lxt_cnt - delta;

	/*
	 * Publish the shrink in the reverse order of a grow: drop the
	 * count first, then swap the table pointer, so the AFU never
	 * observes a count that exceeds the installed table.
	 */
	rhte->lxt_cnt = my_new_size;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	if (needs_sync) {
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc))
			rc = -EAGAIN;
	}

	if (needs_ws) {
		/*
		 * Mark the context as unavailable so the mutex can be
		 * dropped safely while the slow write-same runs.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
	}

	/* Free LBAs allocated to freed chunks (in reverse order) */
	mutex_lock(&blka->mutex);
	for (i = delta - 1; i >= 0; i--) {
		aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
		if (needs_ws)
			/*
			 * NOTE(review): write_same16() takes an LBA but aun
			 * is a chunk number — presumably the intended start
			 * is aun * MC_CHUNK_SIZE; verify against SISlite.
			 */
			write_same16(sdev, aun, MC_CHUNK_SIZE);
		ba_free(&blka->ba_lun, aun);
	}
	mutex_unlock(&blka->mutex);

	if (needs_ws) {
		/* Make the context visible again */
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	/* Free the old table if it was reallocated */
	if (lxt != lxt_old)
		kfree(lxt_old);
	*new_size = my_new_size;
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
/**
 * _cxlflash_vlun_resize() - changes the size of a virtual LUN
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @ctxi:	Context owning resources; when NULL it is looked up (and
 *		released) here via get_context()/put_context().
 * @resize:	Resize ioctl data structure.
 *
 * On successful return, the user is informed of the new size (in blocks)
 * of the virtual LUN in last-LBA format. When the requested size rounds
 * to zero chunks, the last LBA is reflected as -1.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_vlun_resize(struct scsi_device *sdev,
			  struct ctx_info *ctxi,
			  struct dk_cxlflash_resize *resize)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;	/* only put contexts we looked up ourselves */

	res_hndl_t rhndl = resize->rsrc_handle;
	u64 new_size;
	u64 nsectors;
	u64 ctxid = DECODE_CTXID(resize->context_id),
	    rctxid = resize->context_id;

	struct sisl_rht_entry *rhte;

	int rc = 0;

	/*
	 * The requested size is always in CXLFLASH_BLOCK_SIZE units; convert
	 * to device blocks and then round up to whole MC_CHUNK_SIZE chunks,
	 * the granularity at which the translation table operates.
	 */
	nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
	new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
		__func__, ctxid, resize->rsrc_handle, resize->req_size,
		new_size);

	if (unlikely(gli->mode != MODE_VIRTUAL)) {
		dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
			__func__, gli->mode);
		rc = -EINVAL;
		goto out;

	}

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	if (new_size > rhte->lxt_cnt)
		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
	else if (new_size < rhte->lxt_cnt)
		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
	else {
		/*
		 * Size unchanged: still issue a sync so the AFU and host
		 * view of the translation state stay consistent.
		 * NOTE(review): presumably required by the SISlite flow
		 * for zero-delta resizes — confirm against the spec.
		 */
		rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		if (unlikely(rc)) {
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Report the resulting size back in last-LBA form */
	resize->hdr.return_flags = 0;
	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
	resize->last_lba /= CXLFLASH_BLOCK_SIZE;
	resize->last_lba--;

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
		__func__, resize->last_lba, rc);
	return rc;
}
0821
/**
 * cxlflash_vlun_resize() - ioctl entry point for virtual LUN resize
 * @sdev:	SCSI device associated with LUN.
 * @resize:	Resize ioctl data structure.
 *
 * Thin wrapper that performs the resize without a pre-acquired context
 * (the context is looked up and released internally).
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_vlun_resize(struct scsi_device *sdev,
			 struct dk_cxlflash_resize *resize)
{
	return _cxlflash_vlun_resize(sdev, NULL, resize);
}
0827
0828
0829
0830
0831
0832 void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
0833 {
0834 struct llun_info *lli, *temp;
0835 u32 lind;
0836 int k;
0837 struct device *dev = &cfg->dev->dev;
0838 __be64 __iomem *fc_port_luns;
0839
0840 mutex_lock(&global.mutex);
0841
0842 list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
0843 if (!lli->in_table)
0844 continue;
0845
0846 lind = lli->lun_index;
0847 dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
0848
0849 for (k = 0; k < cfg->num_fc_ports; k++)
0850 if (lli->port_sel & (1 << k)) {
0851 fc_port_luns = get_fc_port_luns(cfg, k);
0852 writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
0853 dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
0854 }
0855 }
0856
0857 mutex_unlock(&global.mutex);
0858 }
0859
0860
0861
0862
0863
0864
0865
0866 static inline u8 get_num_ports(u32 psm)
0867 {
0868 static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
0869 1, 2, 2, 3, 2, 3, 3, 4 };
0870
0871 return bits[psm & 0xf];
0872 }
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
/**
 * init_luntable() - write an entry in the LUN table
 * @cfg:	Internal structure associated with the host.
 * @lli:	Per-adapter LUN information structure.
 *
 * On successful return, a LUN table entry is created:
 *	- from the top of the table (promote_lun_index, growing upward)
 *	  for LUNs visible on multiple ports
 *	- from the bottom of the table (last_lun_index, growing downward)
 *	  for LUNs visible on only one port
 * The two regions colliding indicates the table is full.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
{
	u32 chan;
	u32 lind;
	u32 nports;
	int rc = 0;
	int k;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;

	mutex_lock(&global.mutex);

	/* Already resident, nothing to do */
	if (lli->in_table)
		goto out;

	nports = get_num_ports(lli->port_sel);
	if (nports == 0 || nports > cfg->num_fc_ports) {
		WARN(1, "Unsupported port configuration nports=%u", nports);
		rc = -EIO;
		goto out;
	}

	if (nports > 1) {
		/*
		 * When the LUN is visible from multiple ports, it occupies
		 * the same index on every selected port. Verify the promote
		 * region has not met the single-port region on any port.
		 */
		for (k = 0; k < cfg->num_fc_ports; k++) {
			if (!(lli->port_sel & (1 << k)))
				continue;

			if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
				rc = -ENOSPC;
				goto out;
			}
		}

		lind = lli->lun_index = cfg->promote_lun_index;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);

		/* Program the slot on each selected port */
		for (k = 0; k < cfg->num_fc_ports; k++) {
			if (!(lli->port_sel & (1 << k)))
				continue;

			fc_port_luns = get_fc_port_luns(cfg, k);
			writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
			dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
		}

		cfg->promote_lun_index++;
	} else {
		/*
		 * When the LUN is visible only from one port, allocate from
		 * the bottom of that port's table, moving downward.
		 */
		chan = PORTMASK2CHAN(lli->port_sel);
		if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
			rc = -ENOSPC;
			goto out;
		}

		lind = lli->lun_index = cfg->last_lun_index[chan];
		fc_port_luns = get_fc_port_luns(cfg, chan);
		writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
		cfg->last_lun_index[chan]--;
		dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
			__func__, lind, chan, lli->lun_id[chan]);
	}

	lli->in_table = true;
out:
	mutex_unlock(&global.mutex);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
0960
0961
0962
0963
0964
0965
0966
0967
0968
0969
0970
0971
0972
/**
 * cxlflash_disk_virtual_open() - open a virtual disk of specified size
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @arg:	UVirtual ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle to be
 * used to identify the virtual LUN and the size (in blocks) of the virtual
 * LUN in last-LBA format. When the requested size is zero, the last LBA is
 * reflected as -1.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
	struct dk_cxlflash_resize resize;

	u64 ctxid = DECODE_CTXID(virt->context_id),
	    rctxid = virt->context_id;
	u64 lun_size = virt->lun_size;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	/* Setup the LUN's block allocator on first call */
	mutex_lock(&gli->mutex);
	if (gli->mode == MODE_NONE) {
		rc = init_vlun(lli);
		if (rc) {
			dev_err(dev, "%s: init_vlun failed rc=%d\n",
				__func__, rc);
			rc = -ENOMEM;
			goto err0;
		}
	}

	rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
		goto err0;
	}
	mutex_unlock(&gli->mutex);

	rc = init_luntable(cfg, lli);
	if (rc) {
		dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
		goto err1;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: too many opens ctxid=%llu\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens  */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	/* Populate RHT format 0 (no LBA masking) with context permissions */
	rhte->nmask = MC_RHT_NMASK;
	rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);

	/* Resize even if requested size is 0 */
	marshal_virt_to_resize(virt, &resize);
	resize.rsrc_handle = rsrc_handle;
	rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
	if (rc) {
		dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
		goto err2;
	}
	last_lba = resize.last_lba;

	/* Remember whether freed chunks must be scrubbed with write-same */
	if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
		ctxi->rht_needs_ws[rsrc_handle] = true;

	virt->hdr.return_flags = 0;
	virt->last_lba = last_lba;
	virt->rsrc_handle = rsrc_handle;

	if (get_num_ports(lli->port_sel) > 1)
		virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	/* Return the checked-out resource handle */
	rhte_checkin(ctxi, rhte);
err1:
	cxlflash_lun_detach(gli);
	goto out;
err0:
	/*
	 * Tear down any block allocator state created above; safe when
	 * init_vlun() itself failed (terminate handles a NULL handle).
	 */
	cxlflash_ba_terminate(&gli->blka.ba_lun);
	mutex_unlock(&gli->mutex);
	goto out;
}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/*
 * clone_lxt() - copies translation tables from source to destination RHTE
 * @afu:	AFU associated with the host.
 * @blka:	Block allocator associated with LUN.
 * @ctxid:	Context ID of context owning the RHTE.
 * @rhndl:	Resource handle associated with the RHTE.
 * @rhte:	Destination resource handle entry (RHTE).
 * @rhte_src:	Source resource handle entry (RHTE).
 *
 * Duplicates the source LXT and bumps the clone reference count on every
 * AU so source and destination share the underlying chunks. On failure,
 * all references taken so far are released and the copy is freed.
 *
 * Return: 0 on success, -errno on failure
 */
static int clone_lxt(struct afu *afu,
		     struct blka *blka,
		     ctx_hndl_t ctxid,
		     res_hndl_t rhndl,
		     struct sisl_rht_entry *rhte,
		     struct sisl_rht_entry *rhte_src)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_lxt_entry *lxt = NULL;
	bool locked = false;
	u32 ngrps;
	u64 aun;		/* chunk# from block allocator */
	int j;
	int i = 0;		/* #entries successfully cloned; used on unwind */
	int rc = 0;

	ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);

	if (ngrps) {
		/* Allocate new LXT for the clone */
		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
			      GFP_KERNEL);
		if (unlikely(!lxt)) {
			rc = -ENOMEM;
			goto out;
		}

		/* Copy over the source's entries */
		memcpy(lxt, rhte_src->lxt_start,
		       (sizeof(*lxt) * rhte_src->lxt_cnt));

		/*
		 * Take a clone reference on every AU under the allocator
		 * mutex; the mutex is held until after the AFU sync so no
		 * concurrent free/alloc can observe partial state.
		 */
		mutex_lock(&blka->mutex);
		locked = true;
		for (i = 0; i < rhte_src->lxt_cnt; i++) {
			aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
			if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
				rc = -EIO;
				goto err;
			}
		}
	}

	/*
	 * Publish the new table in the order the AFU expects: entries,
	 * then table pointer, then count — with barriers between steps.
	 */
	dma_wmb(); /* Make LXT updates visible */

	rhte->lxt_start = lxt;
	dma_wmb(); /* Make RHT entry's LXT table update visible */

	rhte->lxt_cnt = rhte_src->lxt_cnt;
	dma_wmb(); /* Make RHT entry's LXT table size update visible */

	rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
	if (unlikely(rc)) {
		rc = -EAGAIN;
		goto err2;
	}

out:
	if (locked)
		mutex_unlock(&blka->mutex);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
err2:
	/* Reset the RHTE before releasing the clone references */
	rhte->lxt_cnt = 0;
	dma_wmb();
	rhte->lxt_start = NULL;
	dma_wmb();
err:
	/* Release the clone references taken so far and free the copy */
	for (j = 0; j < i; j++) {
		aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
		ba_free(&blka->ba_lun, aun);
	}
	kfree(lxt);
	goto out;
}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
/**
 * cxlflash_disk_clone() - clone a context by making a snapshot of another
 * @sdev:	SCSI device associated with LUN owning virtual LUN.
 * @clone:	Clone ioctl data structure.
 *
 * This routine effectively performs a virtual-open of each in-use resource
 * in the source context into the destination context. The destination
 * context must be fresh (no resource handles in use). LUN accesses missing
 * from the destination are staged on a 'sidecar' list and only spliced in
 * after every handle has cloned successfully.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_disk_clone(struct scsi_device *sdev,
			struct dk_cxlflash_clone *clone)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct blka *blka = &gli->blka;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_release release = { { 0 }, 0 };

	struct ctx_info *ctxi_src = NULL,
			*ctxi_dst = NULL;
	struct lun_access *lun_access_src, *lun_access_dst;
	u32 perms;
	u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
	    rctxid_src = clone->context_id_src,
	    rctxid_dst = clone->context_id_dst;
	int i, j;
	int rc = 0;
	bool found;
	LIST_HEAD(sidecar);	/* staged LUN accesses, spliced on success */

	dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
		__func__, ctxid_src, ctxid_dst);

	/* Do not clone yourself */
	if (unlikely(rctxid_src == rctxid_dst)) {
		rc = -EINVAL;
		goto out;
	}

	if (unlikely(gli->mode != MODE_VIRTUAL)) {
		rc = -EINVAL;
		dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
			__func__, gli->mode);
		goto out;
	}

	ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
	ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
	if (unlikely(!ctxi_src || !ctxi_dst)) {
		dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
			__func__, ctxid_src, ctxid_dst);
		rc = -EINVAL;
		goto out;
	}

	/* Verify there is no open resource handle in the destination */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi_dst->rht_start[i].nmask != 0) {
			rc = -EINVAL;
			goto out;
		}

	/*
	 * Clone the LUN access list: stage entries the destination lacks on
	 * the sidecar so nothing is committed before the clone succeeds.
	 */
	list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
		found = false;
		list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
			if (lun_access_dst->sdev == lun_access_src->sdev) {
				found = true;
				break;
			}

		if (!found) {
			lun_access_dst = kzalloc(sizeof(*lun_access_dst),
						 GFP_KERNEL);
			if (unlikely(!lun_access_dst)) {
				dev_err(dev, "%s: lun_access allocation fail\n",
					__func__);
				rc = -ENOMEM;
				goto out;
			}

			*lun_access_dst = *lun_access_src;
			list_add(&lun_access_dst->list, &sidecar);
		}
	}

	if (unlikely(!ctxi_src->rht_out)) {
		dev_dbg(dev, "%s: Nothing to clone\n", __func__);
		goto out_success;
	}

	/* Destination entries inherit the destination's own permissions */
	perms = ctxi_dst->rht_perms;

	/*
	 * Copy each in-use RHT entry and clone its LXT. On a clone failure
	 * mid-way, release every handle cloned so far (handles 0..i-1),
	 * check the failing handle back in, and bail out. Each successful
	 * clone also takes a LUN attach reference.
	 */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
		/* Stop once all in-use source handles have been copied */
		if (ctxi_src->rht_out == ctxi_dst->rht_out)
			break;
		if (ctxi_src->rht_start[i].nmask == 0)
			continue;

		/* Consume a destination RHT entry */
		ctxi_dst->rht_out++;
		ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
		ctxi_dst->rht_start[i].fp =
		    SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
		ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];

		rc = clone_lxt(afu, blka, ctxid_dst, i,
			       &ctxi_dst->rht_start[i],
			       &ctxi_src->rht_start[i]);
		if (rc) {
			marshal_clone_to_rele(clone, &release);
			for (j = 0; j < i; j++) {
				release.rsrc_handle = j;
				_cxlflash_disk_release(sdev, ctxi_dst,
						       &release);
			}

			/* Put back the RHTE */
			rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
			goto err;
		}

		cxlflash_lun_attach(gli, gli->mode, false);
	}

out_success:
	/* Commit the staged LUN accesses */
	list_splice(&sidecar, &ctxi_dst->luns);

	/* fall through */
out:
	if (ctxi_src)
		put_context(ctxi_src);
	if (ctxi_dst)
		put_context(ctxi_dst);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	/* Discard the staged (uncommitted) LUN accesses */
	list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
		kfree(lun_access_src);
	goto out;
}