Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * CXL Flash Device Driver
0004  *
0005  * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
0006  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
0007  *
0008  * Copyright (C) 2015 IBM Corporation
0009  */
0010 
0011 #include <linux/interrupt.h>
0012 #include <linux/pci.h>
0013 #include <linux/syscalls.h>
0014 #include <asm/unaligned.h>
0015 #include <asm/bitsperlong.h>
0016 
0017 #include <scsi/scsi_cmnd.h>
0018 #include <scsi/scsi_host.h>
0019 #include <uapi/scsi/cxlflash_ioctl.h>
0020 
0021 #include "sislite.h"
0022 #include "common.h"
0023 #include "vlun.h"
0024 #include "superpipe.h"
0025 
0026 /**
0027  * marshal_virt_to_resize() - translate uvirtual to resize structure
0028  * @virt:   Source structure from which to translate/copy.
0029  * @resize: Destination structure for the translate/copy.
0030  */
0031 static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
0032                    struct dk_cxlflash_resize *resize)
0033 {
0034     resize->hdr = virt->hdr;
0035     resize->context_id = virt->context_id;
0036     resize->rsrc_handle = virt->rsrc_handle;
0037     resize->req_size = virt->lun_size;
0038     resize->last_lba = virt->last_lba;
0039 }
0040 
0041 /**
0042  * marshal_clone_to_rele() - translate clone to release structure
0043  * @clone:  Source structure from which to translate/copy.
0044  * @release:    Destination structure for the translate/copy.
0045  */
0046 static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
0047                   struct dk_cxlflash_release *release)
0048 {
0049     release->hdr = clone->hdr;
0050     release->context_id = clone->context_id_dst;
0051 }
0052 
/**
 * ba_init() - initializes a block allocator
 * @ba_lun: Block allocator to initialize.
 *
 * Builds the per-LUN allocation bitmap (bit set = allocation unit free,
 * bit clear = allocated) and the per-AU clone reference-count map. On
 * success the allocated ba_lun_info is stored in @ba_lun->ba_lun_handle
 * and is owned by the caller; release it with cxlflash_ba_terminate().
 *
 * Return: 0 on success, -errno on failure
 */
static int ba_init(struct ba_lun *ba_lun)
{
    struct ba_lun_info *bali = NULL;
    int lun_size_au = 0, i = 0;
    int last_word_underflow = 0;
    u64 *lam;

    pr_debug("%s: Initializing LUN: lun_id=%016llx "
         "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
        __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);

    /* Calculate bit map size: one bit per allocation unit (AU) */
    lun_size_au = ba_lun->lsize / ba_lun->au_size;
    if (lun_size_au == 0) {
        pr_debug("%s: Requested LUN size of 0!\n", __func__);
        return -EINVAL;
    }

    /* Allocate lun information container */
    bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
    if (unlikely(!bali)) {
        pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
               __func__, ba_lun->lun_id);
        return -ENOMEM;
    }

    bali->total_aus = lun_size_au;
    bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;

    /* Round up so a partially-used final word is still covered */
    if (lun_size_au % BITS_PER_LONG)
        bali->lun_bmap_size++;

    /* Allocate bitmap space */
    bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
                      GFP_KERNEL);
    if (unlikely(!bali->lun_alloc_map)) {
        pr_err("%s: Failed to allocate lun allocation map: "
               "lun_id=%016llx\n", __func__, ba_lun->lun_id);
        kfree(bali);
        return -ENOMEM;
    }

    /* Initialize the bit map size and set all bits to '1' (all AUs free) */
    bali->free_aun_cnt = lun_size_au;

    for (i = 0; i < bali->lun_bmap_size; i++)
        bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;

    /*
     * If the last word is not fully utilized, mark the extra bits as
     * allocated so they can never be handed out. The padding occupies
     * the low-order bit positions because allocation scans from the
     * high bit (HIBIT) downward via find_first_bit on big-endian maps
     * — assumption based on HIBIT usage here and in ba_alloc().
     */
    last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
    last_word_underflow -= bali->free_aun_cnt;
    if (last_word_underflow > 0) {
        lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
        for (i = (HIBIT - last_word_underflow + 1);
             i < BITS_PER_LONG;
             i++)
            clear_bit(i, (ulong *)lam);
    }

    /* Initialize high elevator index, low/curr already at 0 from kzalloc */
    bali->free_high_idx = bali->lun_bmap_size;

    /* Allocate clone map: one reference count byte per AU */
    bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
                      GFP_KERNEL);
    if (unlikely(!bali->aun_clone_map)) {
        pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
               __func__, ba_lun->lun_id);
        kfree(bali->lun_alloc_map);
        kfree(bali);
        return -ENOMEM;
    }

    /* Pass the allocated LUN info as a handle to the user */
    ba_lun->ba_lun_handle = bali;

    pr_debug("%s: Successfully initialized the LUN: "
         "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
        __func__, ba_lun->lun_id, bali->lun_bmap_size,
        bali->free_aun_cnt);
    return 0;
}
0141 
/**
 * find_free_range() - locates a free bit within the block allocator
 * @low:    First word in block allocator to start search.
 * @high:   Last word in block allocator to search.
 * @bali:   LUN information structure owning the block allocator to search.
 * @bit_word:   Passes back the word in the block allocator owning the free bit.
 *
 * On success this routine also consumes the bit: it clears it in the
 * allocation map and decrements @bali->free_aun_cnt. Callers must hold
 * whatever lock serializes the allocator (the blka mutex on the vlun
 * paths).
 *
 * Return: The bit position within the passed back word, -1 on failure
 */
static int find_free_range(u32 low,
               u32 high,
               struct ba_lun_info *bali, int *bit_word)
{
    int i;
    u64 bit_pos = -1;
    ulong *lam, num_bits;

    for (i = low; i < high; i++)
        if (bali->lun_alloc_map[i] != 0) {
            /* Word has at least one free AU; grab the first set bit */
            lam = (ulong *)&bali->lun_alloc_map[i];
            num_bits = (sizeof(*lam) * BITS_PER_BYTE);
            bit_pos = find_first_bit(lam, num_bits);

            pr_devel("%s: Found free bit %llu in LUN "
                 "map entry %016llx at bitmap index = %d\n",
                 __func__, bit_pos, bali->lun_alloc_map[i], i);

            *bit_word = i;
            bali->free_aun_cnt--;
            clear_bit(bit_pos, lam);
            break;
        }

    /* Truncated from u64 on return; -1 round-trips to all-ones for callers */
    return bit_pos;
}
0177 
0178 /**
0179  * ba_alloc() - allocates a block from the block allocator
0180  * @ba_lun: Block allocator from which to allocate a block.
0181  *
0182  * Return: The allocated block, -1 on failure
0183  */
0184 static u64 ba_alloc(struct ba_lun *ba_lun)
0185 {
0186     u64 bit_pos = -1;
0187     int bit_word = 0;
0188     struct ba_lun_info *bali = NULL;
0189 
0190     bali = ba_lun->ba_lun_handle;
0191 
0192     pr_debug("%s: Received block allocation request: "
0193          "lun_id=%016llx free_aun_cnt=%llx\n",
0194          __func__, ba_lun->lun_id, bali->free_aun_cnt);
0195 
0196     if (bali->free_aun_cnt == 0) {
0197         pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
0198              __func__, ba_lun->lun_id);
0199         return -1ULL;
0200     }
0201 
0202     /* Search to find a free entry, curr->high then low->curr */
0203     bit_pos = find_free_range(bali->free_curr_idx,
0204                   bali->free_high_idx, bali, &bit_word);
0205     if (bit_pos == -1) {
0206         bit_pos = find_free_range(bali->free_low_idx,
0207                       bali->free_curr_idx,
0208                       bali, &bit_word);
0209         if (bit_pos == -1) {
0210             pr_debug("%s: Could not find an allocation unit on LUN:"
0211                  " lun_id=%016llx\n", __func__, ba_lun->lun_id);
0212             return -1ULL;
0213         }
0214     }
0215 
0216     /* Update the free_curr_idx */
0217     if (bit_pos == HIBIT)
0218         bali->free_curr_idx = bit_word + 1;
0219     else
0220         bali->free_curr_idx = bit_word;
0221 
0222     pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
0223          "free_aun_cnt=%llx\n", __func__,
0224          ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
0225          bali->free_aun_cnt);
0226 
0227     return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
0228 }
0229 
0230 /**
0231  * validate_alloc() - validates the specified block has been allocated
0232  * @bali:       LUN info owning the block allocator.
0233  * @aun:        Block to validate.
0234  *
0235  * Return: 0 on success, -1 on failure
0236  */
0237 static int validate_alloc(struct ba_lun_info *bali, u64 aun)
0238 {
0239     int idx = 0, bit_pos = 0;
0240 
0241     idx = aun / BITS_PER_LONG;
0242     bit_pos = aun % BITS_PER_LONG;
0243 
0244     if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
0245         return -1;
0246 
0247     return 0;
0248 }
0249 
0250 /**
0251  * ba_free() - frees a block from the block allocator
0252  * @ba_lun: Block allocator from which to allocate a block.
0253  * @to_free:    Block to free.
0254  *
0255  * Return: 0 on success, -1 on failure
0256  */
0257 static int ba_free(struct ba_lun *ba_lun, u64 to_free)
0258 {
0259     int idx = 0, bit_pos = 0;
0260     struct ba_lun_info *bali = NULL;
0261 
0262     bali = ba_lun->ba_lun_handle;
0263 
0264     if (validate_alloc(bali, to_free)) {
0265         pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
0266              __func__, to_free, ba_lun->lun_id);
0267         return -1;
0268     }
0269 
0270     pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
0271          "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
0272          bali->free_aun_cnt);
0273 
0274     if (bali->aun_clone_map[to_free] > 0) {
0275         pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
0276              __func__, to_free, ba_lun->lun_id,
0277              bali->aun_clone_map[to_free]);
0278         bali->aun_clone_map[to_free]--;
0279         return 0;
0280     }
0281 
0282     idx = to_free / BITS_PER_LONG;
0283     bit_pos = to_free % BITS_PER_LONG;
0284 
0285     set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
0286     bali->free_aun_cnt++;
0287 
0288     if (idx < bali->free_low_idx)
0289         bali->free_low_idx = idx;
0290     else if (idx > bali->free_high_idx)
0291         bali->free_high_idx = idx;
0292 
0293     pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
0294          "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
0295          ba_lun->lun_id, bali->free_aun_cnt);
0296 
0297     return 0;
0298 }
0299 
0300 /**
0301  * ba_clone() - Clone a chunk of the block allocation table
0302  * @ba_lun: Block allocator from which to allocate a block.
0303  * @to_clone:   Block to clone.
0304  *
0305  * Return: 0 on success, -1 on failure
0306  */
0307 static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
0308 {
0309     struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0310 
0311     if (validate_alloc(bali, to_clone)) {
0312         pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
0313              __func__, to_clone, ba_lun->lun_id);
0314         return -1;
0315     }
0316 
0317     pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
0318          __func__, to_clone, ba_lun->lun_id);
0319 
0320     if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
0321         pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
0322              __func__, to_clone, ba_lun->lun_id);
0323         return -1;
0324     }
0325 
0326     bali->aun_clone_map[to_clone]++;
0327 
0328     return 0;
0329 }
0330 
0331 /**
0332  * ba_space() - returns the amount of free space left in the block allocator
0333  * @ba_lun: Block allocator.
0334  *
0335  * Return: Amount of free space in block allocator
0336  */
0337 static u64 ba_space(struct ba_lun *ba_lun)
0338 {
0339     struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0340 
0341     return bali->free_aun_cnt;
0342 }
0343 
0344 /**
0345  * cxlflash_ba_terminate() - frees resources associated with the block allocator
0346  * @ba_lun: Block allocator.
0347  *
0348  * Safe to call in a partially allocated state.
0349  */
0350 void cxlflash_ba_terminate(struct ba_lun *ba_lun)
0351 {
0352     struct ba_lun_info *bali = ba_lun->ba_lun_handle;
0353 
0354     if (bali) {
0355         kfree(bali->aun_clone_map);
0356         kfree(bali->lun_alloc_map);
0357         kfree(bali);
0358         ba_lun->ba_lun_handle = NULL;
0359     }
0360 }
0361 
0362 /**
0363  * init_vlun() - initializes a LUN for virtual use
0364  * @lli:    LUN information structure that owns the block allocator.
0365  *
0366  * Return: 0 on success, -errno on failure
0367  */
0368 static int init_vlun(struct llun_info *lli)
0369 {
0370     int rc = 0;
0371     struct glun_info *gli = lli->parent;
0372     struct blka *blka = &gli->blka;
0373 
0374     memset(blka, 0, sizeof(*blka));
0375     mutex_init(&blka->mutex);
0376 
0377     /* LUN IDs are unique per port, save the index instead */
0378     blka->ba_lun.lun_id = lli->lun_index;
0379     blka->ba_lun.lsize = gli->max_lba + 1;
0380     blka->ba_lun.lba_size = gli->blk_len;
0381 
0382     blka->ba_lun.au_size = MC_CHUNK_SIZE;
0383     blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
0384 
0385     rc = ba_init(&blka->ba_lun);
0386     if (unlikely(rc))
0387         pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
0388 
0389     pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
0390     return rc;
0391 }
0392 
0393 /**
0394  * write_same16() - sends a SCSI WRITE_SAME16 (0) command to specified LUN
0395  * @sdev:   SCSI device associated with LUN.
0396  * @lba:    Logical block address to start write same.
0397  * @nblks:  Number of logical blocks to write same.
0398  *
0399  * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
0400  * while in scsi_execute(), the EEH handler will attempt to recover. As part of
0401  * the recovery, the handler drains all currently running ioctls, waiting until
0402  * they have completed before proceeding with a reset. As this routine is used
0403  * on the ioctl path, this can create a condition where the EEH handler becomes
0404  * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
0405  * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
0406  * semaphore. This will allow the EEH handler to proceed with a recovery while
0407  * this thread is still running. Once the scsi_execute() returns, reacquire the
0408  * ioctl read semaphore and check the adapter state in case it changed while
0409  * inside of scsi_execute(). The state check will wait if the adapter is still
0410  * being recovered or return a failure if the recovery failed. In the event that
0411  * the adapter reset failed, simply return the failure as the ioctl would be
0412  * unable to continue.
0413  *
0414  * Note that the above puts a requirement on this routine to only be called on
0415  * an ioctl thread.
0416  *
0417  * Return: 0 on success, -errno on failure
0418  */
0419 static int write_same16(struct scsi_device *sdev,
0420             u64 lba,
0421             u32 nblks)
0422 {
0423     u8 *cmd_buf = NULL;
0424     u8 *scsi_cmd = NULL;
0425     int rc = 0;
0426     int result = 0;
0427     u64 offset = lba;
0428     int left = nblks;
0429     struct cxlflash_cfg *cfg = shost_priv(sdev->host);
0430     struct device *dev = &cfg->dev->dev;
0431     const u32 s = ilog2(sdev->sector_size) - 9;
0432     const u32 to = sdev->request_queue->rq_timeout;
0433     const u32 ws_limit =
0434         sdev->request_queue->limits.max_write_zeroes_sectors >> s;
0435 
0436     cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
0437     scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
0438     if (unlikely(!cmd_buf || !scsi_cmd)) {
0439         rc = -ENOMEM;
0440         goto out;
0441     }
0442 
0443     while (left > 0) {
0444 
0445         scsi_cmd[0] = WRITE_SAME_16;
0446         scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
0447         put_unaligned_be64(offset, &scsi_cmd[2]);
0448         put_unaligned_be32(ws_limit < left ? ws_limit : left,
0449                    &scsi_cmd[10]);
0450 
0451         /* Drop the ioctl read semahpore across lengthy call */
0452         up_read(&cfg->ioctl_rwsem);
0453         result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
0454                       CMD_BUFSIZE, NULL, NULL, to,
0455                       CMD_RETRIES, 0, 0, NULL);
0456         down_read(&cfg->ioctl_rwsem);
0457         rc = check_state(cfg);
0458         if (rc) {
0459             dev_err(dev, "%s: Failed state result=%08x\n",
0460                 __func__, result);
0461             rc = -ENODEV;
0462             goto out;
0463         }
0464 
0465         if (result) {
0466             dev_err_ratelimited(dev, "%s: command failed for "
0467                         "offset=%lld result=%08x\n",
0468                         __func__, offset, result);
0469             rc = -EIO;
0470             goto out;
0471         }
0472         left -= ws_limit;
0473         offset += ws_limit;
0474     }
0475 
0476 out:
0477     kfree(cmd_buf);
0478     kfree(scsi_cmd);
0479     dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
0480     return rc;
0481 }
0482 
/**
 * grow_lxt() - expands the translation table associated with the specified RHTE
 * @afu:    AFU associated with the host.
 * @sdev:   SCSI device associated with LUN.
 * @ctxid:  Context ID of context owning the RHTE.
 * @rhndl:  Resource handle associated with the RHTE.
 * @rhte:   Resource handle entry (RHTE).
 * @new_size:   Number of translation entries associated with RHTE.
 *
 * By design, this routine employs a 'best attempt' allocation and will
 * truncate the requested size down if there is not sufficient space in
 * the block allocator to satisfy the request but there does exist some
 * amount of space. The user is made aware of this by returning the size
 * allocated.
 *
 * Return: 0 on success, -errno on failure
 */
static int grow_lxt(struct afu *afu,
            struct scsi_device *sdev,
            ctx_hndl_t ctxid,
            res_hndl_t rhndl,
            struct sisl_rht_entry *rhte,
            u64 *new_size)
{
    struct cxlflash_cfg *cfg = shost_priv(sdev->host);
    struct device *dev = &cfg->dev->dev;
    struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
    struct llun_info *lli = sdev->hostdata;
    struct glun_info *gli = lli->parent;
    struct blka *blka = &gli->blka;
    u32 av_size;
    u32 ngrps, ngrps_old;
    u64 aun;        /* chunk# allocated by block allocator */
    u64 delta = *new_size - rhte->lxt_cnt;
    u64 my_new_size;
    int i, rc = 0;

    /*
     * Check what is available in the block allocator before re-allocating
     * LXT array. This is done up front under the mutex which must not be
     * released until after allocation is complete.
     */
    mutex_lock(&blka->mutex);
    av_size = ba_space(&blka->ba_lun);
    if (unlikely(av_size <= 0)) {
        dev_dbg(dev, "%s: ba_space error av_size=%d\n",
            __func__, av_size);
        mutex_unlock(&blka->mutex);
        rc = -ENOSPC;
        goto out;
    }

    /* Best attempt: shrink the request to whatever space remains */
    if (av_size < delta)
        delta = av_size;

    lxt_old = rhte->lxt_start;
    ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
    ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);

    if (ngrps != ngrps_old) {
        /* reallocate to fit new size */
        lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
                  GFP_KERNEL);
        if (unlikely(!lxt)) {
            mutex_unlock(&blka->mutex);
            rc = -ENOMEM;
            goto out;
        }

        /* copy over all old entries */
        memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
    } else
        lxt = lxt_old;

    /* nothing can fail from now on */
    my_new_size = rhte->lxt_cnt + delta;

    /* add new entries to the end */
    for (i = rhte->lxt_cnt; i < my_new_size; i++) {
        /*
         * Due to the earlier check of available space, ba_alloc
         * cannot fail here. If it did due to internal error,
         * leave a rlba_base of -1u which will likely be a
         * invalid LUN (too large).
         */
        aun = ba_alloc(&blka->ba_lun);
        if ((aun == -1ULL) || (aun >= blka->nchunk))
            dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
                "max=%llu\n", __func__, aun, blka->nchunk - 1);

        /* select both ports, use r/w perms from RHT */
        lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
                    (lli->lun_index << LXT_LUNIDX_SHIFT) |
                    (RHT_PERM_RW << LXT_PERM_SHIFT |
                     lli->port_sel));
    }

    mutex_unlock(&blka->mutex);

    /*
     * The following sequence is prescribed in the SISlite spec
     * for syncing up with the AFU when adding LXT entries: publish
     * the table contents, then the table pointer, then the count,
     * with a barrier after each step so the AFU never observes a
     * count that exceeds the visible entries.
     */
    dma_wmb(); /* Make LXT updates visible */

    rhte->lxt_start = lxt;
    dma_wmb(); /* Make RHT entry's LXT table update visible */

    rhte->lxt_cnt = my_new_size;
    dma_wmb(); /* Make RHT entry's LXT table size update visible */

    /* Lightweight sync is sufficient when growing a translation table */
    rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
    if (unlikely(rc))
        rc = -EAGAIN;

    /* free old lxt if reallocated */
    if (lxt != lxt_old)
        kfree(lxt_old);
    *new_size = my_new_size;
out:
    dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
    return rc;
}
0606 
/**
 * shrink_lxt() - reduces translation table associated with the specified RHTE
 * @afu:    AFU associated with the host.
 * @sdev:   SCSI device associated with LUN.
 * @rhndl:  Resource handle associated with the RHTE.
 * @rhte:   Resource handle entry (RHTE).
 * @ctxi:   Context owning resources.
 * @new_size:   Number of translation entries associated with RHTE.
 *
 * Must be called with @ctxi->mutex held. When scrubbing of the freed
 * chunks is required, the mutex is temporarily dropped around the
 * lengthy write_same16() calls with the context marked unavailable.
 *
 * Return: 0 on success, -errno on failure
 */
static int shrink_lxt(struct afu *afu,
              struct scsi_device *sdev,
              res_hndl_t rhndl,
              struct sisl_rht_entry *rhte,
              struct ctx_info *ctxi,
              u64 *new_size)
{
    struct cxlflash_cfg *cfg = shost_priv(sdev->host);
    struct device *dev = &cfg->dev->dev;
    struct sisl_lxt_entry *lxt, *lxt_old;
    struct llun_info *lli = sdev->hostdata;
    struct glun_info *gli = lli->parent;
    struct blka *blka = &gli->blka;
    ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
    bool needs_ws = ctxi->rht_needs_ws[rhndl];
    /* Skip the AFU sync when the context is under error recovery */
    bool needs_sync = !ctxi->err_recovery_active;
    u32 ngrps, ngrps_old;
    u64 aun;        /* chunk# allocated by block allocator */
    u64 delta = rhte->lxt_cnt - *new_size;
    u64 my_new_size;
    int i, rc = 0;

    lxt_old = rhte->lxt_start;
    ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
    ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);

    if (ngrps != ngrps_old) {
        /* Reallocate to fit new size unless new size is 0 */
        if (ngrps) {
            lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
                      GFP_KERNEL);
            if (unlikely(!lxt)) {
                rc = -ENOMEM;
                goto out;
            }

            /* Copy over old entries that will remain */
            memcpy(lxt, lxt_old,
                   (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
        } else
            lxt = NULL;
    } else
        lxt = lxt_old;

    /* Nothing can fail from now on */
    my_new_size = rhte->lxt_cnt - delta;

    /*
     * The following sequence is prescribed in the SISlite spec
     * for syncing up with the AFU when removing LXT entries: shrink
     * the count first, then swap the table pointer, with a barrier
     * after each step so the AFU never dereferences entries that
     * are being removed.
     */
    rhte->lxt_cnt = my_new_size;
    dma_wmb(); /* Make RHT entry's LXT table size update visible */

    rhte->lxt_start = lxt;
    dma_wmb(); /* Make RHT entry's LXT table update visible */

    if (needs_sync) {
        rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
        if (unlikely(rc))
            rc = -EAGAIN;
    }

    if (needs_ws) {
        /*
         * Mark the context as unavailable, so that we can release
         * the mutex safely.
         */
        ctxi->unavail = true;
        mutex_unlock(&ctxi->mutex);
    }

    /* Free LBAs allocated to freed chunks */
    mutex_lock(&blka->mutex);
    for (i = delta - 1; i >= 0; i--) {
        aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
        if (needs_ws)
            /* Scrub the chunk before returning it to the pool */
            write_same16(sdev, aun, MC_CHUNK_SIZE);
        ba_free(&blka->ba_lun, aun);
    }
    mutex_unlock(&blka->mutex);

    if (needs_ws) {
        /* Make the context visible again */
        mutex_lock(&ctxi->mutex);
        ctxi->unavail = false;
    }

    /* Free old lxt if reallocated */
    if (lxt != lxt_old)
        kfree(lxt_old);
    *new_size = my_new_size;
out:
    dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
    return rc;
}
0714 
/**
 * _cxlflash_vlun_resize() - changes the size of a virtual LUN
 * @sdev:   SCSI device associated with LUN owning virtual LUN.
 * @ctxi:   Context owning resources.
 * @resize: Resize ioctl data structure.
 *
 * On successful return, the user is informed of the new size (in blocks)
 * of the virtual LUN in last LBA format. When the size of the virtual
 * LUN is zero, the last LBA is reflected as -1. See comment in the
 * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
 * on the error recovery list.
 *
 * When @ctxi is NULL, the context is looked up (and referenced) from the
 * resize request and released before returning.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_vlun_resize(struct scsi_device *sdev,
              struct ctx_info *ctxi,
              struct dk_cxlflash_resize *resize)
{
    struct cxlflash_cfg *cfg = shost_priv(sdev->host);
    struct device *dev = &cfg->dev->dev;
    struct llun_info *lli = sdev->hostdata;
    struct glun_info *gli = lli->parent;
    struct afu *afu = cfg->afu;
    bool put_ctx = false;

    res_hndl_t rhndl = resize->rsrc_handle;
    u64 new_size;
    u64 nsectors;
    u64 ctxid = DECODE_CTXID(resize->context_id),
        rctxid = resize->context_id;

    struct sisl_rht_entry *rhte;

    int rc = 0;

    /*
     * The requested size (req_size) is always assumed to be in 4k blocks,
     * so we have to convert it here from 4k to chunk size.
     */
    nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
    new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);

    dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
        __func__, ctxid, resize->rsrc_handle, resize->req_size,
        new_size);

    /* Resizing is only meaningful for LUNs in virtual mode */
    if (unlikely(gli->mode != MODE_VIRTUAL)) {
        dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
            __func__, gli->mode);
        rc = -EINVAL;
        goto out;

    }

    if (!ctxi) {
        /* No context provided by caller; take a reference ourselves */
        ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
        if (unlikely(!ctxi)) {
            dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                __func__, ctxid);
            rc = -EINVAL;
            goto out;
        }

        put_ctx = true;
    }

    rhte = get_rhte(ctxi, rhndl, lli);
    if (unlikely(!rhte)) {
        dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
            __func__, rhndl);
        rc = -EINVAL;
        goto out;
    }

    if (new_size > rhte->lxt_cnt)
        rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
    else if (new_size < rhte->lxt_cnt)
        rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
    else {
        /*
         * Rare case where there is already sufficient space, just
         * need to perform a translation sync with the AFU. This
         * scenario likely follows a previous sync failure during
         * a resize operation. Accordingly, perform the heavyweight
         * form of translation sync as it is unknown which type of
         * resize failed previously.
         */
        rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
        if (unlikely(rc)) {
            rc = -EAGAIN;
            goto out;
        }
    }

    /* Report the new size back in last-LBA form (underflows to -1 at 0) */
    resize->hdr.return_flags = 0;
    resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
    resize->last_lba /= CXLFLASH_BLOCK_SIZE;
    resize->last_lba--;

out:
    if (put_ctx)
        put_context(ctxi);
    dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
        __func__, resize->last_lba, rc);
    return rc;
}
0821 
/**
 * cxlflash_vlun_resize() - changes the size of a virtual LUN
 * @sdev:   SCSI device associated with LUN owning virtual LUN.
 * @resize: Resize ioctl data structure.
 *
 * Convenience wrapper over _cxlflash_vlun_resize() that performs the
 * context lookup internally.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_vlun_resize(struct scsi_device *sdev,
             struct dk_cxlflash_resize *resize)
{
    return _cxlflash_vlun_resize(sdev, NULL, resize);
}
0827 
0828 /**
0829  * cxlflash_restore_luntable() - Restore LUN table to prior state
0830  * @cfg:    Internal structure associated with the host.
0831  */
0832 void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
0833 {
0834     struct llun_info *lli, *temp;
0835     u32 lind;
0836     int k;
0837     struct device *dev = &cfg->dev->dev;
0838     __be64 __iomem *fc_port_luns;
0839 
0840     mutex_lock(&global.mutex);
0841 
0842     list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
0843         if (!lli->in_table)
0844             continue;
0845 
0846         lind = lli->lun_index;
0847         dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
0848 
0849         for (k = 0; k < cfg->num_fc_ports; k++)
0850             if (lli->port_sel & (1 << k)) {
0851                 fc_port_luns = get_fc_port_luns(cfg, k);
0852                 writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
0853                 dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
0854             }
0855     }
0856 
0857     mutex_unlock(&global.mutex);
0858 }
0859 
/**
 * get_num_ports() - compute number of ports from port selection mask
 * @psm:    Port selection mask.
 *
 * Only the low 4 bits of the mask are considered; counts set bits
 * using Kernighan's bit-clearing loop.
 *
 * Return: Population count of port selection mask
 */
static inline u8 get_num_ports(u32 psm)
{
    u32 bits = psm & 0xf;
    u8 cnt = 0;

    while (bits) {
        bits &= bits - 1;   /* clear lowest set bit */
        cnt++;
    }

    return cnt;
}
0873 
0874 /**
0875  * init_luntable() - write an entry in the LUN table
0876  * @cfg:    Internal structure associated with the host.
0877  * @lli:    Per adapter LUN information structure.
0878  *
0879  * On successful return, a LUN table entry is created:
0880  *  - at the top for LUNs visible on multiple ports.
0881  *  - at the bottom for LUNs visible only on one port.
0882  *
0883  * Return: 0 on success, -errno on failure
0884  */
0885 static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
0886 {
0887     u32 chan;
0888     u32 lind;
0889     u32 nports;
0890     int rc = 0;
0891     int k;
0892     struct device *dev = &cfg->dev->dev;
0893     __be64 __iomem *fc_port_luns;
0894 
0895     mutex_lock(&global.mutex);
0896 
0897     if (lli->in_table)
0898         goto out;
0899 
0900     nports = get_num_ports(lli->port_sel);
0901     if (nports == 0 || nports > cfg->num_fc_ports) {
0902         WARN(1, "Unsupported port configuration nports=%u", nports);
0903         rc = -EIO;
0904         goto out;
0905     }
0906 
0907     if (nports > 1) {
0908         /*
0909          * When LUN is visible from multiple ports, we will put
0910          * it in the top half of the LUN table.
0911          */
0912         for (k = 0; k < cfg->num_fc_ports; k++) {
0913             if (!(lli->port_sel & (1 << k)))
0914                 continue;
0915 
0916             if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
0917                 rc = -ENOSPC;
0918                 goto out;
0919             }
0920         }
0921 
0922         lind = lli->lun_index = cfg->promote_lun_index;
0923         dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
0924 
0925         for (k = 0; k < cfg->num_fc_ports; k++) {
0926             if (!(lli->port_sel & (1 << k)))
0927                 continue;
0928 
0929             fc_port_luns = get_fc_port_luns(cfg, k);
0930             writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
0931             dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
0932         }
0933 
0934         cfg->promote_lun_index++;
0935     } else {
0936         /*
0937          * When LUN is visible only from one port, we will put
0938          * it in the bottom half of the LUN table.
0939          */
0940         chan = PORTMASK2CHAN(lli->port_sel);
0941         if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
0942             rc = -ENOSPC;
0943             goto out;
0944         }
0945 
0946         lind = lli->lun_index = cfg->last_lun_index[chan];
0947         fc_port_luns = get_fc_port_luns(cfg, chan);
0948         writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
0949         cfg->last_lun_index[chan]--;
0950         dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
0951             __func__, lind, chan, lli->lun_id[chan]);
0952     }
0953 
0954     lli->in_table = true;
0955 out:
0956     mutex_unlock(&global.mutex);
0957     dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
0958     return rc;
0959 }
0960 
0961 /**
0962  * cxlflash_disk_virtual_open() - open a virtual disk of specified size
0963  * @sdev:   SCSI device associated with LUN owning virtual LUN.
0964  * @arg:    UVirtual ioctl data structure.
0965  *
0966  * On successful return, the user is informed of the resource handle
0967  * to be used to identify the virtual LUN and the size (in blocks) of
0968  * the virtual LUN in last LBA format. When the size of the virtual LUN
0969  * is zero, the last LBA is reflected as -1.
0970  *
0971  * Return: 0 on success, -errno on failure
0972  */
0973 int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
0974 {
0975     struct cxlflash_cfg *cfg = shost_priv(sdev->host);
0976     struct device *dev = &cfg->dev->dev;
0977     struct llun_info *lli = sdev->hostdata;
0978     struct glun_info *gli = lli->parent;
0979 
0980     struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
0981     struct dk_cxlflash_resize resize;
0982 
0983     u64 ctxid = DECODE_CTXID(virt->context_id),
0984         rctxid = virt->context_id;
0985     u64 lun_size = virt->lun_size;
0986     u64 last_lba = 0;
0987     u64 rsrc_handle = -1;
0988 
0989     int rc = 0;
0990 
0991     struct ctx_info *ctxi = NULL;
0992     struct sisl_rht_entry *rhte = NULL;
0993 
0994     dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
0995 
0996     /* Setup the LUNs block allocator on first call */
0997     mutex_lock(&gli->mutex);
0998     if (gli->mode == MODE_NONE) {
0999         rc = init_vlun(lli);
1000         if (rc) {
1001             dev_err(dev, "%s: init_vlun failed rc=%d\n",
1002                 __func__, rc);
1003             rc = -ENOMEM;
1004             goto err0;
1005         }
1006     }
1007 
1008     rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
1009     if (unlikely(rc)) {
1010         dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
1011         goto err0;
1012     }
1013     mutex_unlock(&gli->mutex);
1014 
1015     rc = init_luntable(cfg, lli);
1016     if (rc) {
1017         dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
1018         goto err1;
1019     }
1020 
1021     ctxi = get_context(cfg, rctxid, lli, 0);
1022     if (unlikely(!ctxi)) {
1023         dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
1024         rc = -EINVAL;
1025         goto err1;
1026     }
1027 
1028     rhte = rhte_checkout(ctxi, lli);
1029     if (unlikely(!rhte)) {
1030         dev_err(dev, "%s: too many opens ctxid=%llu\n",
1031             __func__, ctxid);
1032         rc = -EMFILE;   /* too many opens  */
1033         goto err1;
1034     }
1035 
1036     rsrc_handle = (rhte - ctxi->rht_start);
1037 
1038     /* Populate RHT format 0 */
1039     rhte->nmask = MC_RHT_NMASK;
1040     rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
1041 
1042     /* Resize even if requested size is 0 */
1043     marshal_virt_to_resize(virt, &resize);
1044     resize.rsrc_handle = rsrc_handle;
1045     rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
1046     if (rc) {
1047         dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
1048         goto err2;
1049     }
1050     last_lba = resize.last_lba;
1051 
1052     if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
1053         ctxi->rht_needs_ws[rsrc_handle] = true;
1054 
1055     virt->hdr.return_flags = 0;
1056     virt->last_lba = last_lba;
1057     virt->rsrc_handle = rsrc_handle;
1058 
1059     if (get_num_ports(lli->port_sel) > 1)
1060         virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
1061 out:
1062     if (likely(ctxi))
1063         put_context(ctxi);
1064     dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
1065         __func__, rsrc_handle, rc, last_lba);
1066     return rc;
1067 
1068 err2:
1069     rhte_checkin(ctxi, rhte);
1070 err1:
1071     cxlflash_lun_detach(gli);
1072     goto out;
1073 err0:
1074     /* Special common cleanup prior to successful LUN attach */
1075     cxlflash_ba_terminate(&gli->blka.ba_lun);
1076     mutex_unlock(&gli->mutex);
1077     goto out;
1078 }
1079 
1080 /**
1081  * clone_lxt() - copies translation tables from source to destination RHTE
1082  * @afu:    AFU associated with the host.
1083  * @blka:   Block allocator associated with LUN.
1084  * @ctxid:  Context ID of context owning the RHTE.
1085  * @rhndl:  Resource handle associated with the RHTE.
1086  * @rhte:   Destination resource handle entry (RHTE).
1087  * @rhte_src:   Source resource handle entry (RHTE).
1088  *
1089  * Return: 0 on success, -errno on failure
1090  */
1091 static int clone_lxt(struct afu *afu,
1092              struct blka *blka,
1093              ctx_hndl_t ctxid,
1094              res_hndl_t rhndl,
1095              struct sisl_rht_entry *rhte,
1096              struct sisl_rht_entry *rhte_src)
1097 {
1098     struct cxlflash_cfg *cfg = afu->parent;
1099     struct device *dev = &cfg->dev->dev;
1100     struct sisl_lxt_entry *lxt = NULL;
1101     bool locked = false;
1102     u32 ngrps;
1103     u64 aun;        /* chunk# allocated by block allocator */
1104     int j;
1105     int i = 0;
1106     int rc = 0;
1107 
1108     ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
1109 
1110     if (ngrps) {
1111         /* allocate new LXTs for clone */
1112         lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
1113                 GFP_KERNEL);
1114         if (unlikely(!lxt)) {
1115             rc = -ENOMEM;
1116             goto out;
1117         }
1118 
1119         /* copy over */
1120         memcpy(lxt, rhte_src->lxt_start,
1121                (sizeof(*lxt) * rhte_src->lxt_cnt));
1122 
1123         /* clone the LBAs in block allocator via ref_cnt, note that the
1124          * block allocator mutex must be held until it is established
1125          * that this routine will complete without the need for a
1126          * cleanup.
1127          */
1128         mutex_lock(&blka->mutex);
1129         locked = true;
1130         for (i = 0; i < rhte_src->lxt_cnt; i++) {
1131             aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
1132             if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
1133                 rc = -EIO;
1134                 goto err;
1135             }
1136         }
1137     }
1138 
1139     /*
1140      * The following sequence is prescribed in the SISlite spec
1141      * for syncing up with the AFU when adding LXT entries.
1142      */
1143     dma_wmb(); /* Make LXT updates are visible */
1144 
1145     rhte->lxt_start = lxt;
1146     dma_wmb(); /* Make RHT entry's LXT table update visible */
1147 
1148     rhte->lxt_cnt = rhte_src->lxt_cnt;
1149     dma_wmb(); /* Make RHT entry's LXT table size update visible */
1150 
1151     rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1152     if (unlikely(rc)) {
1153         rc = -EAGAIN;
1154         goto err2;
1155     }
1156 
1157 out:
1158     if (locked)
1159         mutex_unlock(&blka->mutex);
1160     dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1161     return rc;
1162 err2:
1163     /* Reset the RHTE */
1164     rhte->lxt_cnt = 0;
1165     dma_wmb();
1166     rhte->lxt_start = NULL;
1167     dma_wmb();
1168 err:
1169     /* free the clones already made */
1170     for (j = 0; j < i; j++) {
1171         aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
1172         ba_free(&blka->ba_lun, aun);
1173     }
1174     kfree(lxt);
1175     goto out;
1176 }
1177 
1178 /**
1179  * cxlflash_disk_clone() - clone a context by making snapshot of another
1180  * @sdev:   SCSI device associated with LUN owning virtual LUN.
1181  * @clone:  Clone ioctl data structure.
1182  *
1183  * This routine effectively performs cxlflash_disk_open operation for each
1184  * in-use virtual resource in the source context. Note that the destination
1185  * context must be in pristine state and cannot have any resource handles
1186  * open at the time of the clone.
1187  *
1188  * Return: 0 on success, -errno on failure
1189  */
1190 int cxlflash_disk_clone(struct scsi_device *sdev,
1191             struct dk_cxlflash_clone *clone)
1192 {
1193     struct cxlflash_cfg *cfg = shost_priv(sdev->host);
1194     struct device *dev = &cfg->dev->dev;
1195     struct llun_info *lli = sdev->hostdata;
1196     struct glun_info *gli = lli->parent;
1197     struct blka *blka = &gli->blka;
1198     struct afu *afu = cfg->afu;
1199     struct dk_cxlflash_release release = { { 0 }, 0 };
1200 
1201     struct ctx_info *ctxi_src = NULL,
1202             *ctxi_dst = NULL;
1203     struct lun_access *lun_access_src, *lun_access_dst;
1204     u32 perms;
1205     u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
1206         ctxid_dst = DECODE_CTXID(clone->context_id_dst),
1207         rctxid_src = clone->context_id_src,
1208         rctxid_dst = clone->context_id_dst;
1209     int i, j;
1210     int rc = 0;
1211     bool found;
1212     LIST_HEAD(sidecar);
1213 
1214     dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
1215         __func__, ctxid_src, ctxid_dst);
1216 
1217     /* Do not clone yourself */
1218     if (unlikely(rctxid_src == rctxid_dst)) {
1219         rc = -EINVAL;
1220         goto out;
1221     }
1222 
1223     if (unlikely(gli->mode != MODE_VIRTUAL)) {
1224         rc = -EINVAL;
1225         dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
1226             __func__, gli->mode);
1227         goto out;
1228     }
1229 
1230     ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
1231     ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
1232     if (unlikely(!ctxi_src || !ctxi_dst)) {
1233         dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
1234             __func__, ctxid_src, ctxid_dst);
1235         rc = -EINVAL;
1236         goto out;
1237     }
1238 
1239     /* Verify there is no open resource handle in the destination context */
1240     for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
1241         if (ctxi_dst->rht_start[i].nmask != 0) {
1242             rc = -EINVAL;
1243             goto out;
1244         }
1245 
1246     /* Clone LUN access list */
1247     list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
1248         found = false;
1249         list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
1250             if (lun_access_dst->sdev == lun_access_src->sdev) {
1251                 found = true;
1252                 break;
1253             }
1254 
1255         if (!found) {
1256             lun_access_dst = kzalloc(sizeof(*lun_access_dst),
1257                          GFP_KERNEL);
1258             if (unlikely(!lun_access_dst)) {
1259                 dev_err(dev, "%s: lun_access allocation fail\n",
1260                     __func__);
1261                 rc = -ENOMEM;
1262                 goto out;
1263             }
1264 
1265             *lun_access_dst = *lun_access_src;
1266             list_add(&lun_access_dst->list, &sidecar);
1267         }
1268     }
1269 
1270     if (unlikely(!ctxi_src->rht_out)) {
1271         dev_dbg(dev, "%s: Nothing to clone\n", __func__);
1272         goto out_success;
1273     }
1274 
1275     /* User specified permission on attach */
1276     perms = ctxi_dst->rht_perms;
1277 
1278     /*
1279      * Copy over checked-out RHT (and their associated LXT) entries by
1280      * hand, stopping after we've copied all outstanding entries and
1281      * cleaning up if the clone fails.
1282      *
1283      * Note: This loop is equivalent to performing cxlflash_disk_open and
1284      * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
1285      * account by attaching after each successful RHT entry clone. In the
1286      * event that a clone failure is experienced, the LUN detach is handled
1287      * via the cleanup performed by _cxlflash_disk_release.
1288      */
1289     for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
1290         if (ctxi_src->rht_out == ctxi_dst->rht_out)
1291             break;
1292         if (ctxi_src->rht_start[i].nmask == 0)
1293             continue;
1294 
1295         /* Consume a destination RHT entry */
1296         ctxi_dst->rht_out++;
1297         ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
1298         ctxi_dst->rht_start[i].fp =
1299             SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
1300         ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
1301 
1302         rc = clone_lxt(afu, blka, ctxid_dst, i,
1303                    &ctxi_dst->rht_start[i],
1304                    &ctxi_src->rht_start[i]);
1305         if (rc) {
1306             marshal_clone_to_rele(clone, &release);
1307             for (j = 0; j < i; j++) {
1308                 release.rsrc_handle = j;
1309                 _cxlflash_disk_release(sdev, ctxi_dst,
1310                                &release);
1311             }
1312 
1313             /* Put back the one we failed on */
1314             rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
1315             goto err;
1316         }
1317 
1318         cxlflash_lun_attach(gli, gli->mode, false);
1319     }
1320 
1321 out_success:
1322     list_splice(&sidecar, &ctxi_dst->luns);
1323 
1324     /* fall through */
1325 out:
1326     if (ctxi_src)
1327         put_context(ctxi_src);
1328     if (ctxi_dst)
1329         put_context(ctxi_dst);
1330     dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1331     return rc;
1332 
1333 err:
1334     list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
1335         kfree(lun_access_src);
1336     goto out;
1337 }