// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 ******************************************************************************/

#define pr_fmt(fmt) "libsrp: " fmt

#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"

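/*
 * Build the free pool of information-unit (IU) entries: a kfifo of
 * pointers backed by @q->pool, with one iu_entry per receive buffer.
 * Each entry is pushed onto the fifo and bound to its buffer in @ring.
 * Callers take entries with srp_iu_get() and return them with
 * srp_iu_put().
 */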
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
                 struct srp_buf **ring)
{
    struct iu_entry *iue;
    int i;

    q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
    if (!q->pool)
        return -ENOMEM;
    q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
    if (!q->items)
        goto free_pool;

    spin_lock_init(&q->lock);
    kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

    for (i = 0, iue = q->items; i < max; i++) {
        kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
        iue->sbuf = ring[i];
        iue++;
    }
    return 0;

free_pool:
    kfree(q->pool);
    return -ENOMEM;
}

static void srp_iu_pool_free(struct srp_queue *q)
{
    kfree(q->items);
    kfree(q->pool);
}

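/*
 * Allocate the receive ring: @max srp_buf descriptors, each with a
 * DMA-coherent buffer of @size bytes.  On any failure, everything
 * allocated so far is released and NULL is returned.
 */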
static struct srp_buf **srp_ring_alloc(struct device *dev,
                       size_t max, size_t size)
{
    struct srp_buf **ring;
    int i;

    ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
    if (!ring)
        return NULL;

    for (i = 0; i < max; i++) {
        ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
        if (!ring[i])
            goto out;
        ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
                          GFP_KERNEL);
        if (!ring[i]->buf)
            goto out;
    }
    return ring;

out:
    for (i = 0; i < max && ring[i]; i++) {
        if (ring[i]->buf) {
            dma_free_coherent(dev, size, ring[i]->buf,
                      ring[i]->dma);
        }
        kfree(ring[i]);
    }
    kfree(ring);

    return NULL;
}

static void srp_ring_free(struct device *dev, struct srp_buf **ring,
              size_t max, size_t size)
{
    int i;

    for (i = 0; i < max; i++) {
        dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
        kfree(ring[i]);
    }
    kfree(ring);
}

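/**
 * srp_target_alloc() - initialize an SRP target's receive resources
 * @target:  target instance to initialize
 * @dev:     device used for DMA-coherent allocations
 * @nr:      number of receive buffers / IU entries to create
 * @iu_size: size in bytes of each receive buffer
 *
 * Allocates the receive ring and the IU free pool, and stores @target
 * in the driver data of @dev.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */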
int srp_target_alloc(struct srp_target *target, struct device *dev,
             size_t nr, size_t iu_size)
{
    int err;

    spin_lock_init(&target->lock);

    target->dev = dev;

    target->srp_iu_size = iu_size;
    target->rx_ring_size = nr;
    target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
    if (!target->rx_ring)
        return -ENOMEM;
    err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
    if (err)
        goto free_ring;

    dev_set_drvdata(target->dev, target);
    return 0;

free_ring:
    srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
    return -ENOMEM;
}

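/**
 * srp_target_free() - release the resources set up by srp_target_alloc()
 * @target: target instance to tear down
 */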
void srp_target_free(struct srp_target *target)
{
    dev_set_drvdata(target->dev, NULL);
    srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
              target->srp_iu_size);
    srp_iu_pool_free(&target->iu_queue);
}

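/**
 * srp_iu_get() - take a free IU entry from the target's pool
 * @target: target to take the entry from
 *
 * Return: an initialized iu_entry, or NULL if the fifo was empty or in
 * an unexpected state.
 */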
struct iu_entry *srp_iu_get(struct srp_target *target)
{
    struct iu_entry *iue = NULL;

    if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
                 sizeof(void *),
                 &target->iu_queue.lock) != sizeof(void *)) {
        WARN_ONCE(1, "unexpected fifo state");
        return NULL;
    }
    if (!iue)
        return iue;
    iue->target = target;
    iue->flags = 0;
    return iue;
}

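/**
 * srp_iu_put() - return an IU entry to its target's free pool
 * @iue: entry obtained earlier from srp_iu_get()
 */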
void srp_iu_put(struct iu_entry *iue)
{
    kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
            sizeof(void *), &iue->target->iu_queue.lock);
}

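/*
 * Transfer the data described by a single direct descriptor.  When
 * @dma_map is set, the command's scatterlist is DMA-mapped around the
 * @rdma_io callback and the transfer length is clamped to the smaller
 * of the command's data length and the descriptor length.
 */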
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
               enum dma_data_direction dir, srp_rdma_t rdma_io,
               int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct scatterlist *sg = NULL;
    int err, nsg = 0, len;

    if (dma_map) {
        iue = cmd->iue;
        sg = cmd->se_cmd.t_data_sg;
        nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
                 DMA_BIDIRECTIONAL);
        if (!nsg) {
            pr_err("failed to map %p %d\n", iue,
                   cmd->se_cmd.t_data_nents);
            return 0;
        }
        len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
    } else {
        len = be32_to_cpu(md->len);
    }

    err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

    return err;
}

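/*
 * Transfer the data described by an indirect descriptor table.  If the
 * whole table is embedded in the SRP command (the descriptor count in
 * the command matches the table length), it is used in place; otherwise
 * the external table is first fetched into a DMA-coherent buffer via
 * @rdma_io before the data transfer itself is carried out.
 */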
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
                 struct srp_indirect_buf *id,
                 enum dma_data_direction dir, srp_rdma_t rdma_io,
                 int dma_map, int ext_desc)
{
    struct iu_entry *iue = NULL;
    struct srp_direct_buf *md = NULL;
    struct scatterlist dummy, *sg = NULL;
    dma_addr_t token = 0;
    int err = 0;
    int nmd, nsg = 0, len;

    if (dma_map || ext_desc) {
        iue = cmd->iue;
        sg = cmd->se_cmd.t_data_sg;
    }

    nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

    if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
        (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
        md = &id->desc_list[0];
        goto rdma;
    }

    if (ext_desc && dma_map) {
        md = dma_alloc_coherent(iue->target->dev,
                    be32_to_cpu(id->table_desc.len),
                    &token, GFP_KERNEL);
        if (!md) {
            pr_err("Can't get dma memory %u\n",
                   be32_to_cpu(id->table_desc.len));
            return -ENOMEM;
        }

        sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
        sg_dma_address(&dummy) = token;
        sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
        err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                  be32_to_cpu(id->table_desc.len));
        if (err) {
            pr_err("Error copying indirect table %d\n", err);
            goto free_mem;
        }
    } else {
        pr_err("This command uses an external indirect buffer\n");
        return -EINVAL;
    }

rdma:
    if (dma_map) {
        nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
                 DMA_BIDIRECTIONAL);
        if (!nsg) {
            pr_err("failed to map %p %d\n", iue,
                   cmd->se_cmd.t_data_nents);
            err = -EIO;
            goto free_mem;
        }
        len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
    } else {
        len = be32_to_cpu(id->len);
    }

    err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

    if (dma_map)
        dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
    if (token && dma_map) {
        dma_free_coherent(iue->target->dev,
                  be32_to_cpu(id->table_desc.len), md, token);
    }
    return err;
}

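/*
 * Size in bytes of the data-out descriptor section of an SRP command,
 * derived from the upper nibble of buf_fmt (none, direct, or indirect).
 */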
static int data_out_desc_size(struct srp_cmd *cmd)
{
    int size = 0;
    u8 fmt = cmd->buf_fmt >> 4;

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        size = sizeof(struct srp_direct_buf);
        break;
    case SRP_DATA_DESC_INDIRECT:
        size = sizeof(struct srp_indirect_buf) +
            sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
        break;
    default:
        pr_err("client error: invalid data_out_format %x\n", fmt);
        break;
    }
    return size;
}

/*
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
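/**
 * srp_transfer_data() - carry out the data phase of an SRP command
 * @cmd:      target command the transfer belongs to
 * @srp_cmd:  SRP command containing the data descriptors
 * @rdma_io:  callback that performs the actual RDMA
 * @dma_map:  nonzero to DMA-map the command's scatterlist here
 * @ext_desc: nonzero if external indirect tables may be fetched
 *
 * buf_fmt packs the data-out format in its upper nibble and the
 * data-in format in its lower nibble; for data-in transfers the
 * descriptor offset is advanced past the data-out descriptors.
 *
 * Return: 0 on success or a negative errno.
 */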
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
              srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
    struct srp_direct_buf *md;
    struct srp_indirect_buf *id;
    enum dma_data_direction dir;
    int offset, err = 0;
    u8 format;

    if (!cmd->se_cmd.t_data_nents)
        return 0;

    offset = srp_cmd->add_cdb_len & ~3;

    dir = srp_cmd_direction(srp_cmd);
    if (dir == DMA_FROM_DEVICE)
        offset += data_out_desc_size(srp_cmd);

    if (dir == DMA_TO_DEVICE)
        format = srp_cmd->buf_fmt >> 4;
    else
        format = srp_cmd->buf_fmt & ((1U << 4) - 1);

    switch (format) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
        err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
        break;
    case SRP_DATA_DESC_INDIRECT:
        id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
        err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
                    ext_desc);
        break;
    default:
        pr_err("Unknown format %d %x\n", dir, format);
        err = -EINVAL;
    }

    return err;
}

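/**
 * srp_data_length() - total data length described by an SRP command
 * @cmd: SRP command to inspect
 * @dir: transfer direction; selects the data-out or data-in descriptor
 *
 * Return: length in bytes from the direct or indirect descriptor, or 0
 * if the command carries no data descriptor.
 */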
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
    struct srp_direct_buf *md;
    struct srp_indirect_buf *id;
    u64 len = 0;
    uint offset = cmd->add_cdb_len & ~3;
    u8 fmt;

    if (dir == DMA_TO_DEVICE) {
        fmt = cmd->buf_fmt >> 4;
    } else {
        fmt = cmd->buf_fmt & ((1U << 4) - 1);
        offset += data_out_desc_size(cmd);
    }

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        md = (struct srp_direct_buf *)(cmd->add_data + offset);
        len = be32_to_cpu(md->len);
        break;
    case SRP_DATA_DESC_INDIRECT:
        id = (struct srp_indirect_buf *)(cmd->add_data + offset);
        len = be32_to_cpu(id->len);
        break;
    default:
        pr_err("invalid data format %x\n", fmt);
        break;
    }
    return len;
}

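/**
 * srp_get_desc_table() - extract direction and length from an SRP command
 * @srp_cmd:  command to parse
 * @dir:      set to DMA_FROM_DEVICE, DMA_TO_DEVICE, or DMA_NONE
 * @data_len: set to the transfer length taken from the data descriptor
 *
 * Return: 0; results are reported through @dir and @data_len.
 */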
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
               u64 *data_len)
{
    struct srp_indirect_buf *idb;
    struct srp_direct_buf *db;
    uint add_cdb_offset;
    int rc;

    /*
     * The pointer computations below will only be compiled correctly
     * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
     * whether srp_cmd::add_data has been declared as a byte pointer.
     */
    BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
             && !__same_type(srp_cmd->add_data[0], (u8)0));

    BUG_ON(!dir);
    BUG_ON(!data_len);

    rc = 0;
    *data_len = 0;

    *dir = DMA_NONE;

    if (srp_cmd->buf_fmt & 0xf)
        *dir = DMA_FROM_DEVICE;
    else if (srp_cmd->buf_fmt >> 4)
        *dir = DMA_TO_DEVICE;

    add_cdb_offset = srp_cmd->add_cdb_len & ~3;
    if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
        ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
        db = (struct srp_direct_buf *)(srp_cmd->add_data
                           + add_cdb_offset);
        *data_len = be32_to_cpu(db->len);
    } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
           ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
        idb = (struct srp_indirect_buf *)(srp_cmd->add_data
                          + add_cdb_offset);

        *data_len = be32_to_cpu(idb->len);
    }
    return rc;
}

MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");