// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

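/*
 * HBA attach/detach: a FILEIO "HBA" is purely virtual, just a kzalloc'd
 * struct fd_host hung off hba->hba_ptr. Devices created under it draw
 * their IDs from fd_host->fd_host_dev_id_count in fd_configure_device().
 */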
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %s\n", name);

	return &fd_dev->dev;
}

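/*
 * UNMAP limits: when the backing file is a block device the limits are
 * copied from its request queue; for a regular file we advertise fixed
 * emulation limits (up to 8k LBAs per descriptor, one descriptor).
 */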
static bool fd_configure_unmap(struct se_device *dev)
{
	struct file *file = FD_DEV(dev)->fd_file;
	struct inode *inode = file->f_mapping->host;

	if (S_ISBLK(inode->i_mode))
		return target_configure_unmap_from_queue(&dev->dev_attrib,
							 I_BDEV(inode));

	/* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
	dev->dev_attrib.max_unmap_lba_count = 0x2000;
	/* Currently hardcoded to 1 in Linux/SCSI code. */
	dev->dev_attrib.max_unmap_block_desc_count = 1;
	dev->dev_attrib.unmap_granularity = 1;
	dev->dev_attrib.unmap_granularity_alignment = 0;
	return true;
}

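/*
 * Open the backing path, O_DSYNC by default unless fd_buffered_io=1 was
 * given, and derive block size and device size either from the backing
 * struct block_device or from the fd_dev_size= configfs parameter.
 */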
static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write-out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

0139 
0140     file = filp_open(fd_dev->fd_dev_name, flags, 0600);
0141     if (IS_ERR(file)) {
0142         pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
0143         ret = PTR_ERR(file);
0144         goto fail;
0145     }
0146     fd_dev->fd_file = file;
0147     /*
0148      * If using a block backend with this struct file, we extract
0149      * fd_dev->fd_[block,dev]_size from struct block_device.
0150      *
0151      * Otherwise, we use the passed fd_size= from configfs
0152      */
0153     inode = file->f_mapping->host;
0154     if (S_ISBLK(inode->i_mode)) {
0155         struct block_device *bdev = I_BDEV(inode);
0156         unsigned long long dev_size;
0157 
0158         fd_dev->fd_block_size = bdev_logical_block_size(bdev);
0159         /*
0160          * Determine the number of bytes from i_size_read() minus
0161          * one (1) logical sector from underlying struct block_device
0162          */
0163         dev_size = (i_size_read(file->f_mapping->host) -
0164                        fd_dev->fd_block_size);
0165 
0166         pr_debug("FILEIO: Using size: %llu bytes from struct"
0167             " block_device blocks: %llu logical_block_size: %d\n",
0168             dev_size, div_u64(dev_size, fd_dev->fd_block_size),
0169             fd_dev->fd_block_size);
		/*
		 * Enable write same emulation for block-device backends and
		 * use 0xFFFF, as the smaller WRITE_SAME(10) only has a
		 * two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (bdev_nonrot(bdev))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 0x1000 (4k)
		 * Number of LBAs (NoLB), based upon the struct iovec
		 * limit for vfs_writev().
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}

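/*
 * Teardown happens in two steps: fd_destroy_device() closes the backing
 * file, while fd_free_device() defers the final kfree() of struct fd_dev
 * through an RCU grace period, so lockless readers still dereferencing
 * the se_device never touch freed memory.
 */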
static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static void fd_destroy_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
}

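/*
 * Async I/O path: struct target_core_file_cmd bundles a kiocb with a
 * flexible array of bio_vecs built from the command's scatterlist. The
 * I/O is submitted via ->read_iter()/->write_iter() with IOCB_DIRECT,
 * and cmd_rw_aio_complete() maps the transferred byte count to SAM
 * status, invoked directly when the call completes synchronously,
 * otherwise from the ki_complete callback.
 */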
struct target_core_file_cmd {
	unsigned long	len;
	struct se_cmd	*cmd;
	struct kiocb	iocb;
	struct bio_vec	bvecs[];
};

static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
{
	struct target_core_file_cmd *cmd;

	cmd = container_of(iocb, struct target_core_file_cmd, iocb);

	if (ret != cmd->len)
		target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);

	kfree(cmd);
}

static sense_reason_t
fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	int is_write = !(data_direction == DMA_FROM_DEVICE);
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct target_core_file_cmd *aio_cmd;
	struct iov_iter iter;
	struct scatterlist *sg;
	ssize_t len = 0;
	int ret = 0, i;

	aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
	if (!aio_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for_each_sg(sgl, sg, sgl_nents, i) {
		aio_cmd->bvecs[i].bv_page = sg_page(sg);
		aio_cmd->bvecs[i].bv_len = sg->length;
		aio_cmd->bvecs[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);

	aio_cmd->cmd = cmd;
	aio_cmd->len = len;
	aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
	aio_cmd->iocb.ki_filp = file;
	aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
	aio_cmd->iocb.ki_flags = IOCB_DIRECT;

	if (is_write && (cmd->se_cmd_flags & SCF_FUA))
		aio_cmd->iocb.ki_flags |= IOCB_DSYNC;

	if (is_write)
		ret = call_write_iter(file, &aio_cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &aio_cmd->iocb, &iter);

	if (ret != -EIOCBQUEUED)
		cmd_rw_aio_complete(&aio_cmd->iocb, ret);

	return 0;
}

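/*
 * Synchronous vectored I/O: translate the scatterlist into bio_vecs and
 * issue a single vfs_iter_read()/vfs_iter_write(). A short read from a
 * regular file (e.g. the file was truncated underneath us) is padded
 * with zeros and still returns success; for block devices any short
 * transfer is an error.
 */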
static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_rw bvec[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos, 0);
	else
		ret = vfs_iter_read(fd, &iter, &pos, 0);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			if (ret >= 0)
				ret = -EINVAL;
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				if (ret >= 0)
					ret = -EINVAL;
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
			} else if (ret != data_length) {
				/*
				 * Short read case:
				 * Probably someone truncated the file under us.
				 * We must explicitly zero the sg pages to avoid
				 * exposing uninitialized pages to userspace.
				 */
				if (ret < data_length)
					ret += iov_iter_zero(data_length - ret, &iter);
				else
					ret = -EINVAL;
			}
		}
	}
	kfree(bvec);
	return ret;
}

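/*
 * SYNCHRONIZE_CACHE: with the IMMED bit set, GOOD status is returned
 * before the flush runs, and a later vfs_fsync_range() failure can no
 * longer be reported. LBA 0 with length 0 means flush the whole device.
 */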
static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}

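/*
 * WRITE_SAME without UNMAP: the payload must be exactly one logical
 * block in a single SG entry. It is replicated by pointing nolb bio_vecs
 * at that same page and issuing one vfs_iter_write() over the range.
 */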
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (!cmd->t_data_nents)
		return TCM_INVALID_CDB_FIELD;

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, WRITE, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

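/*
 * Fill a range of the PI metadata file with 0xff bytes. All-ones tags
 * (e.g. an application tag of 0xffff) act as the DIF escape pattern, so
 * verification is skipped for blocks that were unmapped or formatted.
 */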
static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	memset(buf, 0xff, bufsize);
	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, &pos);

		if (ret != len) {
			pr_err("kernel_write() to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}

static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

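/*
 * UNMAP: first punch the matching range in the PI file when protection
 * is enabled, then either issue a block-layer discard (block-device
 * backend) or punch a hole with fallocate() (regular-file backend).
 */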
static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (!nolb)
		return 0;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = I_BDEV(inode);
		struct se_device *dev = cmd->se_dev;

		ret = blkdev_issue_discard(bdev,
					   target_to_linux_sector(dev, lba),
					   target_to_linux_sector(dev, nolb),
					   GFP_KERNEL);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a normal file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}

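/*
 * Buffered/synchronous path: for reads, PI metadata is read before the
 * data and verified afterwards; for writes, the data is verified first,
 * then written (with an explicit vfs_fsync_range() when FUA is set),
 * and finally the PI metadata is written out.
 */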
static sense_reason_t
fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_rw() write
		 * ops for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);

	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
		return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
	return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
	Opt_fd_async_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_fd_async_io, "fd_async_io=%d"},
	{Opt_err, NULL}
};

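/*
 * Parse the comma/newline separated configfs control string. As an
 * illustration only (the path below is hypothetical), a valid string
 * looks like:
 *
 *   fd_dev_name=/var/targets/disk0.img,fd_dev_size=4294967296,fd_buffered_io=1
 */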
static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
				FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		case Opt_fd_async_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_async_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using async I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s Async: %d\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC",
		!!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
	return bl;
}

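/*
 * Report the last addressable LBA, i.e. (dev_size / block_size) - 1,
 * computed as (dev_size - block_size) / block_size. For block-device
 * backends the size is re-read from the inode on every call so that
 * underlying resize operations are picked up.
 */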
static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

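/*
 * PI metadata is kept in a separate "<fd_dev_name>.protection" file,
 * opened with the same O_DSYNC policy as the data file. Protection
 * emulation is only supported for regular-file backends.
 */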
static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.destroy_device		= fd_destroy_device,
	.free_device		= fd_free_device,
	.configure_unmap	= fd_configure_unmap,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);