/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
    if (!rq->bio) {
        /* take the operation from the bio; clear the old op bits first */
        rq->cmd_flags &= ~REQ_OP_MASK;
        rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
        blk_rq_bio_prep(rq->q, rq, bio);
    } else {
        if (!ll_back_merge_fn(rq->q, rq, bio))
            return -EINVAL;

        rq->biotail->bi_next = bio;
        rq->biotail = bio;
        rq->__data_len += bio->bi_iter.bi_size;
    }

    return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
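
/*
 * Example usage (editor's sketch, not part of the original file): a
 * passthrough driver that has already built a bio can attach it to a
 * freshly allocated request.  blk_get_request() and blk_put_request()
 * are this kernel's request alloc/free helpers; the surrounding error
 * handling is illustrative only.
 *
 *    struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *    if (IS_ERR(rq))
 *        return PTR_ERR(rq);
 *    if (blk_rq_append_bio(rq, bio)) {
 *        blk_put_request(rq);
 *        return -EINVAL;
 *    }
 */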

/*
 * Undo the mapping for one bio: unpin the user pages of a directly
 * mapped bio, or copy the data back to user space and free the pages
 * of a copied one.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
    int ret = 0;

    if (bio) {
        if (bio_flagged(bio, BIO_USER_MAPPED))
            bio_unmap_user(bio);
        else
            ret = bio_uncopy_user(bio);
    }

    return ret;
}

/*
 * Map or copy as much of @iter as fits in one bio, append the bio to
 * @rq and advance the iterator.  blk_rq_map_user_iov() calls this in a
 * loop until the iterator is drained.
 */
static int __blk_rq_map_user_iov(struct request *rq,
        struct rq_map_data *map_data, struct iov_iter *iter,
        gfp_t gfp_mask, bool copy)
{
    struct request_queue *q = rq->q;
    struct bio *bio, *orig_bio;
    int ret;

    if (copy)
        bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
    else
        bio = bio_map_user_iov(q, iter, gfp_mask);

    if (IS_ERR(bio))
        return PTR_ERR(bio);

    if (map_data && map_data->null_mapped)
        bio_set_flag(bio, BIO_NULL_MAPPED);

    iov_iter_advance(iter, bio->bi_iter.bi_size);
    if (map_data)
        map_data->offset += bio->bi_iter.bi_size;

    orig_bio = bio;
    blk_queue_bounce(q, &bio);

    /*
     * We link the bounce buffer in and could have to traverse it
     * later so we have to get a ref to prevent it from being freed
     */
    bio_get(bio);

    ret = blk_rq_append_bio(rq, bio);
    if (ret) {
        bio_endio(bio);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
    }

    return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:      request queue where request should be inserted
 * @rq:     request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:   iovec iterator
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
            struct rq_map_data *map_data,
            const struct iov_iter *iter, gfp_t gfp_mask)
{
    bool copy = false;
    unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
    struct bio *bio = NULL;
    struct iov_iter i;
    int ret;

    if (!iter_is_iovec(iter))
        goto fail;

    /*
     * Copying is needed when the caller supplies its own pages via
     * map_data, when the iovec is not aligned for the queue's DMA
     * constraints, or when an inter-segment gap would violate the
     * virtual boundary mask.
     */
    if (map_data)
        copy = true;
    else if (iov_iter_alignment(iter) & align)
        copy = true;
    else if (queue_virt_boundary(q))
        copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

    i = *iter;
    do {
        ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
        if (ret)
            goto unmap_rq;
        if (!bio)
            bio = rq->bio;
    } while (iov_iter_count(&i));

    if (!bio_flagged(bio, BIO_USER_MAPPED))
        rq->rq_flags |= RQF_COPY_USER;
    return 0;

unmap_rq:
    __blk_rq_unmap_user(bio);
fail:
    rq->bio = NULL;
    return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
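
/*
 * Example usage (editor's sketch): an SG_IO-style ioctl handler builds
 * the iov_iter from a user iovec array with import_iovec() and frees
 * the iovec once the data is mapped; uvec and nr_segs below are
 * hypothetical names for the user-supplied array and its length.
 *
 *    struct iovec *iov = NULL;
 *    struct iov_iter i;
 *    ssize_t ret;
 *
 *    ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
 *    if (ret < 0)
 *        return ret;
 *
 *    ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *    kfree(iov);
 */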

int blk_rq_map_user(struct request_queue *q, struct request *rq,
            struct rq_map_data *map_data, void __user *ubuf,
            unsigned long len, gfp_t gfp_mask)
{
    struct iovec iov;
    struct iov_iter i;
    int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

    if (unlikely(ret < 0))
        return ret;

    return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:           start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
    struct bio *mapped_bio;
    int ret = 0, ret2;

    while (bio) {
        mapped_bio = bio;
        if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
            mapped_bio = bio->bi_private;

        ret2 = __blk_rq_unmap_user(mapped_bio);
        if (ret2 && !ret)
            ret = ret2;

        /* advance past this bio before bio_put() can free it */
        mapped_bio = bio;
        bio = bio->bi_next;
        bio_put(mapped_bio);
    }

    return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
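
/*
 * Example usage (editor's sketch): since completion may change rq->bio,
 * callers save the bio pointer right after mapping and pass that saved
 * pointer here once the request finishes.  ubuf, len and the error
 * label are hypothetical.
 *
 *    struct bio *bio;
 *
 *    if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL))
 *        goto out_put_request;
 *    bio = rq->bio;
 *    blk_execute_rq(q, NULL, rq, 0);
 *    blk_rq_unmap_user(bio);
 */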

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:      request queue where request should be inserted
 * @rq:     request to fill
 * @kbuf:   the kernel buffer
 * @len:    length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
            unsigned int len, gfp_t gfp_mask)
{
    int reading = rq_data_dir(rq) == READ;
    unsigned long addr = (unsigned long) kbuf;
    int do_copy = 0;
    struct bio *bio;
    int ret;

    if (len > (queue_max_hw_sectors(q) << 9))
        return -EINVAL;
    if (!len || !kbuf)
        return -EINVAL;

    /* copy through bounce pages if the buffer is misaligned or on the stack */
    do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
    if (do_copy)
        bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
    else
        bio = bio_map_kern(q, kbuf, len, gfp_mask);

    if (IS_ERR(bio))
        return PTR_ERR(bio);

    if (!reading)
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

    if (do_copy)
        rq->rq_flags |= RQF_COPY_USER;

    ret = blk_rq_append_bio(rq, bio);
    if (unlikely(ret)) {
        /* request is too big */
        bio_put(bio);
        return ret;
    }

    blk_queue_bounce(q, &rq->bio);
    return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
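
/*
 * Example usage (editor's sketch): an internal driver command with a
 * kernel buffer, in the style of scsi_execute(), which is a real
 * caller of this interface; buffer and bufflen are hypothetical.
 *
 *    struct request *rq = blk_get_request(q, WRITE, GFP_NOIO);
 *    int ret;
 *
 *    if (IS_ERR(rq))
 *        return PTR_ERR(rq);
 *    ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *    if (ret) {
 *        blk_put_request(rq);
 *        return ret;
 *    }
 */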