/*
 * bsg-lib.c - BSG helper library
 *
 * Helpers used by SCSI transport classes and other drivers to service
 * bsg (block SCSI generic) requests through a blk-mq request queue.
 */

#include <linux/bsg.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>

#define uptr64(val) ((void __user *)(uintptr_t)(val))

struct bsg_set {
	struct blk_mq_tag_set tag_set;
	struct bsg_device *bd;
	bsg_job_fn *job_fn;
	bsg_timeout_fn *timeout_fn;
};

static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
		fmode_t mode, unsigned int timeout)
{
	struct bsg_job *job;
	struct request *rq;
	struct bio *bio;
	void *reply;
	int ret;

	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_TRANSPORT)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	rq = blk_mq_alloc_request(q, hdr->dout_xfer_len ?
				  REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = timeout;

	job = blk_mq_rq_to_pdu(rq);
	reply = job->reply;
	memset(job, 0, sizeof(*job));
	job->reply = reply;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;
	job->dd_data = job + 1;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request)) {
		ret = PTR_ERR(job->request);
		goto out_free_rq;
	}

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_mq_alloc_request(rq->q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out_free_job_request;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	ret = 0;
	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_bidi_rq;

	bio = rq->bio;
	blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));

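	/*
	 * Translate the driver-level result into the sg_io_v4 status fields
	 * that userspace expects.
	 */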
	hdr->device_status = job->result & 0xff;
	hdr->transport_status = host_byte(job->result);
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (job->result < 0) {
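		/*
		 * job->result is treated as a negative errno here; only the
		 * result word of the reply is meaningful, so only copy that
		 * much back to userspace.
		 */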
		job->reply_len = sizeof(u32);
		ret = job->result;
	}

	if (job->reply_len && hdr->response) {
		int len = min(hdr->max_response_len, job->reply_len);

		if (copy_to_user(uptr64(hdr->response), job->reply, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

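	/* we assume all request payload was transferred, residual == 0 */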
	hdr->dout_resid = 0;

	if (job->bidi_rq) {
		unsigned int rsp_len = job->reply_payload.payload_len;

		if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
			hdr->din_resid = 0;
		else
			hdr->din_resid = rsp_len - job->reply_payload_rcv_len;
	} else {
		hdr->din_resid = 0;
	}

	blk_rq_unmap_user(bio);
out_unmap_bidi_rq:
	if (job->bidi_rq)
		blk_rq_unmap_user(job->bidi_bio);
out_free_bidi_rq:
	if (job->bidi_rq)
		blk_mq_free_request(job->bidi_rq);
out_free_job_request:
	kfree(job->request);
out_free_rq:
	blk_mq_free_request(rq);
	return ret;
}

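/**
 * bsg_teardown_job - routine to teardown a bsg job
 * @kref: kref inside bsg_job that is to be torn down
 */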
static void bsg_teardown_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = blk_mq_rq_from_pdu(job);

	put_device(job->dev);

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);

	blk_mq_end_request(rq, BLK_STS_OK);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

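/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * Drivers must call bsg_job_done() in order to signal completion of the
 * request.
 */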
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *rq = blk_mq_rq_from_pdu(job);

	job->result = result;
	job->reply_payload_rcv_len = reply_payload_rcv_len;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}
EXPORT_SYMBOL_GPL(bsg_job_done);

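/**
 * bsg_complete - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */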
static void bsg_complete(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kmalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

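/**
 * bsg_prepare_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */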
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	int ret;

	job->timeout = req->timeout;

	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (job->bidi_rq) {
		ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;

	get_device(job->dev);
	kref_init(&job->kref);
	return true;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	job->result = -ENOMEM;
	return false;
}

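/**
 * bsg_queue_rq - generic blk-mq ->queue_rq handler for bsg requests
 * @hctx: hardware queue the request was queued on
 * @bd: queue data holding the request
 *
 * Prepares the bsg_job for the request and hands it to the driver's
 * job_fn.  A non-zero return from job_fn is reported as BLK_STS_IOERR.
 */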
static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct device *dev = q->queuedata;
	struct request *req = bd->rq;
	struct bsg_set *bset =
		container_of(q->tag_set, struct bsg_set, tag_set);
	blk_status_t sts = BLK_STS_IOERR;
	int ret;

	blk_mq_start_request(req);

	if (!get_device(dev))
		return BLK_STS_IOERR;

	if (!bsg_prepare_job(dev, req))
		goto out;

	ret = bset->job_fn(blk_mq_rq_to_pdu(req));
	if (!ret)
		sts = BLK_STS_OK;

out:
	put_device(dev);
	return sts;
}

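/* called for each request when the tag set is allocated */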
static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
		       unsigned int hctx_idx, unsigned int numa_node)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!job->reply)
		return -ENOMEM;
	return 0;
}

static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
			unsigned int hctx_idx)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);

	kfree(job->reply);
}

void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(bset->bd);
		blk_mq_destroy_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);

static enum blk_eh_timer_return bsg_timeout(struct request *rq)
{
	struct bsg_set *bset =
		container_of(rq->q->tag_set, struct bsg_set, tag_set);

	if (!bset->timeout_fn)
		return BLK_EH_DONE;
	return bset->timeout_fn(rq);
}

static const struct blk_mq_ops bsg_mq_ops = {
	.queue_rq = bsg_queue_rq,
	.init_request = bsg_init_rq,
	.exit_request = bsg_exit_rq,
	.complete = bsg_complete,
	.timeout = bsg_timeout,
};

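/**
 * bsg_setup_queue - create the request queue and bsg device for a transport
 * @dev: device to attach the bsg device to
 * @name: name under which the bsg device is registered
 * @job_fn: handler invoked for each bsg request
 * @timeout: optional timeout handler for stalled requests
 * @dd_job_size: size of the LLD-private data area allocated per job
 */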
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	bset->bd = bsg_register_queue(q, dev, name, bsg_transport_sg_io_fn);
	if (IS_ERR(bset->bd)) {
		ret = PTR_ERR(bset->bd);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_mq_destroy_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);