0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0014
0015 #include <linux/module.h>
0016 #include <linux/slab.h>
0017 #include <linux/mempool.h>
0018 #include <linux/interrupt.h>
0019 #include <linux/virtio.h>
0020 #include <linux/virtio_ids.h>
0021 #include <linux/virtio_config.h>
0022 #include <linux/virtio_scsi.h>
0023 #include <linux/cpu.h>
0024 #include <linux/blkdev.h>
0025 #include <linux/blk-integrity.h>
0026 #include <scsi/scsi_host.h>
0027 #include <scsi/scsi_device.h>
0028 #include <scsi/scsi_cmnd.h>
0029 #include <scsi/scsi_tcq.h>
0030 #include <scsi/scsi_devinfo.h>
0031 #include <linux/seqlock.h>
0032 #include <linux/blk-mq-virtio.h>
0033
0034 #include "sd.h"
0035
0036 #define VIRTIO_SCSI_MEMPOOL_SZ 64
0037 #define VIRTIO_SCSI_EVENT_LEN 8
0038 #define VIRTIO_SCSI_VQ_BASE 2
0039
0040
/*
 * Driver-private per-command state plus the request/response headers
 * exchanged with the device over a virtqueue.
 */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;		/* back-pointer to the midlayer command */
	struct completion *comp;	/* set for TMF requests, otherwise NULL */
	union {
		struct virtio_scsi_cmd_req       cmd;		/* normal request header */
		struct virtio_scsi_cmd_req_pi    cmd_pi;	/* request header with T10 PI */
		struct virtio_scsi_ctrl_tmf_req  tmf;		/* task management request */
		struct virtio_scsi_ctrl_an_req   an;		/* asynchronous notification */
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;
0057
/* One pre-posted event buffer plus the work item that processes it. */
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;		/* owning adapter */
	struct virtio_scsi_event event;		/* buffer filled in by the device */
	struct work_struct work;		/* runs virtscsi_handle_event() */
};
0063
/* A virtqueue together with the lock serializing access to it. */
struct virtio_scsi_vq {
	/* Protects vq: add/get/kick-prepare all happen under this lock. */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};
0070
0071
/* Per-adapter driver state, allocated as Scsi_Host private data. */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Buffers kept posted on the event vq for hotplug/AN notifications. */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;		/* number of request virtqueues */

	/* NOTE(review): appears unused in this chunk — confirm against full file. */
	struct hlist_node node;

	/* Set under event_vq.vq_lock; stops re-queuing event work at teardown. */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];	/* num_queues entries */
};
0089
/*
 * Slab cache and mempool backing virtio_scsi_cmd allocations made outside
 * the per-command private data (TMF requests in the error handler); the
 * mempool guarantees forward progress under memory pressure.
 */
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
0092
0093 static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
0094 {
0095 return vdev->priv;
0096 }
0097
0098 static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
0099 {
0100 if (resid)
0101 scsi_set_resid(sc, min(resid, scsi_bufflen(sc)));
0102 }
0103
/*
 * Completion handler for one request: translate the virtio-scsi response
 * code into a midlayer host byte, record residual and sense data, and
 * finish the command.  Runs from virtscsi_vq_done() with the request
 * virtqueue's vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	/* SCSI status byte goes straight into sc->result; host byte below. */
	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		/* Unknown codes are warned about, then treated as failures. */
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		fallthrough;
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	/* Device must not claim more sense data than the buffer can hold. */
	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (resp->sense_len) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
	}

	scsi_done(sc);
}
0169
/*
 * Drain a virtqueue under its lock: disable callbacks, pop every finished
 * buffer and hand it to @fn, then re-enable callbacks.  The outer loop
 * closes the race where the device completes another buffer between the
 * final virtqueue_get_buf() and virtqueue_enable_cb() — in that case
 * enable_cb returns false and we drain again.
 */
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		/* A broken device will never complete anything else; bail. */
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}
0190
0191 static void virtscsi_req_done(struct virtqueue *vq)
0192 {
0193 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
0194 struct virtio_scsi *vscsi = shost_priv(sh);
0195 int index = vq->index - VIRTIO_SCSI_VQ_BASE;
0196 struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
0197
0198 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
0199 };
0200
0201 static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
0202 {
0203 int i, num_vqs;
0204
0205 num_vqs = vscsi->num_queues;
0206 for (i = 0; i < num_vqs; i++)
0207 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
0208 virtscsi_complete_cmd);
0209 }
0210
0211 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
0212 {
0213 struct virtio_scsi_cmd *cmd = buf;
0214
0215 if (cmd->comp)
0216 complete(cmd->comp);
0217 }
0218
0219 static void virtscsi_ctrl_done(struct virtqueue *vq)
0220 {
0221 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
0222 struct virtio_scsi *vscsi = shost_priv(sh);
0223
0224 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
0225 };
0226
/* Defined below; declared here because virtscsi_kick_event() arms it. */
static void virtscsi_handle_event(struct work_struct *work);
0228
/*
 * (Re)post one event buffer on the event virtqueue so the device can
 * report hotplug/parameter-change events.  Returns 0 on success or the
 * virtqueue_add_inbuf() error code.
 */
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}
0250
0251 static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
0252 {
0253 int i;
0254
0255 for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
0256 vscsi->event_list[i].vscsi = vscsi;
0257 virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
0258 }
0259
0260 return 0;
0261 }
0262
/*
 * Stop event handling for teardown: set stop_events under the event vq
 * lock so virtscsi_complete_event() stops queuing new work, then wait for
 * any already-queued handlers to finish.
 */
static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Taking the lock orders the flag write against the completion path. */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}
0275
/*
 * Handle a transport-reset event: scan in a newly reported device or
 * remove a hot-unplugged one, as indicated by the event reason.
 */
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	/* event->lun encoding: byte 1 = target, bytes 2-3 = LUN. */
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		if (lun == 0) {
			/* LUN 0: rescan the whole target for new LUNs. */
			scsi_scan_target(&shost->shost_gendev, 0, target,
					 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		} else {
			scsi_add_device(shost, 0, target, lun);
		}
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
	}
}
0307
/*
 * Handle a parameter-change event: the sense ASC/ASCQ pair is packed
 * into event->reason (ASC in the low byte, ASCQ in the next byte).
 */
static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	/* event->lun encoding: byte 1 = target, bytes 2-3 = LUN. */
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/*
	 * Rescan on 2Ah/00 "parameters changed", 2Ah/01 "mode parameters
	 * changed" and 2Ah/09 "capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}
0332
0333 static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
0334 {
0335 struct scsi_device *sdev;
0336 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
0337 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
0338 int result, inquiry_len, inq_result_len = 256;
0339 char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
0340
0341 shost_for_each_device(sdev, shost) {
0342 inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
0343
0344 memset(scsi_cmd, 0, sizeof(scsi_cmd));
0345 scsi_cmd[0] = INQUIRY;
0346 scsi_cmd[4] = (unsigned char) inquiry_len;
0347
0348 memset(inq_result, 0, inq_result_len);
0349
0350 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
0351 inq_result, inquiry_len, NULL,
0352 SD_TIMEOUT, SD_MAX_RETRIES, NULL);
0353
0354 if (result == 0 && inq_result[0] >> 5) {
0355
0356 scsi_remove_device(sdev);
0357 } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) {
0358
0359
0360
0361
0362
0363
0364 scsi_remove_device(sdev);
0365 }
0366 }
0367
0368 kfree(inq_result);
0369 }
0370
/*
 * Workqueue handler for one device event.  Processes any "events missed"
 * flag first (full rescan), then dispatches on the event type, and
 * finally re-posts the buffer to the event virtqueue.
 */
static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		/* Clear the flag so the switch below sees the bare type. */
		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		virtscsi_rescan_hotunplug(vscsi);
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	/* Hand the buffer back to the device for the next event. */
	virtscsi_kick_event(vscsi, event_node);
}
0400
0401 static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
0402 {
0403 struct virtio_scsi_event_node *event_node = buf;
0404
0405 if (!vscsi->stop_events)
0406 queue_work(system_freezable_wq, &event_node->work);
0407 }
0408
0409 static void virtscsi_event_done(struct virtqueue *vq)
0410 {
0411 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
0412 struct virtio_scsi *vscsi = shost_priv(sh);
0413
0414 virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
0415 };
0416
/*
 * Queue a command on @vq.  The scatterlist order is mandated by the
 * virtio-scsi spec: request header, data-out (protection data first),
 * then response header, data-in (protection data first).  At most six
 * sg entries are needed.  Caller holds the vq lock.
 */
static int __virtscsi_add_cmd(struct virtqueue *vq,
			      struct virtio_scsi_cmd *cmd,
			      size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &sc->sdb.table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &sc->sdb.table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
0461
0462 static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
0463 {
0464 bool needs_kick;
0465 unsigned long flags;
0466
0467 spin_lock_irqsave(&vq->vq_lock, flags);
0468 needs_kick = virtqueue_kick_prepare(vq->vq);
0469 spin_unlock_irqrestore(&vq->vq_lock, flags);
0470
0471 if (needs_kick)
0472 virtqueue_notify(vq->vq);
0473 }
0474
/*
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a queue
 * @vq		: the struct virtio_scsi_vq we're using
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 * @kick	: whether to kick the virtqueue immediately
 *
 * Adds the command under the vq lock; as in virtscsi_kick_vq() the
 * notify itself is done outside the lock.
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size,
			    bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
0503
/*
 * Fill in the common request header.  The LUN field uses the virtio-scsi
 * addressing format: byte 0 is always 1, byte 1 is the target, bytes 2-3
 * carry the LUN with 0x40 ("flat space" addressing) OR'd into the high
 * byte.  The command pointer doubles as the unique tag.
 */
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}
0517
#ifdef CONFIG_BLK_DEV_INTEGRITY
/*
 * Fill in the T10-PI-capable request header: the common fields plus the
 * number of protection-information bytes flowing in each direction.
 */
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);
	struct blk_integrity *bi;

	/* cmd_req_pi begins with the same layout as cmd_req. */
	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->q->disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif
0543
0544 static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
0545 struct scsi_cmnd *sc)
0546 {
0547 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
0548 u16 hwq = blk_mq_unique_tag_to_hwq(tag);
0549
0550 return &vscsi->req_vqs[hwq];
0551 }
0552
/*
 * queuecommand fast path: build the request header (PI variant when the
 * device negotiated T10_PI), queue it on the per-hwq virtqueue, and kick
 * only for the last command of a batch (SCMD_LAST; earlier ones are
 * flushed via .commit_rqs).  -EIO from the add path is completed inline
 * as BAD_TARGET; any other failure asks the midlayer to retry.
 */
static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		/* Complete inline; take vq_lock as virtscsi_complete_cmd expects. */
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}
0601
/*
 * Submit a task-management request on the control vq and wait for its
 * response.  Consumes @cmd (frees it back to the mempool).  Returns
 * SUCCESS/FAILED as expected by the SCSI EH callbacks.
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * Commands affected by the TMF may still be completing on the
	 * request virtqueues; poll them once so those completions are
	 * observed before the error handler decides the commands' fate.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
0632
/*
 * SCSI EH device-reset callback: issue a LOGICAL UNIT RESET TMF for the
 * command's device.  The cmd is mempool-allocated so the error handler
 * can make progress under memory pressure; virtscsi_tmf() frees it.
 */
static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					     VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		/* Same LUN encoding as virtio_scsi_init_hdr(). */
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}
0655
static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Force VPD page probing (BLIST_TRY_VPD_PAGES) for every device on
	 * this host.
	 *
	 * NOTE(review): the original rationale comment was lost from this
	 * copy; presumably backing devices may not advertise VPD support
	 * in INQUIRY even though the pages (e.g. block limits) are
	 * available — confirm against the upstream source.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}
0676
0677
0678
0679
0680
0681
0682
0683 static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
0684 {
0685 struct Scsi_Host *shost = sdev->host;
0686 int max_depth = shost->cmd_per_lun;
0687
0688 return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
0689 }
0690
0691 static int virtscsi_abort(struct scsi_cmnd *sc)
0692 {
0693 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
0694 struct virtio_scsi_cmd *cmd;
0695
0696 scmd_printk(KERN_INFO, sc, "abort\n");
0697 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
0698 if (!cmd)
0699 return FAILED;
0700
0701 memset(cmd, 0, sizeof(*cmd));
0702 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
0703 .type = VIRTIO_SCSI_T_TMF,
0704 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
0705 .lun[0] = 1,
0706 .lun[1] = sc->device->id,
0707 .lun[2] = (sc->device->lun >> 8) | 0x40,
0708 .lun[3] = sc->device->lun & 0xff,
0709 .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
0710 };
0711 return virtscsi_tmf(vscsi, cmd);
0712 }
0713
0714 static int virtscsi_map_queues(struct Scsi_Host *shost)
0715 {
0716 struct virtio_scsi *vscsi = shost_priv(shost);
0717 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
0718
0719 return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
0720 }
0721
0722 static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
0723 {
0724 struct virtio_scsi *vscsi = shost_priv(shost);
0725
0726 virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
0727 }
0728
/*
 * Never give up on a command from the timeout handler: always restart
 * the timer instead of escalating to the error handler.
 * NOTE(review): presumably because the host is trusted to eventually
 * respond to every command (latency may just be high under a paused or
 * overloaded hypervisor) — confirm against upstream rationale.
 */
static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return BLK_EH_RESET_TIMER;
}
0738
/* SCSI host template shared by all virtio-scsi adapters. */
static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	/* Per-command private data holds our virtio_scsi_cmd. */
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};
0757
/* Read/write one field of struct virtio_scsi_config in native byte order. */
#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)
0770
0771 static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
0772 struct virtqueue *vq)
0773 {
0774 spin_lock_init(&virtscsi_vq->vq_lock);
0775 virtscsi_vq->vq = vq;
0776 }
0777
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all virtqueue processing before the queues are deleted. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}
0784
/*
 * Discover and wire up all virtqueues: vq 0 = control, vq 1 = event,
 * vqs 2..N = requests.  The first two vectors are excluded from IRQ
 * affinity spreading (irq_affinity.pre_vectors = 2) so only request vqs
 * are distributed across CPUs.  Also programs the CDB/sense sizes this
 * driver uses into the device config space.
 */
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	/* The temporary arrays are only needed for discovery. */
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}
0840
/*
 * Device probe: read the device configuration, allocate a Scsi_Host
 * (with trailing req_vqs[] sized to num_queues), set up the virtqueues,
 * register with the SCSI midlayer, and start event handling/scanning.
 */
static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/*
	 * LUN addressing uses bytes 2-3 with 0x40 OR'd into the high byte
	 * (see virtio_scsi_init_hdr()), hence the extra 0x4000.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	/* DRIVER_OK: the device may now be used. */
	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
0924
/*
 * Device removal: stop event handling first so no work races with the
 * host unregistration, then tear down the host and the virtqueues.
 */
static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}
0937
0938 #ifdef CONFIG_PM_SLEEP
/* Suspend: reset the device and drop the virtqueues; restore rebuilds them. */
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}
0944
/* Resume: rebuild the virtqueues and re-arm event buffers if negotiated. */
static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
0962 #endif
0963
/* Bind to any virtio device advertising the SCSI device ID. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
0968
/* Feature bits this driver is willing to negotiate. */
static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};
0976
/* Virtio bus driver glue. */
static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};
0990
/*
 * Module init: create the TMF command cache/mempool, then register the
 * driver.  On any failure everything allocated so far is torn down
 * (mempool_destroy/kmem_cache_destroy accept NULL).
 */
static int __init virtio_scsi_init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}


	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}
	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	/* NULL-safe; also reset the globals so a re-init starts clean. */
	mempool_destroy(virtscsi_cmd_pool);
	virtscsi_cmd_pool = NULL;
	kmem_cache_destroy(virtscsi_cmd_cache);
	virtscsi_cmd_cache = NULL;
	return ret;
}
1022
/* Module exit: unregister first so no new commands can allocate from
 * the pool, then free the pool and its backing cache. */
static void __exit virtio_scsi_fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}
/* Module boilerplate: entry points, device table for autoloading, metadata. */
module_init(virtio_scsi_init);
module_exit(virtio_scsi_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");