/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

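/* LUN hotplug operations, handled by scsifront_do_lun_hotplug(). */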
#define VSCSIFRONT_OP_ADD_LUN           1
#define VSCSIFRONT_OP_DEL_LUN           2
#define VSCSIFRONT_OP_READD_LUN         3

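/* Tuning points: default queue depth and SCSI addressing limits. */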
#define VSCSIIF_DEFAULT_CMD_PER_LUN     10
#define VSCSIIF_MAX_TARGET              64
#define VSCSIIF_MAX_LUN                 255

#define VSCSIIF_RING_SIZE       __CONST_RING_SIZE(vscsiif, PAGE_SIZE)
#define VSCSIIF_MAX_REQS        VSCSIIF_RING_SIZE

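/* Number of grant pages needed to hold a segment list of _sg entries. */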
#define vscsiif_grants_sg(_sg) (PFN_UP((_sg) * \
                                sizeof(struct scsiif_request_segment)))

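/* Per-request bookkeeping, shadowing one request on the shared ring. */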
struct vscsifrnt_shadow {
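        /* command between backend and frontend */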
        unsigned char act;
        uint8_t nr_segments;
        uint16_t rqid;
        uint16_t ref_rqid;

        bool inflight;

        unsigned int nr_grants;                 /* number of grants in gref[] */
        struct scsiif_request_segment *sg;      /* scatter/gather elements */
        struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];

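        /* Reset and abort handling. */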
        wait_queue_head_t wq_reset;     /* reset work queue           */
        int wait_reset;                 /* reset work queue condition */
        int32_t rslt_reset;             /* reset response status:     */
                                        /* SUCCESS or FAILED or:      */
#define RSLT_RESET_WAITING      0
#define RSLT_RESET_ERR          -1

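        /* Requested struct scsi_cmnd is stored from kernel. */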
        struct scsi_cmnd *sc;
        int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};

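/* Per-host frontend state, one instance per xenbus device. */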
struct vscsifrnt_info {
        struct xenbus_device *dev;

        struct Scsi_Host *host;
        enum {
                STATE_INACTIVE,
                STATE_ACTIVE,
                STATE_ERROR
        } host_active;

        unsigned int evtchn;
        unsigned int irq;

        grant_ref_t ring_ref;
        struct vscsiif_front_ring ring;
        struct vscsiif_response ring_rsp;

        spinlock_t shadow_lock;
        DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
        struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

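        /* Wait queues for free ring slots and for pausing on suspend. */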
        wait_queue_head_t wq_sync;
        wait_queue_head_t wq_pause;
        unsigned int wait_ring_available:1;
        unsigned int waiting_pause:1;
        unsigned int pause:1;
        unsigned int callers;

        char dev_state_path[64];
        struct task_struct *curr;
};

static DEFINE_MUTEX(scsifront_mutex);

static void scsifront_wake_up(struct vscsifrnt_info *info)
{
        info->wait_ring_available = 0;
        wake_up(&info->wq_sync);
}

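/* Get a free request id; returns VSCSIIF_MAX_REQS if none is available. */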
static int scsifront_get_rqid(struct vscsifrnt_info *info)
{
        unsigned long flags;
        int free;

        spin_lock_irqsave(&info->shadow_lock, flags);

        free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
        __clear_bit(free, info->shadow_free_bitmap);

        spin_unlock_irqrestore(&info->shadow_lock, flags);

        return free;
}

static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        __set_bit(id, info->shadow_free_bitmap);
        info->shadow[id] = NULL;

        return empty || info->wait_ring_available;
}

static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
        unsigned long flags;
        int kick;

        spin_lock_irqsave(&info->shadow_lock, flags);
        kick = _scsifront_put_rqid(info, id);
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        if (kick)
                scsifront_wake_up(info);
}

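/* Put one request on the shared ring and notify the backend if needed. */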
static int scsifront_do_request(struct vscsifrnt_info *info,
                                struct vscsifrnt_shadow *shadow)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        struct vscsiif_request *ring_req;
        struct scsi_cmnd *sc = shadow->sc;
        uint32_t id;
        int i, notify;

        if (RING_FULL(&info->ring))
                return -EBUSY;

        id = scsifront_get_rqid(info); /* use id in response */
        if (id >= VSCSIIF_MAX_REQS)
                return -EBUSY;

        info->shadow[id] = shadow;
        shadow->rqid = id;

        ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
        ring->req_prod_pvt++;

        ring_req->rqid        = id;
        ring_req->act         = shadow->act;
        ring_req->ref_rqid    = shadow->ref_rqid;
        ring_req->nr_segments = shadow->nr_segments;

        ring_req->id      = sc->device->id;
        ring_req->lun     = sc->device->lun;
        ring_req->channel = sc->device->channel;
        ring_req->cmd_len = sc->cmd_len;

        BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

        memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / HZ;

        for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
                ring_req->seg[i] = shadow->seg[i];

        shadow->inflight = true;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
                notify_remote_via_irq(info->irq);

        return 0;
}

static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg)
{
        shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n"
                     "Disabling device for further use\n", msg);
        info->host_active = STATE_ERROR;
}

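/*
 * Release the grants of a completed request. A grant still held by the
 * backend is a fatal error, as the page could still be modified later.
 */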
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
                                  struct vscsifrnt_shadow *shadow)
{
        int i;

        if (shadow->sc->sc_data_direction == DMA_NONE)
                return;

        for (i = 0; i < shadow->nr_grants; i++) {
                if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
                        scsifront_set_error(info, "grant still in use by backend");
                        return;
                }
        }

        kfree(shadow->sg);
}

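/* Translate the backend's result code into a SCSI midlayer host byte. */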
static unsigned int scsifront_host_byte(int32_t rslt)
{
        switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
        case XEN_VSCSIIF_RSLT_HOST_OK:
                return DID_OK;
        case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
                return DID_NO_CONNECT;
        case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
                return DID_BUS_BUSY;
        case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
                return DID_TIME_OUT;
        case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
                return DID_BAD_TARGET;
        case XEN_VSCSIIF_RSLT_HOST_ABORT:
                return DID_ABORT;
        case XEN_VSCSIIF_RSLT_HOST_PARITY:
                return DID_PARITY;
        case XEN_VSCSIIF_RSLT_HOST_ERROR:
                return DID_ERROR;
        case XEN_VSCSIIF_RSLT_HOST_RESET:
                return DID_RESET;
        case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
                return DID_BAD_INTR;
        case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
                return DID_PASSTHROUGH;
        case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
                return DID_SOFT_ERROR;
        case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
                return DID_IMM_RETRY;
        case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
                return DID_REQUEUE;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
                return DID_TRANSPORT_DISRUPTED;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
                return DID_TRANSPORT_FAILFAST;
        case XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE:
                return DID_TARGET_FAILURE;
        case XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE:
                return DID_NEXUS_FAILURE;
        case XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE:
                return DID_ALLOC_FAILURE;
        case XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR:
                return DID_MEDIUM_ERROR;
        case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
                return DID_TRANSPORT_MARGINAL;
        default:
                return DID_ERROR;
        }
}

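/* Complete a CDB request: release grants, copy back sense data and result. */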
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
                                   struct vscsiif_response *ring_rsp)
{
        struct vscsifrnt_shadow *shadow;
        struct scsi_cmnd *sc;
        uint32_t id;
        uint8_t sense_len;

        id = ring_rsp->rqid;
        shadow = info->shadow[id];
        sc = shadow->sc;

        BUG_ON(sc == NULL);

        scsifront_gnttab_done(info, shadow);
        if (info->host_active == STATE_ERROR)
                return;
        scsifront_put_rqid(info, id);

        set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt));
        set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt));
        scsi_set_resid(sc, ring_rsp->residual_len);

        sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
                          ring_rsp->sense_len);

        if (sense_len)
                memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

        scsi_done(sc);
}

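/* Complete a synchronous (abort/reset) request and wake up the waiter. */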
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
                                    struct vscsiif_response *ring_rsp)
{
        uint16_t id = ring_rsp->rqid;
        unsigned long flags;
        struct vscsifrnt_shadow *shadow = info->shadow[id];
        int kick;

        spin_lock_irqsave(&info->shadow_lock, flags);
        shadow->wait_reset = 1;
        switch (shadow->rslt_reset) {
        case RSLT_RESET_WAITING:
                if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS)
                        shadow->rslt_reset = SUCCESS;
                else
                        shadow->rslt_reset = FAILED;
                break;
        case RSLT_RESET_ERR:
                kick = _scsifront_put_rqid(info, id);
                spin_unlock_irqrestore(&info->shadow_lock, flags);
                kfree(shadow);
                if (kick)
                        scsifront_wake_up(info);
                return;
        default:
                scsifront_set_error(info, "bad reset state");
                break;
        }
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        wake_up(&shadow->wq_reset);
}

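/* Dispatch one response either to CDB completion or to sync completion. */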
static void scsifront_do_response(struct vscsifrnt_info *info,
                                  struct vscsiif_response *ring_rsp)
{
        struct vscsifrnt_shadow *shadow;

        if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
            !info->shadow[ring_rsp->rqid]->inflight) {
                scsifront_set_error(info, "illegal rqid returned by backend!");
                return;
        }
        shadow = info->shadow[ring_rsp->rqid];
        shadow->inflight = false;

        if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
                scsifront_cdb_cmd_done(info, ring_rsp);
        else
                scsifront_sync_cmd_done(info, ring_rsp);
}

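/* Consume all pending responses; returns whether more work may show up. */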
static int scsifront_ring_drain(struct vscsifrnt_info *info,
                                unsigned int *eoiflag)
{
        struct vscsiif_response ring_rsp;
        RING_IDX i, rp;
        int more_to_do = 0;

        rp = READ_ONCE(info->ring.sring->rsp_prod);
        virt_rmb();     /* ordering required respective to backend */
        if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
                scsifront_set_error(info, "illegal number of responses");
                return 0;
        }
        for (i = info->ring.rsp_cons; i != rp; i++) {
                RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
                scsifront_do_response(info, &ring_rsp);
                if (info->host_active == STATE_ERROR)
                        return 0;
                *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt)
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
        else
                info->ring.sring->rsp_event = i + 1;

        return more_to_do;
}

static int scsifront_cmd_done(struct vscsifrnt_info *info,
                              unsigned int *eoiflag)
{
        int more_to_do;
        unsigned long flags;

        spin_lock_irqsave(info->host->host_lock, flags);

        more_to_do = scsifront_ring_drain(info, eoiflag);

        info->wait_ring_available = 0;

        spin_unlock_irqrestore(info->host->host_lock, flags);

        wake_up(&info->wq_sync);

        return more_to_do;
}

static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
        struct vscsifrnt_info *info = dev_id;
        unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

        if (info->host_active == STATE_ERROR) {
                xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                return IRQ_HANDLED;
        }

        while (scsifront_cmd_done(info, &eoiflag))
                cond_resched();

        xen_irq_lateeoi(irq, eoiflag);

        return IRQ_HANDLED;
}

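/* Fail all outstanding requests with DID_RESET, e.g. after migration. */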
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
        unsigned int i, dummy;
        struct vscsiif_response resp;

        scsifront_ring_drain(info, &dummy);

        for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
                if (test_bit(i, info->shadow_free_bitmap))
                        continue;
                resp.rqid = i;
                resp.sense_len = 0;
                resp.rslt = DID_RESET << 16;
                resp.residual_len = 0;
                scsifront_do_response(info, &resp);
        }
}

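/*
 * Grant the backend access to all data pages of a command and build the
 * segment list, indirect via extra granted pages if it doesn't fit into
 * the ring request itself.
 */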
static int map_data_for_request(struct vscsifrnt_info *info,
                                struct scsi_cmnd *sc,
                                struct vscsifrnt_shadow *shadow)
{
        grant_ref_t gref_head;
        struct page *page;
        int err, ref, ref_cnt = 0;
        int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
        unsigned int i, off, len, bytes;
        unsigned int data_len = scsi_bufflen(sc);
        unsigned int data_grants = 0, seg_grants = 0;
        struct scatterlist *sg;
        struct scsiif_request_segment *seg;

        if (sc->sc_data_direction == DMA_NONE || !data_len)
                return 0;

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
                data_grants += PFN_UP(sg->offset + sg->length);

        if (data_grants > VSCSIIF_SG_TABLESIZE) {
                if (data_grants > info->host->sg_tablesize) {
                        shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                                     "Unable to map request_buffer for command!\n");
                        return -E2BIG;
                }
                seg_grants = vscsiif_grants_sg(data_grants);
                shadow->sg = kcalloc(data_grants,
                        sizeof(struct scsiif_request_segment), GFP_ATOMIC);
                if (!shadow->sg)
                        return -ENOMEM;
        }
        seg = shadow->sg ? : shadow->seg;

        err = gnttab_alloc_grant_references(seg_grants + data_grants,
                                            &gref_head);
        if (err) {
                kfree(shadow->sg);
                shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
                             "gnttab_alloc_grant_references() error\n");
                return -ENOMEM;
        }

        if (seg_grants) {
                page = virt_to_page(seg);
                off = offset_in_page(seg);
                len = sizeof(struct scsiif_request_segment) * data_grants;
                while (len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page), 1);
                        shadow->gref[ref_cnt] = ref;
                        shadow->seg[ref_cnt].gref   = ref;
                        shadow->seg[ref_cnt].offset = (uint16_t)off;
                        shadow->seg[ref_cnt].length = (uint16_t)bytes;

                        page++;
                        len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
                BUG_ON(seg_grants < ref_cnt);
                seg_grants = ref_cnt;
        }

        scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
                page = sg_page(sg);
                off = sg->offset;
                len = sg->length;

                while (len > 0 && data_len > 0) {
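                        /*
                         * The scatterlist may cover more bytes than the
                         * command actually transfers, so clamp each chunk
                         * to the remaining data_len.
                         */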
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
                        bytes = min(bytes, data_len);

                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);

                        gnttab_grant_foreign_access_ref(ref,
                                info->dev->otherend_id,
                                xen_page_to_gfn(page),
                                grant_ro);

                        shadow->gref[ref_cnt] = ref;
                        seg->gref   = ref;
                        seg->offset = (uint16_t)off;
                        seg->length = (uint16_t)bytes;

                        page++;
                        seg++;
                        len -= bytes;
                        data_len -= bytes;
                        off = 0;
                        ref_cnt++;
                }
        }

        if (seg_grants)
                shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
        else
                shadow->nr_segments = (uint8_t)ref_cnt;
        shadow->nr_grants = ref_cnt;

        return 0;
}

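/*
 * Reference counting of active callers, used to pause request processing
 * while the frontend is being suspended.
 */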
static int scsifront_enter(struct vscsifrnt_info *info)
{
        if (info->pause)
                return 1;
        info->callers++;
        return 0;
}

static void scsifront_return(struct vscsifrnt_info *info)
{
        info->callers--;
        if (info->callers)
                return;

        if (!info->waiting_pause)
                return;

        info->waiting_pause = 0;
        wake_up(&info->wq_pause);
}

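/* Queue a SCSI command: map its data, put it on the ring or report busy. */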
static int scsifront_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *sc)
{
        struct vscsifrnt_info *info = shost_priv(shost);
        struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
        unsigned long flags;
        int err;

        if (info->host_active == STATE_ERROR)
                return SCSI_MLQUEUE_HOST_BUSY;

        sc->result = 0;

        shadow->sc  = sc;
        shadow->act = VSCSIIF_ACT_SCSI_CDB;

        spin_lock_irqsave(shost->host_lock, flags);
        if (scsifront_enter(info)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        err = map_data_for_request(info, sc, shadow);
        if (err < 0) {
                pr_debug("%s: err %d\n", __func__, err);
                scsifront_return(info);
                spin_unlock_irqrestore(shost->host_lock, flags);
                if (err == -ENOMEM)
                        return SCSI_MLQUEUE_HOST_BUSY;
                sc->result = DID_ERROR << 16;
                scsi_done(sc);
                return 0;
        }

        if (scsifront_do_request(info, shadow)) {
                scsifront_gnttab_done(info, shadow);
                goto busy;
        }

        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);

        return 0;

busy:
        scsifront_return(info);
        spin_unlock_irqrestore(shost->host_lock, flags);
        pr_debug("%s: busy\n", __func__);
        return SCSI_MLQUEUE_HOST_BUSY;
}
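/*
 * Any exception handling (abort or reset) is forwarded to the backend and
 * we have to wait for its answer before completing the error handler.
 */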
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
        struct Scsi_Host *host = sc->device->host;
        struct vscsifrnt_info *info = shost_priv(host);
        struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
        int err = 0;

        if (info->host_active == STATE_ERROR)
                return FAILED;

        shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
        if (!shadow)
                return FAILED;

        shadow->act = act;
        shadow->rslt_reset = RSLT_RESET_WAITING;
        shadow->sc = sc;
        shadow->ref_rqid = s->rqid;
        init_waitqueue_head(&shadow->wq_reset);

        spin_lock_irq(host->host_lock);

        for (;;) {
                if (scsifront_enter(info))
                        goto fail;

                if (!scsifront_do_request(info, shadow))
                        break;

                scsifront_return(info);
                if (err)
                        goto fail;
                info->wait_ring_available = 1;
                spin_unlock_irq(host->host_lock);
                err = wait_event_interruptible(info->wq_sync,
                                               !info->wait_ring_available);
                spin_lock_irq(host->host_lock);
        }

        spin_unlock_irq(host->host_lock);
        err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
        spin_lock_irq(host->host_lock);

        if (!err) {
                err = shadow->rslt_reset;
                scsifront_put_rqid(info, shadow->rqid);
                kfree(shadow);
        } else {
                spin_lock(&info->shadow_lock);
                shadow->rslt_reset = RSLT_RESET_ERR;
                spin_unlock(&info->shadow_lock);
                err = FAILED;
        }

        scsifront_return(info);
        spin_unlock_irq(host->host_lock);
        return err;

fail:
        spin_unlock_irq(host->host_lock);
        kfree(shadow);
        return FAILED;
}

static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}

static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
        pr_debug("%s\n", __func__);
        return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}

static int scsifront_sdev_configure(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);
        int err;

        if (info->host_active == STATE_ERROR)
                return -EIO;

        if (info && current == info->curr) {
                err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
                if (err) {
                        xenbus_dev_error(info->dev, err,
                                "%s: writing dev_state_path", __func__);
                        return err;
                }
        }

        return 0;
}

static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
        struct vscsifrnt_info *info = shost_priv(sdev->host);
        int err;

        if (info && current == info->curr) {
                err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
                if (err)
                        xenbus_dev_error(info->dev, err,
                                "%s: writing dev_state_path", __func__);
        }
}

static struct scsi_host_template scsifront_sht = {
        .module                  = THIS_MODULE,
        .name                    = "Xen SCSI frontend driver",
        .queuecommand            = scsifront_queuecommand,
        .eh_abort_handler        = scsifront_eh_abort_handler,
        .eh_device_reset_handler = scsifront_dev_reset_handler,
        .slave_configure         = scsifront_sdev_configure,
        .slave_destroy           = scsifront_sdev_destroy,
        .cmd_per_lun             = VSCSIIF_DEFAULT_CMD_PER_LUN,
        .can_queue               = VSCSIIF_MAX_REQS,
        .this_id                 = -1,
        .cmd_size                = sizeof(struct vscsifrnt_shadow),
        .sg_tablesize            = VSCSIIF_SG_TABLESIZE,
        .proc_name               = "scsifront",
};

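/* Allocate the shared ring, the event channel and the irq for the backend. */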
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct vscsiif_sring *sring;
        int err;

        err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
                                &info->ring_ref);
        if (err)
                return err;

        XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
                goto free_gnttab;
        }

        err = bind_evtchn_to_irq_lateeoi(info->evtchn);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
                goto free_gnttab;
        }

        info->irq = err;

        err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
                                   IRQF_ONESHOT, "scsifront", info);
        if (err) {
                xenbus_dev_fatal(dev, err, "request_threaded_irq");
                goto free_irq;
        }

        return 0;

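/* free resources */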
free_irq:
        unbind_from_irqhandler(info->irq, info);
free_gnttab:
        xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);

        return err;
}

static void scsifront_free_ring(struct vscsifrnt_info *info)
{
        unbind_from_irqhandler(info->irq, info);
        xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}

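/* Publish ring-ref and event-channel to the backend via xenstore. */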
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct xenbus_transaction xbt;
        int err;

        pr_debug("%s\n", __func__);

        err = scsifront_alloc_ring(info);
        if (err)
                return err;
        pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                xenbus_dev_fatal(dev, err, "starting transaction");

        err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
                            info->ring_ref);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
                goto fail;
        }

        err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                            info->evtchn);
        if (err) {
                xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
                goto fail;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto free_sring;
        }

        return 0;

fail:
        xenbus_transaction_end(xbt, 1);
free_sring:
        scsifront_free_ring(info);

        return err;
}

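/* Allocate the SCSI host, set up the ring and announce ourselves to Xen. */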
static int scsifront_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        struct vscsifrnt_info *info;
        struct Scsi_Host *host;
        int err = -ENOMEM;
        char name[TASK_COMM_LEN];

        host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
        if (!host) {
                xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
                return err;
        }
        info = (struct vscsifrnt_info *)host->hostdata;

        dev_set_drvdata(&dev->dev, info);
        info->dev = dev;

        bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

        err = scsifront_init_ring(info);
        if (err) {
                scsi_host_put(host);
                return err;
        }

        init_waitqueue_head(&info->wq_sync);
        init_waitqueue_head(&info->wq_pause);
        spin_lock_init(&info->shadow_lock);

        snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);

        host->max_id      = VSCSIIF_MAX_TARGET;
        host->max_channel = 0;
        host->max_lun     = VSCSIIF_MAX_LUN;
        host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
        host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;

        err = scsi_add_host(host, &dev->dev);
        if (err) {
                dev_err(&dev->dev, "fail to add scsi host %d\n", err);
                goto free_sring;
        }
        info->host = host;
        info->host_active = STATE_ACTIVE;

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

free_sring:
        scsifront_free_ring(info);
        scsi_host_put(host);
        return err;
}

static int scsifront_resume(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err;

        spin_lock_irq(host->host_lock);

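        /* Finish all still pending commands. */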
        scsifront_finish_all(info);

        spin_unlock_irq(host->host_lock);

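        /* Reconnect to dom0. */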
        scsifront_free_ring(info);
        err = scsifront_init_ring(info);
        if (err) {
                dev_err(&dev->dev, "fail to resume %d\n", err);
                scsi_host_put(host);
                return err;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;
}

static int scsifront_suspend(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
        struct Scsi_Host *host = info->host;
        int err = 0;

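        /* No new commands for the backend. */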
        spin_lock_irq(host->host_lock);
        info->pause = 1;
        while (info->callers && !err) {
                info->waiting_pause = 1;
                info->wait_ring_available = 0;
                spin_unlock_irq(host->host_lock);
                wake_up(&info->wq_sync);
                err = wait_event_interruptible(info->wq_pause,
                                               !info->waiting_pause);
                spin_lock_irq(host->host_lock);
        }
        spin_unlock_irq(host->host_lock);
        return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %s removed\n", __func__, dev->nodename);

        mutex_lock(&scsifront_mutex);
        if (info->host_active != STATE_INACTIVE) {
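                /* Scsi_host not yet removed */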
                scsi_remove_host(info->host);
                info->host_active = STATE_INACTIVE;
        }
        mutex_unlock(&scsifront_mutex);

        scsifront_free_ring(info);
        scsi_host_put(info->host);

        return 0;
}

static void scsifront_disconnect(struct vscsifrnt_info *info)
{
        struct xenbus_device *dev = info->dev;
        struct Scsi_Host *host = info->host;

        pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

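        /*
         * When this function is executed, all devices of
         * Frontend have been deleted.
         * Therefore, it need not block I/O before remove_host.
         */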
        mutex_lock(&scsifront_mutex);
        if (info->host_active != STATE_INACTIVE) {
                scsi_remove_host(host);
                info->host_active = STATE_INACTIVE;
        }
        mutex_unlock(&scsifront_mutex);

        xenbus_frontend_closed(dev);
}

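/* Scan the backend's vscsi-devs directory and add/remove/re-add LUNs. */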
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
        struct xenbus_device *dev = info->dev;
        int i, err = 0;
        char str[64];
        char **dir;
        unsigned int dir_n = 0;
        unsigned int device_state;
        unsigned int hst, chn, tgt, lun;
        struct scsi_device *sdev;

        if (info->host_active == STATE_ERROR)
                return;

        dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
        if (IS_ERR(dir))
                return;

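        /* mark current task as the one allowed to modify device states */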
        BUG_ON(info->curr);
        info->curr = current;

        for (i = 0; i < dir_n; i++) {
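                /* read status */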
                snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
                                   &device_state);
                if (XENBUS_EXIST_ERR(err))
                        continue;

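                /* virtual SCSI device */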
                snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
                err = xenbus_scanf(XBT_NIL, dev->otherend, str,
                                   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
                if (XENBUS_EXIST_ERR(err))
                        continue;

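                /*
                 * Front device state path, used in slave_configure called
                 * on successful scsi_add_device, and in slave_destroy called
                 * on remove of a device.
                 */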
                snprintf(info->dev_state_path, sizeof(info->dev_state_path),
                         "vscsi-devs/%s/state", dir[i]);

                switch (op) {
                case VSCSIFRONT_OP_ADD_LUN:
                        if (device_state != XenbusStateInitialised)
                                break;

                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
                                err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
                                if (err)
                                        xenbus_dev_error(dev, err,
                                                "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
                        if (device_state != XenbusStateClosing)
                                break;

                        sdev = scsi_device_lookup(info->host, chn, tgt, lun);
                        if (sdev) {
                                scsi_remove_device(sdev);
                                scsi_device_put(sdev);
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
                        if (device_state == XenbusStateConnected) {
                                err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
                                if (err)
                                        xenbus_dev_error(dev, err,
                                                "%s: writing dev_state_path", __func__);
                        }
                        break;
                default:
                        break;
                }
        }

        info->curr = NULL;

        kfree(dir);
}

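/* Read feature-sg-grant from the backend and size the SG table accordingly. */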
static void scsifront_read_backend_params(struct xenbus_device *dev,
                                          struct vscsifrnt_info *info)
{
        unsigned int sg_grant, nr_segs;
        struct Scsi_Host *host = info->host;

        sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
        nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
        nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
        nr_segs = min_t(unsigned int, nr_segs,
                        VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
                        sizeof(struct scsiif_request_segment));

        if (!info->pause && sg_grant)
                dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
        else if (info->pause && nr_segs < host->sg_tablesize)
                dev_warn(&dev->dev,
                         "SG entries decreased from %d to %u - device may not work properly anymore\n",
                         host->sg_tablesize, nr_segs);

        host->sg_tablesize = nr_segs;
        host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
}

static void scsifront_backend_changed(struct xenbus_device *dev,
                                      enum xenbus_state backend_state)
{
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

        pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

        switch (backend_state) {
        case XenbusStateUnknown:
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
                break;

        case XenbusStateConnected:
                scsifront_read_backend_params(dev, info);

                if (info->pause) {
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
                        xenbus_switch_state(dev, XenbusStateConnected);
                        info->pause = 0;
                        return;
                }

                if (xenbus_read_driver_state(dev->nodename) ==
                    XenbusStateInitialised)
                        scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

                if (dev->state != XenbusStateConnected)
                        xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateClosed:
                if (dev->state == XenbusStateClosed)
                        break;
                fallthrough;    /* Missed the backend's Closing state */
        case XenbusStateClosing:
                scsifront_disconnect(info);
                break;

        case XenbusStateReconfiguring:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
                xenbus_switch_state(dev, XenbusStateReconfiguring);
                break;

        case XenbusStateReconfigured:
                scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
        }
}

static const struct xenbus_device_id scsifront_ids[] = {
        { "vscsi" },
        { "" }
};

static struct xenbus_driver scsifront_driver = {
        .ids                    = scsifront_ids,
        .probe                  = scsifront_probe,
        .remove                 = scsifront_remove,
        .resume                 = scsifront_resume,
        .suspend                = scsifront_suspend,
        .otherend_changed       = scsifront_backend_changed,
};

static int __init scsifront_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        return xenbus_register_frontend(&scsifront_driver);
}
module_init(scsifront_init);

static void __exit scsifront_exit(void)
{
        xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");