/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 * Adapted to blkback style and vscsi by:
 * Copyright (c) 2014 Juergen Gross <jgross@suse.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen-pvscsi: " fmt

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>

#include <generated/utsrelease.h>

#include <scsi/scsi_host.h> /* SG_ALL */

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hypervisor.h>

#include <xen/xen.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>

#define VSCSI_VERSION	"v0.1"
#define VSCSI_NAMELEN	32

struct ids_tuple {
	unsigned int hst;		/* host    */
	unsigned int chn;		/* channel */
	unsigned int tgt;		/* target  */
	unsigned int lun;		/* LUN     */
};

struct v2p_entry {
	struct ids_tuple v;		/* translate from */
	struct scsiback_tpg *tpg;	/* translate to   */
	unsigned int lun;
	struct kref kref;
	struct list_head l;
};

struct vscsibk_info {
	struct xenbus_device *dev;

	domid_t domid;
	unsigned int irq;

	struct vscsiif_back_ring ring;

	spinlock_t ring_lock;
	atomic_t nr_unreplied_reqs;

	spinlock_t v2p_lock;
	struct list_head v2p_entry_lists;

	wait_queue_head_t waiting_to_free;

	struct gnttab_page_cache free_pages;
};

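/* Theoretical maximum of grants for one request. */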
#define VSCSI_MAX_GRANTS	(SG_ALL + VSCSIIF_SG_TABLESIZE)

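/*
 * Number of grants to be mapped or unmapped in one batch. Keep this small:
 * arrays of VSCSI_GRANT_BATCH elements are allocated on the stack in
 * scsiback_fast_flush_area() and scsiback_gnttab_data_map_list().
 */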
#define VSCSI_GRANT_BATCH	16

struct vscsibk_pend {
	uint16_t rqid;

	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
	uint8_t cmd_len;

	uint8_t sc_data_direction;
	uint16_t n_sg;		/* real length of SG list */
	uint16_t n_grants;	/* SG pages and potentially SG list */
	uint32_t data_len;
	uint32_t result;

	struct vscsibk_info *info;
	struct v2p_entry *v2p;
	struct scatterlist *sgl;

	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];

	grant_handle_t grant_handles[VSCSI_MAX_GRANTS];
	struct page *pages[VSCSI_MAX_GRANTS];

	struct se_cmd se_cmd;

	struct completion tmr_done;
};

#define VSCSI_DEFAULT_SESSION_TAGS	128

struct scsiback_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct scsiback_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for pvscsi Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for pvscsi Target port */
	char tport_name[VSCSI_NAMELEN];
	/* Returned by scsiback_make_tport() */
	struct se_wwn tport_wwn;
};

struct scsiback_tpg {
	/* scsiback port target portal group tag for TCM */
	u16 tport_tpgt;
	/* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_fe_count;
	/* list for scsiback_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */
	struct scsiback_nexus *tpg_nexus;
	/* Pointer back to scsiback_tport */
	struct scsiback_tport *tport;
	/* Returned by scsiback_make_tpg() */
	struct se_portal_group se_tpg;
	/* alias used in xenstore */
	char param_alias[VSCSI_NAMELEN];
	/* list of info structures related to this target portal group */
	struct list_head info_list;
};

#define SCSIBACK_INVALID_HANDLE (~0)

static bool log_print_stat;
module_param(log_print_stat, bool, 0644);

static int scsiback_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in backend buffer");
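
/*
 * Illustrative usage, not from the original source: both knobs above are
 * runtime-writable module parameters, so -- assuming the usual module name
 * xen-scsiback -- the page cache limit could be raised at load time with
 *   modprobe xen-scsiback max_buffer_pages=4096
 * or later via /sys/module/xen_scsiback/parameters/max_buffer_pages.
 */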
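/* Global mutex to protect the scsiback TPG list. */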
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);

static void scsiback_get(struct vscsibk_info *info)
{
	atomic_inc(&info->nr_unreplied_reqs);
}

static void scsiback_put(struct vscsibk_info *info)
{
	if (atomic_dec_and_test(&info->nr_unreplied_reqs))
		wake_up(&info->waiting_to_free);
}

static unsigned long vaddr_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	return (unsigned long)pfn_to_kaddr(pfn);
}

static unsigned long vaddr(struct vscsibk_pend *req, int seg)
{
	return vaddr_page(req->pages[seg]);
}

static void scsiback_print_status(char *sense_buffer, int errors,
				  struct vscsibk_pend *pending_req)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;

	pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x\n",
	       tpg->tport->tport_name, pending_req->v2p->lun,
	       pending_req->cmnd[0], errors & 0xff, COMMAND_COMPLETE,
	       host_byte(errors));
}

static void scsiback_fast_flush_area(struct vscsibk_pend *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH];
	struct page *pages[VSCSI_GRANT_BATCH];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	kfree(req->sgl);
	req->sgl = NULL;
	req->n_sg = 0;

	if (!req->n_grants)
		return;

	for (i = 0; i < req->n_grants; i++) {
		handle = req->grant_handles[i];
		if (handle == SCSIBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
		pages[invcount] = req->pages[i];
		put_page(pages[invcount]);
		invcount++;
		if (invcount < VSCSI_GRANT_BATCH)
			continue;
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
		invcount = 0;
	}

	if (invcount) {
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
	}

	gnttab_page_cache_put(&req->info->free_pages, req->pages,
			      req->n_grants);
	req->n_grants = 0;
}

static void scsiback_free_translation_entry(struct kref *kref)
{
	struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref);
	struct scsiback_tpg *tpg = entry->tpg;

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_fe_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(entry);
}

static int32_t scsiback_result(int32_t result)
{
	int32_t host_status;

	switch (XEN_VSCSIIF_RSLT_HOST(result)) {
	case DID_OK:
		host_status = XEN_VSCSIIF_RSLT_HOST_OK;
		break;
	case DID_NO_CONNECT:
		host_status = XEN_VSCSIIF_RSLT_HOST_NO_CONNECT;
		break;
	case DID_BUS_BUSY:
		host_status = XEN_VSCSIIF_RSLT_HOST_BUS_BUSY;
		break;
	case DID_TIME_OUT:
		host_status = XEN_VSCSIIF_RSLT_HOST_TIME_OUT;
		break;
	case DID_BAD_TARGET:
		host_status = XEN_VSCSIIF_RSLT_HOST_BAD_TARGET;
		break;
	case DID_ABORT:
		host_status = XEN_VSCSIIF_RSLT_HOST_ABORT;
		break;
	case DID_PARITY:
		host_status = XEN_VSCSIIF_RSLT_HOST_PARITY;
		break;
	case DID_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
		break;
	case DID_RESET:
		host_status = XEN_VSCSIIF_RSLT_HOST_RESET;
		break;
	case DID_BAD_INTR:
		host_status = XEN_VSCSIIF_RSLT_HOST_BAD_INTR;
		break;
	case DID_PASSTHROUGH:
		host_status = XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH;
		break;
	case DID_SOFT_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR;
		break;
	case DID_IMM_RETRY:
		host_status = XEN_VSCSIIF_RSLT_HOST_IMM_RETRY;
		break;
	case DID_REQUEUE:
		host_status = XEN_VSCSIIF_RSLT_HOST_REQUEUE;
		break;
	case DID_TRANSPORT_DISRUPTED:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED;
		break;
	case DID_TRANSPORT_FAILFAST:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
		break;
	case DID_TARGET_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE;
		break;
	case DID_NEXUS_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE;
		break;
	case DID_ALLOC_FAILURE:
		host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE;
		break;
	case DID_MEDIUM_ERROR:
		host_status = XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR;
		break;
	case DID_TRANSPORT_MARGINAL:
		host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
		break;
	default:
		host_status = XEN_VSCSIIF_RSLT_HOST_ERROR;
		break;
	}

	return (host_status << 16) | (result & 0x00ffff);
}

static void scsiback_send_response(struct vscsibk_info *info,
				   char *sense_buffer, int32_t result,
				   uint32_t resid, uint16_t rqid)
{
	struct vscsiif_response *ring_res;
	int notify;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;
	unsigned len;

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt = scsiback_result(result);
	ring_res->rqid = rqid;

	if (sense_buffer != NULL &&
	    scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
				 &sshdr)) {
		len = min_t(unsigned, 8 + sense_buffer[7],
			    VSCSIIF_SENSE_BUFFERSIZE);
		memcpy(ring_res->sense_buffer, sense_buffer, len);
		ring_res->sense_len = len;
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
					uint32_t resid,
					struct vscsibk_pend *pending_req)
{
	scsiback_send_response(pending_req->info, sense_buffer, result,
			       resid, pending_req->rqid);

	if (pending_req->v2p)
		kref_put(&pending_req->v2p->kref,
			 scsiback_free_translation_entry);
}

static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = pending_req->sense_buffer;
	resid = pending_req->se_cmd.residual_count;
	errors = pending_req->result;

	if (errors && log_print_stat)
		scsiback_print_status(sense_buffer, errors, pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(info);
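	/*
	 * Drop the extra KREF_ACK reference taken via TARGET_SCF_ACK_KREF in
	 * scsiback_cmd_exec(), ahead of scsiback_check_stop_free() ->
	 * transport_generic_free_cmd() dropping the final se_cmd reference.
	 */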
	target_put_sess_cmd(&pending_req->se_cmd);
}

static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;

	scsiback_get(pending_req->info);
	se_cmd->tag = pending_req->rqid;
	target_init_cmd(se_cmd, sess, pending_req->sense_buffer,
			pending_req->v2p->lun, pending_req->data_len, 0,
			pending_req->sc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, pending_req->cmnd, pending_req->sgl,
			       pending_req->n_sg, NULL, 0, NULL, 0, GFP_KERNEL))
		return;

	target_submit(se_cmd);
}

static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
					  struct page **pg,
					  grant_handle_t *grant, int cnt)
{
	int err, i;

	if (!cnt)
		return 0;

	err = gnttab_map_refs(map, NULL, pg, cnt);
	for (i = 0; i < cnt; i++) {
		if (unlikely(map[i].status != GNTST_okay)) {
			pr_err("invalid buffer -- could not remap it\n");
			map[i].handle = SCSIBACK_INVALID_HANDLE;
			if (!err)
				err = -ENOMEM;
		} else {
			get_page(pg[i]);
		}
		grant[i] = map[i].handle;
	}
	return err;
}

static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
					 struct scsiif_request_segment *seg,
					 struct page **pg,
					 grant_handle_t *grant, int cnt,
					 u32 flags)
{
	int mapcount = 0, i, err = 0;
	struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
	struct vscsibk_info *info = pending_req->info;

	for (i = 0; i < cnt; i++) {
		if (gnttab_page_cache_get(&info->free_pages, pg + mapcount)) {
			gnttab_page_cache_put(&info->free_pages, pg, mapcount);
			pr_err("no grant page\n");
			return -ENOMEM;
		}
		gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
				  flags, seg[i].gref, info->domid);
		mapcount++;
		if (mapcount < VSCSI_GRANT_BATCH)
			continue;
		err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
		pg += mapcount;
		grant += mapcount;
		pending_req->n_grants += mapcount;
		if (err)
			return err;
		mapcount = 0;
	}
	err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
	pending_req->n_grants += mapcount;
	return err;
}

static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
				    struct vscsibk_pend *pending_req)
{
	u32 flags;
	int i, err, n_segs, i_seg = 0;
	struct page **pg;
	struct scsiif_request_segment *seg;
	unsigned long end_seg = 0;
	unsigned int nr_segments = (unsigned int)ring_req->nr_segments;
	unsigned int nr_sgl = 0;
	struct scatterlist *sg;
	grant_handle_t *grant;

	pending_req->n_sg = 0;
	pending_req->n_grants = 0;
	pending_req->data_len = 0;

	nr_segments &= ~VSCSIIF_SG_GRANT;
	if (!nr_segments)
		return 0;

	if (nr_segments > VSCSIIF_SG_TABLESIZE) {
		pr_debug("invalid parameter nr_seg = %d\n",
			 ring_req->nr_segments);
		return -EINVAL;
	}

	if (ring_req->nr_segments & VSCSIIF_SG_GRANT) {
		err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
			pending_req->pages, pending_req->grant_handles,
			nr_segments, GNTMAP_host_map | GNTMAP_readonly);
		if (err)
			return err;
		nr_sgl = nr_segments;
		nr_segments = 0;
		for (i = 0; i < nr_sgl; i++) {
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			if ((unsigned)ring_req->seg[i].offset +
			    (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
			    n_segs * sizeof(struct scsiif_request_segment) !=
			    ring_req->seg[i].length)
				return -EINVAL;
			nr_segments += n_segs;
		}
		if (nr_segments > SG_ALL) {
			pr_debug("invalid nr_seg = %d\n", nr_segments);
			return -EINVAL;
		}
	}
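	/* sgl is freed in scsiback_fast_flush_area(). */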
	pending_req->sgl = kmalloc_array(nr_segments,
					 sizeof(struct scatterlist), GFP_KERNEL);
	if (!pending_req->sgl)
		return -ENOMEM;

	sg_init_table(pending_req->sgl, nr_segments);
	pending_req->n_sg = nr_segments;

	flags = GNTMAP_host_map;
	if (pending_req->sc_data_direction == DMA_TO_DEVICE)
		flags |= GNTMAP_readonly;

	pg = pending_req->pages + nr_sgl;
	grant = pending_req->grant_handles + nr_sgl;
	if (!nr_sgl) {
		seg = ring_req->seg;
		err = scsiback_gnttab_data_map_list(pending_req, seg,
						    pg, grant, nr_segments,
						    flags);
		if (err)
			return err;
	} else {
		for (i = 0; i < nr_sgl; i++) {
			seg = (struct scsiif_request_segment *)(
			      vaddr(pending_req, i) + ring_req->seg[i].offset);
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			err = scsiback_gnttab_data_map_list(pending_req, seg,
							    pg, grant, n_segs,
							    flags);
			if (err)
				return err;
			pg += n_segs;
			grant += n_segs;
		}
		end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
		seg = (struct scsiif_request_segment *)end_seg;
		end_seg += ring_req->seg[0].length;
		pg = pending_req->pages + nr_sgl;
	}

	for_each_sg(pending_req->sgl, sg, nr_segments, i) {
		sg_set_page(sg, pg[i], seg->length, seg->offset);
		pending_req->data_len += seg->length;
		seg++;
		if (nr_sgl && (unsigned long)seg >= end_seg) {
			i_seg++;
			end_seg = vaddr(pending_req, i_seg) +
				  ring_req->seg[i_seg].offset;
			seg = (struct scsiif_request_segment *)end_seg;
			end_seg += ring_req->seg[i_seg].length;
		}
		if (sg->offset >= PAGE_SIZE ||
		    sg->length > PAGE_SIZE ||
		    sg->offset + sg->length > PAGE_SIZE)
			return -EINVAL;
	}

	return 0;
}

static void scsiback_disconnect(struct vscsibk_info *info)
{
	wait_event(info->waiting_to_free,
		   atomic_read(&info->nr_unreplied_reqs) == 0);

	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
	xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
}

static void scsiback_device_action(struct vscsibk_pend *pending_req,
				   enum tcm_tmreq_table act, int tag)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	u64 unpacked_lun = pending_req->v2p->lun;
	int rc, err = XEN_VSCSIIF_RSLT_RESET_FAILED;

	init_completion(&pending_req->tmr_done);

	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
			       &pending_req->sense_buffer[0],
			       unpacked_lun, NULL, act, GFP_KERNEL,
			       tag, TARGET_SCF_ACK_KREF);
	if (rc)
		goto err;

	wait_for_completion(&pending_req->tmr_done);

	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		XEN_VSCSIIF_RSLT_RESET_SUCCESS : XEN_VSCSIIF_RSLT_RESET_FAILED;

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	transport_generic_free_cmd(&pending_req->se_cmd, 0);
	return;

err:
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}
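/*
 * Perform virtual to physical translation: look up the v2p entry for the
 * given virtual ids and take a reference on it.
 */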
static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info,
						 struct ids_tuple *v)
{
	struct v2p_entry *entry;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);
	list_for_each_entry(entry, head, l) {
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun)) {
			kref_get(&entry->kref);
			goto out;
		}
	}
	entry = NULL;

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return entry;
}

static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
						  struct v2p_entry *v2p)
{
	struct scsiback_tpg *tpg = v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_session *se_sess = nexus->tvn_se_sess;
	struct vscsibk_pend *req;
	int tag, cpu, i;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vscsiif_request\n");
		return ERR_PTR(-ENOMEM);
	}

	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;

	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	return req;
}

static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
						 struct vscsiif_back_ring *ring,
						 struct vscsiif_request *ring_req)
{
	struct vscsibk_pend *pending_req;
	struct v2p_entry *v2p;
	struct ids_tuple vir;
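	/* Range check the request coming from the frontend. */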
	if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
	    (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_NONE)) {
		pr_debug("invalid parameter data_dir = %d\n",
			 ring_req->sc_data_direction);
		return ERR_PTR(-EINVAL);
	}
	if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		pr_debug("invalid parameter cmd_len = %d\n",
			 ring_req->cmd_len);
		return ERR_PTR(-EINVAL);
	}

	vir.chn = ring_req->channel;
	vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	v2p = scsiback_do_translation(info, &vir);
	if (!v2p) {
		pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
			 vir.chn, vir.tgt, vir.lun);
		return ERR_PTR(-ENODEV);
	}

	pending_req = scsiback_get_pend_req(ring, v2p);
	if (IS_ERR(pending_req)) {
		kref_put(&v2p->kref, scsiback_free_translation_entry);
		return ERR_PTR(-ENOMEM);
	}
	pending_req->rqid = ring_req->rqid;
	pending_req->info = info;
	pending_req->v2p = v2p;
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	pending_req->cmd_len = ring_req->cmd_len;
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	return pending_req;
}

static int scsiback_do_cmd_fn(struct vscsibk_info *info,
			      unsigned int *eoi_flags)
{
	struct vscsiif_back_ring *ring = &info->ring;
	struct vscsiif_request ring_req;
	struct vscsibk_pend *pending_req;
	RING_IDX rc, rp;
	int more_to_do;
	uint32_t result;

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* guest system is accessing ring, too */

	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
		rc = ring->rsp_prod_pvt;
		pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
			info->domid, rp, rc, rp - rc);
		return -EINVAL;
	}

	while (rc != rp) {
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;

		RING_COPY_REQUEST(ring, rc, &ring_req);
		ring->req_cons = ++rc;

		pending_req = prepare_pending_reqs(info, ring, &ring_req);
		if (IS_ERR(pending_req)) {
			switch (PTR_ERR(pending_req)) {
			case -ENODEV:
				result = DID_NO_CONNECT;
				break;
			default:
				result = DID_ERROR;
				break;
			}
			scsiback_send_response(info, NULL, result << 16, 0,
					       ring_req.rqid);
			return 1;
		}

		switch (ring_req.act) {
		case VSCSIIF_ACT_SCSI_CDB:
			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
				scsiback_fast_flush_area(pending_req);
				scsiback_do_resp_with_sense(NULL,
					DID_ERROR << 16, 0, pending_req);
				transport_generic_free_cmd(&pending_req->se_cmd, 0);
			} else {
				scsiback_cmd_exec(pending_req);
			}
			break;
		case VSCSIIF_ACT_SCSI_ABORT:
			scsiback_device_action(pending_req, TMR_ABORT_TASK,
					       ring_req.ref_rqid);
			break;
		case VSCSIIF_ACT_SCSI_RESET:
			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
			break;
		default:
			pr_err_ratelimited("invalid request\n");
			scsiback_do_resp_with_sense(NULL, DID_ERROR << 16, 0,
						    pending_req);
			transport_generic_free_cmd(&pending_req->se_cmd, 0);
			break;
		}

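		/* Yield point for this unbounded loop. */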
		cond_resched();
	}

	gnttab_page_cache_shrink(&info->free_pages, scsiback_max_buffer_pages);

	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	return more_to_do;
}

static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
	struct vscsibk_info *info = dev_id;
	int rc;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
		cond_resched();
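	/* In case of a ring error we keep the event channel masked. */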
	if (!rc)
		xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
			       evtchn_port_t evtchn)
{
	void *area;
	struct vscsiif_sring *sring;
	int err;

	if (info->irq)
		return -1;

	err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
	if (err)
		return err;

	sring = (struct vscsiif_sring *)area;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq_lateeoi(info->dev, evtchn);
	if (err < 0)
		goto unmap_page;

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsiback_irq_fn,
				   IRQF_ONESHOT, "vscsiif-backend", info);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
unmap_page:
	xenbus_unmap_ring_vfree(info->dev, area);

	return err;
}

static int scsiback_map(struct vscsibk_info *info)
{
	struct xenbus_device *dev = info->dev;
	unsigned int ring_ref;
	evtchn_port_t evtchn;
	int err;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "ring-ref", "%u", &ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
		return err;
	}

	return scsiback_init_sring(info, ring_ref, evtchn);
}
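/*
 * Check whether a translation entry for the given virtual ids exists.
 * The caller must hold info->v2p_lock.
 */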
static struct v2p_entry *scsiback_chk_translation_entry(
	struct vscsibk_info *info, struct ids_tuple *v)
{
	struct list_head *head = &(info->v2p_entry_lists);
	struct v2p_entry *entry;

	list_for_each_entry(entry, head, l)
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun))
			return entry;

	return NULL;
}
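/*
 * Add a new translation entry for the given virtual ids.
 */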
static int scsiback_add_translation_entry(struct vscsibk_info *info,
					  char *phy, struct ids_tuple *v)
{
	int err = 0;
	struct v2p_entry *new;
	unsigned long flags;
	char *lunp;
	unsigned long long unpacked_lun;
	struct se_lun *se_lun;
	struct scsiback_tpg *tpg_entry, *tpg = NULL;
	char *error = "doesn't exist";

	lunp = strrchr(phy, ':');
	if (!lunp) {
		pr_err("illegal format of physical device %s\n", phy);
		return -EINVAL;
	}
	*lunp = 0;
	lunp++;
	err = kstrtoull(lunp, 10, &unpacked_lun);
	if (err < 0) {
		pr_err("lun number not valid: %s\n", lunp);
		return err;
	}

	mutex_lock(&scsiback_mutex);
	list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
		if (!strcmp(phy, tpg_entry->tport->tport_name) ||
		    !strcmp(phy, tpg_entry->param_alias)) {
			mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
			hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
				if (se_lun->unpacked_lun == unpacked_lun) {
					if (!tpg_entry->tpg_nexus)
						error = "nexus undefined";
					else
						tpg = tpg_entry;
					break;
				}
			}
			mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
			break;
		}
	}
	if (tpg) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count++;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&scsiback_mutex);

	if (!tpg) {
		pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
		return -ENODEV;
	}

	new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
	if (new == NULL) {
		err = -ENOMEM;
		goto out_free;
	}

	spin_lock_irqsave(&info->v2p_lock, flags);
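	/* Check double assignment to identical virtual ID. */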
	if (scsiback_chk_translation_entry(info, v)) {
		pr_warn("Virtual ID is already used. Assignment was not performed.\n");
		err = -EEXIST;
		goto out;
	}
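	/* Create a new translation entry and add it to the list. */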
	kref_init(&new->kref);
	new->v = *v;
	new->tpg = tpg;
	new->lun = unpacked_lun;
	list_add_tail(&new->l, &info->v2p_entry_lists);

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);

out_free:
	if (err) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count--;
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(new);
	}

	return err;
}

static void __scsiback_del_translation_entry(struct v2p_entry *entry)
{
	list_del(&entry->l);
	kref_put(&entry->kref, scsiback_free_translation_entry);
}
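/*
 * Delete the translation entry specified by the virtual ids.
 */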
static int scsiback_del_translation_entry(struct vscsibk_info *info,
					  struct ids_tuple *v)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&info->v2p_lock, flags);

	entry = scsiback_chk_translation_entry(info, v);
	if (entry)
		__scsiback_del_translation_entry(entry);
	else
		ret = -ENOENT;

	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return ret;
}

static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
				char *phy, struct ids_tuple *vir, int try)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int err;

	if (try) {
		spin_lock_irqsave(&info->v2p_lock, flags);
		entry = scsiback_chk_translation_entry(info, vir);
		spin_unlock_irqrestore(&info->v2p_lock, flags);
		if (entry)
			return;
	}
	if (!scsiback_add_translation_entry(info, phy, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateInitialised)) {
			pr_err("xenbus_printf error %s\n", state);
			scsiback_del_translation_entry(info, vir);
		}
	} else if (!try) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
	}
}

static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
				struct ids_tuple *vir)
{
	if (!scsiback_del_translation_entry(info, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateClosed))
			pr_err("xenbus_printf error %s\n", state);
	}
}

#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
#define VSCSIBACK_OP_UPDATEDEV_STATE	2

static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
				     char *ent)
{
	int err;
	struct ids_tuple vir;
	char *val;
	int device_state;
	char phy[VSCSI_NAMELEN];
	char str[64];
	char state[64];
	struct xenbus_device *dev = info->dev;
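	/* Read the device state. */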
	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
	if (XENBUS_EXIST_ERR(err))
		return;
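	/* Read the physical SCSI device specification. */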
	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
	val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
	if (IS_ERR(val)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
		return;
	}
	strscpy(phy, val, VSCSI_NAMELEN);
	kfree(val);
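	/* Read the virtual SCSI device specification. */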
	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
	if (XENBUS_EXIST_ERR(err)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
					 "%s: writing %s", __func__, state);
		return;
	}

	switch (op) {
	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
		switch (device_state) {
		case XenbusStateInitialising:
			scsiback_do_add_lun(info, state, phy, &vir, 0);
			break;
		case XenbusStateConnected:
			scsiback_do_add_lun(info, state, phy, &vir, 1);
			break;
		case XenbusStateClosing:
			scsiback_do_del_lun(info, state, &vir);
			break;
		default:
			break;
		}
		break;

	case VSCSIBACK_OP_UPDATEDEV_STATE:
		if (device_state == XenbusStateInitialised) {
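			/* Modify vscsi-devs/dev-x/state. */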
			if (xenbus_printf(XBT_NIL, dev->nodename, state,
					  "%d", XenbusStateConnected)) {
				pr_err("xenbus_printf error %s\n", state);
				scsiback_del_translation_entry(info, &vir);
				xenbus_printf(XBT_NIL, dev->nodename, state,
					      "%d", XenbusStateClosed);
			}
		}
		break;

	default:
		break;
	}
}

static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op)
{
	int i;
	char **dir;
	unsigned int ndir = 0;

	dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs",
			       &ndir);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < ndir; i++)
		scsiback_do_1lun_hotplug(info, op, dir[i]);

	kfree(dir);
}

static void scsiback_frontend_changed(struct xenbus_device *dev,
				      enum xenbus_state frontend_state)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;

	case XenbusStateInitialised:
		if (scsiback_map(info))
			break;

		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE);

		if (dev->state == XenbusStateConnected)
			break;

		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		if (info->irq)
			scsiback_disconnect(info);

		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
		break;

	case XenbusStateReconfiguring:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfigured);

		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
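/*
 * Release all remaining translation entries of this backend.
 */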
static void scsiback_release_translation_entry(struct vscsibk_info *info)
{
	struct v2p_entry *entry, *tmp;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);

	list_for_each_entry_safe(entry, tmp, head, l)
		__scsiback_del_translation_entry(entry);

	spin_unlock_irqrestore(&info->v2p_lock, flags);
}

static int scsiback_remove(struct xenbus_device *dev)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	if (info->irq)
		scsiback_disconnect(info);

	scsiback_release_translation_entry(info);

	gnttab_page_cache_shrink(&info->free_pages, 0);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int scsiback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;

	struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
					    GFP_KERNEL);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
		return -ENOMEM;
	}
	info->dev = dev;
	dev_set_drvdata(&dev->dev, info);

	info->domid = dev->otherend_id;
	spin_lock_init(&info->ring_lock);
	atomic_set(&info->nr_unreplied_reqs, 0);
	init_waitqueue_head(&info->waiting_to_free);
	info->irq = 0;
	INIT_LIST_HEAD(&info->v2p_entry_lists);
	spin_lock_init(&info->v2p_lock);
	gnttab_page_cache_init(&info->free_pages);

	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
			    SG_ALL);
	if (err)
		xenbus_dev_error(dev, err, "writing feature-sg-grant");

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	scsiback_remove(dev);

	return err;
}

static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static struct se_wwn *
scsiback_make_tport(struct target_fabric_configfs *tf,
		    struct config_group *group,
		    const char *name)
{
	struct scsiback_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->tport_wwpn = wwpn;
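	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */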
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3;
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VSCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds max: %d\n",
		       scsiback_dump_proto_id(tport), name, VSCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);

	pr_debug("Allocated emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void scsiback_drop_tport(struct se_wwn *wwn)
{
	struct scsiback_tport *tport = container_of(wwn,
				struct scsiback_tport, tport_wwn);

	pr_debug("Deallocating emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), tport->tport_name);

	kfree(tport);
}

static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
	target_free_tag(se_cmd->se_sess, se_cmd);
}

static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int scsiback_write_pending(struct se_cmd *se_cmd)
{
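	/* Go ahead and process the write immediately. */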
	target_execute_cmd(se_cmd);

	return 0;
}

static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int scsiback_queue_data_in(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	pending_req->result = SAM_STAT_GOOD;
	scsiback_cmd_done(pending_req);
	return 0;
}

static int scsiback_queue_status(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
		pending_req->result = SAM_STAT_CHECK_CONDITION;
	else
		pending_req->result = se_cmd->scsi_status;

	scsiback_cmd_done(pending_req);
	return 0;
}

static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	complete(&pending_req->tmr_done);
}

static void scsiback_aborted_task(struct se_cmd *se_cmd)
{
}

static ssize_t scsiback_tpg_param_alias_show(struct config_item *item,
					     char *page)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	ssize_t rb;

	mutex_lock(&tpg->tv_tpg_mutex);
	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return rb;
}

static ssize_t scsiback_tpg_param_alias_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	int len;

	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("param alias: %s, exceeds max: %d\n", page,
		       VSCSI_NAMELEN);
		return -EINVAL;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
	/* Strip a trailing newline, guarding against an empty write. */
	if (len > 0 && tpg->param_alias[len - 1] == '\n')
		tpg->param_alias[len - 1] = '\0';
	mutex_unlock(&tpg->tv_tpg_mutex);

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_param_, alias);

static struct configfs_attribute *scsiback_param_attrs[] = {
	&scsiback_tpg_param_attr_alias,
	NULL,
};

static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	tpg->tpg_nexus = p;
	return 0;
}

static int scsiback_make_nexus(struct scsiback_tpg *tpg,
			       const char *name)
{
	struct scsiback_nexus *tv_nexus;
	int ret = 0;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		pr_debug("tpg->tpg_nexus already exists\n");
		ret = -EEXIST;
		goto out_unlock;
	}

	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
						     VSCSI_DEFAULT_SESSION_TAGS,
						     sizeof(struct vscsibk_pend),
						     TARGET_PROT_NORMAL, name,
						     tv_nexus, scsiback_alloc_sess_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		kfree(tv_nexus);
		ret = -ENOMEM;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&tpg->tv_tpg_mutex);
	return ret;
}

static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
{
	struct se_session *se_sess;
	struct scsiback_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_fe_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
		       tpg->tv_tpg_fe_count);
		return -EBUSY;
	}

	pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 scsiback_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
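	/* Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port. */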
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t scsiback_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

static ssize_t scsiback_tpg_nexus_store(struct config_item *item,
					const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
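	/* Shutdown the active I_T nexus if 'NULL' is passed. */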
	if (!strncmp(page, "NULL", 4)) {
		ret = scsiback_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
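	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in scsiback_make_tport(), and call
	 * scsiback_make_nexus() to start the TCM session.
	 */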
	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, VSCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3];
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
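	/* Clear any trailing newline for the NAA WWN. */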
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = scsiback_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_, nexus);

static struct configfs_attribute *scsiback_tpg_attrs[] = {
	&scsiback_tpg_attr_nexus,
	NULL,
};

static ssize_t
scsiback_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
		       UTS_RELEASE"\n",
		       VSCSI_VERSION, utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(scsiback_wwn_, version);

static struct configfs_attribute *scsiback_wwn_attrs[] = {
	&scsiback_wwn_attr_version,
	NULL,
};

static int scsiback_port_link(struct se_portal_group *se_tpg,
			      struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void scsiback_port_unlink(struct se_portal_group *se_tpg,
				 struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);
}

static struct se_portal_group *
scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct scsiback_tport *tport = container_of(wwn,
			struct scsiback_tport, tport_wwn);

	struct scsiback_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	ret = kstrtou16(name + 5, 10, &tpgt);
	if (ret)
		return ERR_PTR(ret);

	tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->info_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&scsiback_mutex);
	list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
	mutex_unlock(&scsiback_mutex);

	return &tpg->se_tpg;
}

static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&scsiback_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&scsiback_mutex);
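	/* Release the virtual I_T Nexus for this xen-pvscsi TPG. */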
	scsiback_drop_nexus(tpg);
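	/* Deregister the se_tpg from TCM. */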
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static int scsiback_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static const struct target_core_fabric_ops scsiback_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "xen-pvscsi",
	.tpg_get_wwn			= scsiback_get_fabric_wwn,
	.tpg_get_tag			= scsiback_get_tag,
	.tpg_check_demo_mode		= scsiback_check_true,
	.tpg_check_demo_mode_cache	= scsiback_check_true,
	.tpg_check_demo_mode_write_protect = scsiback_check_false,
	.tpg_check_prod_mode_write_protect = scsiback_check_false,
	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
	.check_stop_free		= scsiback_check_stop_free,
	.release_cmd			= scsiback_release_cmd,
	.sess_get_index			= scsiback_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= scsiback_write_pending,
	.set_default_node_attributes	= scsiback_set_default_node_attrs,
	.get_cmd_state			= scsiback_get_cmd_state,
	.queue_data_in			= scsiback_queue_data_in,
	.queue_status			= scsiback_queue_status,
	.queue_tm_rsp			= scsiback_queue_tm_rsp,
	.aborted_task			= scsiback_aborted_task,
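	/*
	 * Set up function pointers for generic logic in
	 * target_core_fabric_configfs.c.
	 */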
	.fabric_make_wwn		= scsiback_make_tport,
	.fabric_drop_wwn		= scsiback_drop_tport,
	.fabric_make_tpg		= scsiback_make_tpg,
	.fabric_drop_tpg		= scsiback_drop_tpg,
	.fabric_post_link		= scsiback_port_link,
	.fabric_pre_unlink		= scsiback_port_unlink,

	.tfc_wwn_attrs			= scsiback_wwn_attrs,
	.tfc_tpg_base_attrs		= scsiback_tpg_attrs,
	.tfc_tpg_param_attrs		= scsiback_param_attrs,
};

static const struct xenbus_device_id scsiback_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsiback_driver = {
	.ids			= scsiback_ids,
	.probe			= scsiback_probe,
	.remove			= scsiback_remove,
	.otherend_changed	= scsiback_frontend_changed
};

static int __init scsiback_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);

	ret = xenbus_register_backend(&scsiback_driver);
	if (ret)
		goto out;

	ret = target_register_template(&scsiback_ops);
	if (ret)
		goto out_unregister_xenbus;

	return 0;

out_unregister_xenbus:
	xenbus_unregister_driver(&scsiback_driver);
out:
	pr_err("%s: error %d\n", __func__, ret);
	return ret;
}

static void __exit scsiback_exit(void)
{
	target_unregister_template(&scsiback_ops);
	xenbus_unregister_driver(&scsiback_driver);
}

module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");