0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #define KMSG_COMPONENT "zfcp"
0011 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
0012
0013 #include <linux/lockdep.h>
0014 #include <linux/slab.h>
0015 #include <linux/module.h>
0016 #include "zfcp_ext.h"
0017 #include "zfcp_qdio.h"
0018
0019 static bool enable_multibuffer = true;
0020 module_param_named(datarouter, enable_multibuffer, bool, 0400);
0021 MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
0022
0023 #define ZFCP_QDIO_REQUEST_RESCAN_MSECS (MSEC_PER_SEC * 10)
0024 #define ZFCP_QDIO_REQUEST_SCAN_MSECS MSEC_PER_SEC
0025
0026 static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
0027 unsigned int qdio_err)
0028 {
0029 struct zfcp_adapter *adapter = qdio->adapter;
0030
0031 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
0032
0033 if (qdio_err & QDIO_ERROR_SLSB_STATE) {
0034 zfcp_qdio_siosl(adapter);
0035 zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
0036 return;
0037 }
0038 zfcp_erp_adapter_reopen(adapter,
0039 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
0040 ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
0041 }
0042
0043 static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
0044 {
0045 int i, sbal_idx;
0046
0047 for (i = first; i < first + cnt; i++) {
0048 sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
0049 memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
0050 }
0051 }
0052
0053
/*
 * Fold the time elapsed since the previous sample into the request-queue
 * utilization counters.  Callers in this file hold qdio->stat_lock around
 * each invocation, which serializes req_q_time/req_q_util updates.
 */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	/* TOD-clock delta scaled down by 2^12 — NOTE(review): presumably
	 * microsecond granularity; confirm against the s390 TOD format. */
	span = (now - qdio->req_q_time) >> 12;
	/* Number of SBALs currently in use (not in the free pool). */
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}
0065
/*
 * Output-queue interrupt handler.  Any invocation is treated as an
 * error condition on the request queue and handed to the common
 * error handler unconditionally.
 */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *const qdio = (struct zfcp_qdio *)parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}
0074
/*
 * zfcp_qdio_request_tasklet() - reap completed request-queue SBALs.
 * @tasklet: tasklet embedded in struct zfcp_qdio (request_tasklet).
 *
 * Inspects the output queue for SBALs the hardware has finished with,
 * zeroes them for reuse, updates utilization statistics, returns them
 * to the free pool and wakes waiters.  While SBALs remain outstanding,
 * re-arms the rescan timer.
 */
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* Clear reclaimed SBALs before making them visible
			 * again via req_q_free below. */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	/* Not all SBALs are back yet: schedule another rescan later. */
	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}
0102
0103 static void zfcp_qdio_request_timer(struct timer_list *timer)
0104 {
0105 struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
0106
0107 tasklet_schedule(&qdio->request_tasklet);
0108 }
0109
0110 static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
0111 int queue_no, int idx, int count,
0112 unsigned long parm)
0113 {
0114 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
0115 struct zfcp_adapter *adapter = qdio->adapter;
0116 int sbal_no, sbal_idx;
0117
0118 if (unlikely(qdio_err)) {
0119 if (zfcp_adapter_multi_buffer_active(adapter)) {
0120 void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
0121 struct qdio_buffer_element *sbale;
0122 u64 req_id;
0123 u8 scount;
0124
0125 memset(pl, 0,
0126 ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
0127 sbale = qdio->res_q[idx]->element;
0128 req_id = sbale->addr;
0129 scount = min(sbale->scount + 1,
0130 ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
0131
0132
0133 for (sbal_no = 0; sbal_no < scount; sbal_no++) {
0134 sbal_idx = (idx + sbal_no) %
0135 QDIO_MAX_BUFFERS_PER_Q;
0136 pl[sbal_no] = qdio->res_q[sbal_idx];
0137 }
0138 zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
0139 }
0140 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
0141 return;
0142 }
0143
0144
0145
0146
0147
0148 for (sbal_no = 0; sbal_no < count; sbal_no++) {
0149 sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
0150
0151 zfcp_fsf_reqid_check(qdio, sbal_idx);
0152 }
0153
0154
0155
0156
0157 if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
0158 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
0159 }
0160
/*
 * Tasklet entry point for QDIO interrupt processing: drains the
 * response (input) queue, then re-enables the device interrupt via
 * qdio_start_irq(), rescheduling itself if work arrived meanwhile.
 */
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	/* Outstanding request SBALs? Let the request tasklet reap them. */
	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the response queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending while re-enabling IRQs: poll again. */
		tasklet_schedule(&qdio->irq_tasklet);
}
0183
0184 static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
0185 {
0186 struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;
0187
0188 tasklet_schedule(&qdio->irq_tasklet);
0189 }
0190
/*
 * Close the current SBAL and chain a new one onto the request,
 * returning the first SBALE of the new SBAL, or NULL if the request's
 * SBAL limit has been reached.
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL, wrapping at queue end */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
0225
0226 static struct qdio_buffer_element *
0227 zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
0228 {
0229 if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
0230 return zfcp_qdio_sbal_chain(qdio, q_req);
0231 q_req->sbale_curr++;
0232 return zfcp_qdio_sbale_curr(qdio, q_req);
0233 }
0234
0235
0236
0237
0238
0239
0240
0241
0242 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
0243 struct scatterlist *sg)
0244 {
0245 struct qdio_buffer_element *sbale;
0246
0247
0248 sbale = zfcp_qdio_sbale_req(qdio, q_req);
0249 sbale->sflags |= q_req->sbtype;
0250
0251 for (; sg; sg = sg_next(sg)) {
0252 sbale = zfcp_qdio_sbale_next(qdio, q_req);
0253 if (!sbale) {
0254 atomic_inc(&qdio->req_q_full);
0255 zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
0256 q_req->sbal_number);
0257 return -EINVAL;
0258 }
0259 sbale->addr = sg_phys(sg);
0260 sbale->length = sg->length;
0261 }
0262 return 0;
0263 }
0264
0265 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
0266 {
0267 if (atomic_read(&qdio->req_q_free) ||
0268 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
0269 return 1;
0270 return 0;
0271 }
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
/**
 * zfcp_qdio_sbal_get - wait for a free SBAL in the request queue
 * @qdio: pointer to struct zfcp_qdio
 *
 * NOTE(review): the wait macro takes qdio->req_q_lock, so the caller
 * must hold that lock (it is dropped while sleeping and re-acquired
 * before return) — confirm against the call sites.  May only be used
 * from process context.
 *
 * Returns: 0 on success, -EIO on timeout/interrupt or if the QDIO
 * path went down.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	/* Queues went down while we waited (or before): give up. */
	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	/* Condition became true within the timeout: an SBAL is free. */
	if (ret > 0)
		return 0;

	if (!ret) {
		/* Timed out: assume a hanging outbound queue and trigger
		 * adapter recovery. */
		atomic_inc(&qdio->req_q_full);
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
0304
0305
0306
0307
0308
0309
0310
/**
 * zfcp_qdio_send - submit a request's SBALs to the QDIO output queue
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req describing the SBAL span
 *
 * Returns: 0 on success, the qdio_add_bufs_to_output_queue() error
 * otherwise (in which case the SBALs are zeroed and returned to the
 * free pool).
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * A plain spin_lock(stat_lock) would race with the request
	 * tasklet's spin_lock_irq(stat_lock) section, but the lockdep
	 * assertion below documents that we always arrive here with
	 * IRQs already disabled (req_q_lock is taken irq-disabling by
	 * callers), which makes the plain spin_lock safe.
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	/* Claim the SBALs before handing them to the hardware. */
	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Submission failed: zero the SBALs and give them back. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* Running low on free SBALs: reap immediately, else rescan later. */
	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}
0353
0354
0355
0356
0357
0358
0359
0360 static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
0361 {
0362 int ret;
0363
0364 ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
0365 if (ret)
0366 return -ENOMEM;
0367
0368 ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
0369 if (ret)
0370 goto free_req_q;
0371
0372 init_waitqueue_head(&qdio->req_q_wq);
0373
0374 ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
0375 if (ret)
0376 goto free_res_q;
0377
0378 return 0;
0379
0380 free_res_q:
0381 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
0382 free_req_q:
0383 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
0384 return ret;
0385 }
0386
0387
0388
0389
0390
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP under req_q_lock so that no sender holding the
	 * lock can submit new buffers once the flag is gone; after this
	 * section, zfcp_qdio_send() callers see the queues as down.
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	/* Wake sleepers in zfcp_qdio_sbal_get() so they notice QDIOUP. */
	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
0425
0426 void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
0427 const struct zfcp_qdio *const qdio)
0428 {
0429 struct Scsi_Host *const shost = adapter->scsi_host;
0430
0431 if (shost == NULL)
0432 return;
0433
0434 shost->sg_tablesize = qdio->max_sbale_per_req;
0435 shost->max_sectors = qdio->max_sbale_per_req * 8;
0436 }
0437
0438
0439
0440
0441
0442
/**
 * zfcp_qdio_open - prepare and initialize the qdio queues
 * @qdio: pointer to struct zfcp_qdio
 *
 * Returns: 0 on success, otherwise -EIO.
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	/* Allow a fresh SIOSL for this bring-up cycle. */
	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	/* Multibuffer (data router) mode allows one extra SBALE per SBAL. */
	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	/* NOTE(review): two SBALEs per request appear reserved (not usable
	 * for payload) — confirm which entries they are. */
	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	/* Pre-format each response SBAL with a single empty last entry. */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for request-queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
0531
0532 void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
0533 {
0534 if (!qdio)
0535 return;
0536
0537 tasklet_kill(&qdio->irq_tasklet);
0538 tasklet_kill(&qdio->request_tasklet);
0539
0540 if (qdio->adapter->ccw_device)
0541 qdio_free(qdio->adapter->ccw_device);
0542
0543 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
0544 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
0545 kfree(qdio);
0546 }
0547
0548 int zfcp_qdio_setup(struct zfcp_adapter *adapter)
0549 {
0550 struct zfcp_qdio *qdio;
0551
0552 qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
0553 if (!qdio)
0554 return -ENOMEM;
0555
0556 qdio->adapter = adapter;
0557
0558 if (zfcp_qdio_allocate(qdio)) {
0559 kfree(qdio);
0560 return -ENOMEM;
0561 }
0562
0563 spin_lock_init(&qdio->req_q_lock);
0564 spin_lock_init(&qdio->stat_lock);
0565 timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
0566 tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
0567 tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
0568 tasklet_disable(&qdio->irq_tasklet);
0569 tasklet_disable(&qdio->request_tasklet);
0570
0571 adapter->qdio = qdio;
0572 return 0;
0573 }
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
0585
0586 void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
0587 {
0588 int rc;
0589
0590 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
0591 return;
0592
0593 rc = ccw_device_siosl(adapter->ccw_device);
0594 if (!rc)
0595 atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
0596 &adapter->status);
0597 }