/*
 * IBM Accelerator Family 'GenWQE'
 *
 * Device Driver Control Block (DDCB) queue support: setup of the
 * hardware queue, enqueueing, completion checking and purging of
 * DDCBs, plus the interrupt handlers and the worker thread which
 * drive the queue.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/crc-itu-t.h>
#include <linux/kthread.h>      /* kthread_run(), kthread_should_stop() */
#include <linux/slab.h>         /* kzalloc(), kcalloc(), kfree() */
#include <linux/uaccess.h>      /* copy_to_user() */

#include "card_base.h"
#include "card_ddcb.h"

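/*
 * Queue pointers: ddcb_act points at the oldest DDCB the driver is
 * still waiting on, ddcb_next at the slot the next request will use.
 * One slot is always kept free, so a full queue can be told apart
 * from an empty one.
 */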
static int queue_empty(struct ddcb_queue *queue)
{
        return queue->ddcb_next == queue->ddcb_act;
}

static int queue_enqueued_ddcbs(struct ddcb_queue *queue)
{
        if (queue->ddcb_next >= queue->ddcb_act)
                return queue->ddcb_next - queue->ddcb_act;

        return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next);
}

static int queue_free_ddcbs(struct ddcb_queue *queue)
{
        int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1;

        if (WARN_ON_ONCE(free_ddcbs < 0))
                return 0;

        return free_ddcbs;
}

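/*
 * Progress markers stamped into a DDCB's private bytes. A hexdump of
 * the queue shows how far each request got:
 * 0xaa = appended, 0xbb = tapped, 0xcc = cleared, 0xff = finished.
 */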
static inline void ddcb_mark_tapped(struct ddcb *pddcb)
{
        pddcb->priv[7] = 0xbb;
}

static inline void ddcb_mark_appended(struct ddcb *pddcb)
{
        pddcb->priv[7] = 0xaa;
}

static inline void ddcb_mark_cleared(struct ddcb *pddcb)
{
        pddcb->priv[6] = 0xcc;
}

static inline void ddcb_mark_finished(struct ddcb *pddcb)
{
        pddcb->priv[6] = 0xff;
}

static inline void ddcb_mark_unused(struct ddcb *pddcb)
{
        pddcb->priv_64 = cpu_to_be64(0);
}

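/*
 * genwqe_crc16() - Generate 16-bit crc as required for DDCBs
 * @buff: pointer to data buffer
 * @len:  length of data for calculation
 * @init: initial crc (0xffff at start)
 *
 * crc_itu_t() uses the polynomial the card expects:
 * x^16 + x^12 + x^5 + 1 (0x1021).
 */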
static inline u16 genwqe_crc16(const u8 *buff, size_t len, u16 init)
{
        return crc_itu_t(init, buff, len);
}

static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
        int i;
        struct ddcb *pddcb;
        unsigned long flags;
        struct pci_dev *pci_dev = cd->pci_dev;

        spin_lock_irqsave(&cd->print_lock, flags);

        /* this dump is only used on error paths, so log consistently
         * at error level (the header used to be dev_info) */
        dev_err(&pci_dev->dev,
                "DDCB list for card #%d (ddcb_act=%d / ddcb_next=%d):\n",
                cd->card_idx, queue->ddcb_act, queue->ddcb_next);

        pddcb = queue->ddcb_vaddr;
        for (i = 0; i < queue->ddcb_max; i++) {
                dev_err(&pci_dev->dev,
                        "  %c %-3d: RETC=%03x SEQ=%04x HSI=%02X SHI=%02x PRIV=%06llx CMD=%03x\n",
                        i == queue->ddcb_act ? '>' : ' ',
                        i,
                        be16_to_cpu(pddcb->retc_16),
                        be16_to_cpu(pddcb->seqnum_16),
                        pddcb->hsi,
                        pddcb->shi,
                        be64_to_cpu(pddcb->priv_64),
                        pddcb->cmd);
                pddcb++;
        }
        spin_unlock_irqrestore(&cd->print_lock, flags);
}

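/*
 * The genwqe_ddcb_cmd handed out to callers is embedded in the larger
 * struct ddcb_requ; container_of() recovers the request from the
 * command pointer.
 */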
struct genwqe_ddcb_cmd *ddcb_requ_alloc(void)
{
        struct ddcb_requ *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return NULL;

        return &req->cmd;
}

void ddcb_requ_free(struct genwqe_ddcb_cmd *cmd)
{
        struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

        kfree(req);
}

static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req)
{
        return req->req_state;
}

static inline void ddcb_requ_set_state(struct ddcb_requ *req,
                                       enum genwqe_requ_state new_state)
{
        req->req_state = new_state;
}

static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req)
{
        return req->cmd.ddata_addr != 0x0;
}

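/*
 * ddcb_requ_finished() - Condition for the waiters
 *
 * True once the request reached the FINISHED state, or when the card
 * is no longer usable, so that waiters do not hang on a dead card.
 */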
static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req)
{
        return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) ||
                (cd->card_state != GENWQE_CARD_USED);
}

#define RET_DDCB_APPENDED 1
#define RET_DDCB_TAPPED   2

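/*
 * enqueue_ddcb() - Enqueue a DDCB
 * @cd:      pointer to genwqe device descriptor
 * @queue:   queue this operation should be done on
 * @pddcb:   pointer to ddcb structure
 * @ddcb_no: number of the ddcb being enqueued
 *
 * If the previous DDCB is not yet completed, the new one is appended
 * to it by atomically setting the NEXT bit in the predecessor's
 * ICRC/HSI/SHI word (cmpxchg). If that race is lost because the
 * predecessor completed first, the queue is restarted ("tapped") by
 * writing the DDCB number to the queue offset register.
 *
 * Return: RET_DDCB_APPENDED or RET_DDCB_TAPPED
 */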
static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
                        struct ddcb *pddcb, int ddcb_no)
{
        unsigned int try;
        int prev_no;
        struct ddcb *prev_ddcb;
        __be32 old, new, icrc_hsi_shi;
        u64 num;

        /* clear the private progress-marker area first */
        ddcb_mark_unused(pddcb);

        /* check previous DDCB if already fetched */
        prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1;
        prev_ddcb = &queue->ddcb_vaddr[prev_no];

        /*
         * As long as the previous DDCB is not completed, the new one
         * can be appended to it by setting the NEXT bit.
         */
        ddcb_mark_appended(pddcb);
        for (try = 0; try < 2; try++) {
                old = prev_ddcb->icrc_hsi_shi_32; /* read SHI/HSI in BE32 */

                /* try to append via NEXT bit if prev DDCB is not completed */
                if ((old & DDCB_COMPLETED_BE32) != 0x00000000)
                        break;

                new = (old | DDCB_NEXT_BE32);

                wmb();          /* need to ensure write ordering */
                icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);

                if (icrc_hsi_shi == old)
                        return RET_DDCB_APPENDED; /* appended to queue */
        }

        /* queue must be restarted by updating the QUEUE_OFFSET register */
        ddcb_mark_tapped(pddcb);
        num = (u64)ddcb_no << 8;

        wmb();                  /* need to ensure write ordering */
        __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */

        return RET_DDCB_TAPPED;
}

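/*
 * copy_ddcb_results() - Copy output state from real DDCB to request
 *
 * Copy DDCB ASV to request struct. There is no endian conversion
 * made, since the data structure in ASV is still unknown here.
 */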
static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no)
{
        struct ddcb_queue *queue = req->queue;
        struct ddcb *pddcb = &queue->ddcb_vaddr[req->num];

        memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH);

        /* copy status flags of the variant part */
        req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16);
        req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64);
        req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64);

        req->cmd.attn = be16_to_cpu(pddcb->attn_16);
        req->cmd.progress = be32_to_cpu(pddcb->progress_32);
        req->cmd.retc = be16_to_cpu(pddcb->retc_16);

        if (ddcb_requ_collect_debug_data(req)) {
                int prev_no = (ddcb_no == 0) ?
                        queue->ddcb_max - 1 : ddcb_no - 1;
                struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no];

                memcpy(&req->debug_data.ddcb_finished, pddcb,
                       sizeof(req->debug_data.ddcb_finished));
                memcpy(&req->debug_data.ddcb_prev, prev_pddcb,
                       sizeof(req->debug_data.ddcb_prev));
        }
}

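/*
 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests
 *
 * Return: Number of DDCBs which were finished
 */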
static int genwqe_check_ddcb_queue(struct genwqe_dev *cd,
                                   struct ddcb_queue *queue)
{
        unsigned long flags;
        int ddcbs_finished = 0;
        struct pci_dev *pci_dev = cd->pci_dev;

        spin_lock_irqsave(&queue->ddcb_lock, flags);

        /* process completions in order; at most one full queue per call */
        while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) {

                struct ddcb *pddcb;
                struct ddcb_requ *req;
                u16 vcrc, vcrc_16, retc_16;

                pddcb = &queue->ddcb_vaddr[queue->ddcb_act];

                if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) ==
                    0x00000000)
                        goto go_home; /* not completed, continue waiting */

                wmb(); /* sync to decouple the previous read */

                /* Note: DDCB could be purged */
                req = queue->ddcb_req[queue->ddcb_act];
                if (req == NULL) {
                        /* this occurs if DDCB is purged, not an error */
                        /* move active DDCB further; nothing to do here */
                        goto pick_next_one;
                }

                /*
                 * If the queue is seen in an inconsistent state, e.g.
                 * fetched and completed (HSI=0x44) but RETC still at
                 * 0x101 or even 0x000, read the error counters and
                 * the queue status to aid debugging.
                 */
                retc_16 = be16_to_cpu(pddcb->retc_16);
                if ((pddcb->hsi == 0x44) && (retc_16 <= 0x101)) {
                        u64 errcnts, status;
                        u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr;

                        errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS);
                        status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

                        dev_err(&pci_dev->dev,
                                "[%s] SEQN=%04x HSI=%02x RETC=%03x Q_ERRCNTS=%016llx Q_STATUS=%016llx DDCB_DMA_ADDR=%016llx\n",
                                __func__, be16_to_cpu(pddcb->seqnum_16),
                                pddcb->hsi, retc_16, errcnts, status,
                                queue->ddcb_daddr + ddcb_offs);
                }

                copy_ddcb_results(req, queue->ddcb_act);
                queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */

                dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num);
                genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

                ddcb_mark_finished(pddcb);

                /* calculate CRC_16 to see if VCRC is correct */
                vcrc = genwqe_crc16(pddcb->asv,
                                    VCRC_LENGTH(req->cmd.asv_length),
                                    0xffff);
                vcrc_16 = be16_to_cpu(pddcb->vcrc_16);
                if (vcrc != vcrc_16) {
                        printk_ratelimited(KERN_ERR
                                "%s %s: err: wrong VCRC pre=%02x vcrc_len=%d bytes vcrc_data=%04x is not vcrc_card=%04x\n",
                                GENWQE_DEVNAME, dev_name(&pci_dev->dev),
                                pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
                                vcrc, vcrc_16);
                }

                ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
                queue->ddcbs_completed++;
                queue->ddcbs_in_flight--;

                /* wake up process waiting for this DDCB, as well as
                   processes on the busy waitqueue */
                wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
                wake_up_interruptible(&queue->busy_waitq);

pick_next_one:
                queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
                ddcbs_finished++;
        }

go_home:
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);
        return ddcbs_finished;
}

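/*
 * __genwqe_wait_ddcb() - Waits until DDCB is completed
 * @cd:  pointer to genwqe device descriptor
 * @req: pointer to requested DDCB parameters
 *
 * Return: >0 remaining jiffies, DDCB completed
 *         -ETIMEDOUT   when timeout
 *         -ERESTARTSYS when ^C
 *         -EINVAL      when unknown error condition
 */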
int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
        int rc;
        unsigned int ddcb_no;
        struct ddcb_queue *queue;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (req == NULL)
                return -EINVAL;

        queue = req->queue;
        if (queue == NULL)
                return -EINVAL;

        ddcb_no = req->num;
        if (ddcb_no >= queue->ddcb_max)
                return -EINVAL;

        rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no],
                                ddcb_requ_finished(cd, req),
                                GENWQE_DDCB_SOFTWARE_TIMEOUT * HZ);

        /*
         * We need to distinguish 3 cases here:
         *   1. rc == 0              timeout occurred
         *   2. rc == -ERESTARTSYS   signal received
         *   3. rc > 0               remaining jiffies, condition is true
         */
        if (rc == 0) {
                struct ddcb *pddcb;

                /*
                 * Timeout may be caused by long task switching time.
                 * When timeout happens, check if the request has
                 * meanwhile completed.
                 */
                genwqe_check_ddcb_queue(cd, req->queue);
                if (ddcb_requ_finished(cd, req))
                        return rc;

                dev_err(&pci_dev->dev,
                        "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n",
                        __func__, req->num, rc, ddcb_requ_get_state(req),
                        req);
                dev_err(&pci_dev->dev,
                        "[%s] IO_QUEUE_STATUS=0x%016llx\n", __func__,
                        __genwqe_readq(cd, queue->IO_QUEUE_STATUS));

                pddcb = &queue->ddcb_vaddr[req->num];
                genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

                print_ddcb_info(cd, req->queue);
                return -ETIMEDOUT;

        } else if (rc == -ERESTARTSYS) {
                return rc;
                /*
                 * EINTR:       stops the application
                 * ERESTARTSYS: restartable systemcall; called again
                 */
        } else if (rc < 0) {
                dev_err(&pci_dev->dev,
                        "[%s] err: DDCB#%d unknown result (rc=%d, state=%d)!\n",
                        __func__, req->num, rc, ddcb_requ_get_state(req));
                return -EINVAL;
        }

        /* severe error occurred; driver is forced to stop operation */
        if (cd->card_state != GENWQE_CARD_USED) {
                dev_err(&pci_dev->dev,
                        "[%s] err: DDCB#%d forced to stop (rc=%d)\n",
                        __func__, req->num, rc);
                return -EIO;
        }
        return rc;
}

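/*
 * get_next_ddcb() - Get next available DDCB
 * @cd:    pointer to genwqe device descriptor
 * @queue: DDCB queue
 * @num:   internal DDCB number
 *
 * The DDCB's content is cleared except for presets of PRE and SEQNUM.
 * Must be called with the ddcb_lock held.
 *
 * Return: NULL if no empty DDCB available, otherwise ptr to next DDCB
 */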
static struct ddcb *get_next_ddcb(struct genwqe_dev *cd,
                                  struct ddcb_queue *queue,
                                  int *num)
{
        u64 *pu64;
        struct ddcb *pddcb;

        if (queue_free_ddcbs(queue) == 0)
                return NULL;

        /* this is the next DDCB slot to be handed out */
        pddcb = &queue->ddcb_vaddr[queue->ddcb_next];

        /* check whether the "next" DDCB is usable/free */
        if ((pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) == 0x00000000)
                return NULL;

        *num = queue->ddcb_next;        /* internal DDCB number */
        queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max;

        /* clear important DDCB fields */
        pu64 = (u64 *)pddcb;
        pu64[0] = 0ULL;         /* offs 0x00 (ICRC,HSI,SHI,...) */
        pu64[1] = 0ULL;         /* offs 0x08 (ACFUNC,CMD,...) */

        /* destroy previous results in ASV */
        pu64[0x80/8] = 0ULL;    /* offs 0x80 (ASV + 0) */
        pu64[0x88/8] = 0ULL;    /* offs 0x88 (ASV + 0x08) */
        pu64[0x90/8] = 0ULL;    /* offs 0x90 (ASV + 0x10) */
        pu64[0x98/8] = 0ULL;    /* offs 0x98 (ASV + 0x18) */
        pu64[0xd0/8] = 0ULL;    /* offs 0xd0 (RETC,ATTN,...) */

        pddcb->pre = DDCB_PRESET_PRE;
        pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++);
        return pddcb;
}

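/*
 * __genwqe_purge_ddcb() - Remove a DDCB from the driver queue
 * @cd:  genwqe device descriptor
 * @req: DDCB request
 *
 * Setting the PURGE bit only works while the DDCB has not yet been
 * fetched by the hardware. Otherwise we poll, up to the software
 * timeout, for the completion bit and finish the DDCB normally. In
 * every successful case the request is removed from ddcb_req[], so
 * this must be called whenever __genwqe_wait_ddcb() fails.
 *
 * Return: 0 success
 */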
int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
{
        struct ddcb *pddcb = NULL;
        unsigned int t;
        unsigned long flags;
        struct ddcb_queue *queue = req->queue;
        struct pci_dev *pci_dev = cd->pci_dev;
        u64 queue_status;
        __be32 icrc_hsi_shi = 0x0000;
        __be32 old, new;

        if (GENWQE_DDCB_SOFTWARE_TIMEOUT <= 0) {
                dev_err(&pci_dev->dev,
                        "[%s] err: software timeout is not set!\n", __func__);
                return -EFAULT;
        }

        pddcb = &queue->ddcb_vaddr[req->num];

        for (t = 0; t < GENWQE_DDCB_SOFTWARE_TIMEOUT * 10; t++) {

                spin_lock_irqsave(&queue->ddcb_lock, flags);

                /* check if the request was meanwhile finished */
                if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED)
                        goto go_home;

                /* try to set PURGE bit if FETCHED/COMPLETED are not set */
                old = pddcb->icrc_hsi_shi_32;   /* read SHI/HSI in BE32 */
                if ((old & DDCB_FETCHED_BE32) == 0x00000000) {

                        new = (old | DDCB_PURGE_BE32);
                        icrc_hsi_shi = cmpxchg(&pddcb->icrc_hsi_shi_32,
                                               old, new);
                        if (icrc_hsi_shi == old)
                                goto finish_ddcb;
                }

                /* normal finish with HSI bit */
                barrier();
                icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
                if (icrc_hsi_shi & DDCB_COMPLETED_BE32)
                        goto finish_ddcb;

                spin_unlock_irqrestore(&queue->ddcb_lock, flags);

                /*
                 * Here genwqe_check_ddcb_queue() will most likely
                 * discover this DDCB to be finished at some point in
                 * time. It will mark the req finished and free it up
                 * in the list.
                 */
                copy_ddcb_results(req, req->num); /* for the failing case */
                msleep(100); /* sleep 1/10 second and try again */
                continue;

finish_ddcb:
                copy_ddcb_results(req, req->num);
                ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
                queue->ddcbs_in_flight--;
                queue->ddcb_req[req->num] = NULL; /* delete from array */
                ddcb_mark_cleared(pddcb);

                /*
                 * We must ensure that there is at least one valid
                 * DDCB in the queue. Therefore ddcb_act is only
                 * advanced if the COMPLETED bit is set for the DDCB
                 * we are working on; a merely PURGED DDCB is still
                 * treated as being in use.
                 */
                icrc_hsi_shi = pddcb->icrc_hsi_shi_32;
                if ((icrc_hsi_shi & DDCB_COMPLETED_BE32) &&
                    (queue->ddcb_act == req->num)) {
                        queue->ddcb_act = ((queue->ddcb_act + 1) %
                                           queue->ddcb_max);
                }
go_home:
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);
                return 0;
        }

        /*
         * If the card is dead and the queue is forced to stop, we
         * might see this in the queue status register.
         */
        queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS);

        dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num);
        genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

        dev_err(&pci_dev->dev,
                "[%s] err: DDCB#%d not purged and not completed after %d seconds QSTAT=%016llx!!\n",
                __func__, req->num, GENWQE_DDCB_SOFTWARE_TIMEOUT,
                queue_status);

        print_ddcb_info(cd, req->queue);

        return -EFAULT;
}

int genwqe_init_debug_data(struct genwqe_dev *cd, struct genwqe_debug_data *d)
{
        int len;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (d == NULL) {
                dev_err(&pci_dev->dev,
                        "[%s] err: invalid memory for debug data!\n",
                        __func__);
                return -EFAULT;
        }

        len = sizeof(d->driver_version);
        snprintf(d->driver_version, len, "%s", DRV_VERSION);
        d->slu_unitcfg = cd->slu_unitcfg;
        d->app_unitcfg = cd->app_unitcfg;
        return 0;
}

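/*
 * __genwqe_enqueue_ddcb() - Enqueue a DDCB
 * @cd:      pointer to genwqe device descriptor
 * @req:     pointer to DDCB execution request
 * @f_flags: file mode: blocking, non-blocking
 *
 * Return: 0 if enqueuing succeeded
 *         -EIO if card is unusable/PCIe problems
 *         -EBUSY if enqueuing failed (O_NONBLOCK and queue full)
 */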
int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
                          unsigned int f_flags)
{
        struct ddcb *pddcb;
        unsigned long flags;
        struct ddcb_queue *queue;
        struct pci_dev *pci_dev = cd->pci_dev;
        u16 icrc;

retry:
        if (cd->card_state != GENWQE_CARD_USED) {
                printk_ratelimited(KERN_ERR
                        "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
                        GENWQE_DEVNAME, dev_name(&pci_dev->dev),
                        __func__, req->num);
                return -EIO;
        }

        queue = req->queue = &cd->queue;

        /* in polling mode no interrupt kicks the queue, so reap
         * completed DDCBs here to make room */
        if (GENWQE_POLLING_ENABLED)
                genwqe_check_ddcb_queue(cd, queue);

        /*
         * It must be ensured that all DDCBs are processed in
         * successive order. Use a lock here to prevent nested DDCB
         * enqueuing.
         */
        spin_lock_irqsave(&queue->ddcb_lock, flags);

        pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */
        if (pddcb == NULL) {
                int rc;

                spin_unlock_irqrestore(&queue->ddcb_lock, flags);

                if (f_flags & O_NONBLOCK) {
                        queue->return_on_busy++;
                        return -EBUSY;
                }

                queue->wait_on_busy++;
                rc = wait_event_interruptible(queue->busy_waitq,
                                              queue_free_ddcbs(queue) != 0);
                dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n",
                        __func__, rc);
                if (rc == -ERESTARTSYS)
                        return rc;  /* interrupted by a signal */

                goto retry;
        }

        if (queue->ddcb_req[req->num] != NULL) {
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);

                dev_err(&pci_dev->dev,
                        "[%s] picked DDCB %d with req=%p still in use!!\n",
                        __func__, req->num, req);
                return -EFAULT;
        }
        ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED);
        queue->ddcb_req[req->num] = req;

        pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts);
        pddcb->cmd = req->cmd.cmd;
        pddcb->acfunc = req->cmd.acfunc;        /* functional unit */

        /* XDIR = 1 is only supported by SLU bitstreams newer than
         * ID 0x34199 */
        if ((cd->slu_unitcfg & 0xFFFF0ull) > 0x34199ull)
                pddcb->xdir = 0x1;
        else
                pddcb->xdir = 0x0;

        /* number of 64-bit words: ASIV in the high nibble, ASV low */
        pddcb->psp = (((req->cmd.asiv_length / 8) << 4) |
                      ((req->cmd.asv_length / 8)));
        pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts);

        /*
         * How much data is copied depends on the availability of the
         * ATS field. Old SLUs (id <= 2) have no ATS word, so the
         * whole ASIV is copied; newer layouts carry the ATS word
         * explicitly, followed by a correspondingly shorter ASIV.
         */
        if (genwqe_get_slu_id(cd) <= 0x2) {
                memcpy(&pddcb->__asiv[0],
                       &req->cmd.__asiv[0],
                       DDCB_ASIV_LENGTH);
        } else {
                pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats);
                memcpy(&pddcb->n.asiv[0],
                       &req->cmd.asiv[0],
                       DDCB_ASIV_LENGTH_ATS);
        }

        pddcb->icrc_hsi_shi_32 = cpu_to_be32(0x00000000); /* for crc */

        /*
         * Calculate the ICRC with the ICRC/HSI/SHI word zeroed; the
         * result is stored in its upper 16 bits.
         */
        icrc = genwqe_crc16((const u8 *)pddcb,
                            ICRC_LENGTH(req->cmd.asiv_length), 0xffff);
        pddcb->icrc_hsi_shi_32 = cpu_to_be32((u32)icrc << 16);

        /* enable DDCB completion irq */
        if (!GENWQE_POLLING_ENABLED)
                pddcb->icrc_hsi_shi_32 |= DDCB_INTR_BE32;

        dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num);
        genwqe_hexdump(pci_dev, pddcb, sizeof(*pddcb));

        if (ddcb_requ_collect_debug_data(req)) {
                /* use the kernel copy of debug data; copying back to
                   the user buffer happens later */
                genwqe_init_debug_data(cd, &req->debug_data);
                memcpy(&req->debug_data.ddcb_before, pddcb,
                       sizeof(req->debug_data.ddcb_before));
        }

        enqueue_ddcb(cd, queue, pddcb, req->num);
        queue->ddcbs_in_flight++;

        if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight)
                queue->ddcbs_max_in_flight = queue->ddcbs_in_flight;

        ddcb_requ_set_state(req, GENWQE_REQU_TAPPED);
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);
        wake_up_interruptible(&cd->queue_waitq);

        return 0;
}

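/*
 * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
 * @cd:      pointer to genwqe device descriptor
 * @cmd:     user provided DDCB command
 * @f_flags: file mode: blocking, non-blocking
 */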
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
                              struct genwqe_ddcb_cmd *cmd,
                              unsigned int f_flags)
{
        int rc = 0;
        struct pci_dev *pci_dev = cd->pci_dev;
        struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

        if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
                dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
                        __func__, cmd->asiv_length);
                return -EINVAL;
        }
        if (cmd->asv_length > DDCB_ASV_LENGTH) {
                dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
                        __func__, cmd->asv_length);
                return -EINVAL;
        }
        rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
        if (rc != 0)
                return rc;

        rc = __genwqe_wait_ddcb(cd, req);
        if (rc < 0)             /* error or signal interrupt */
                goto err_exit;

        if (ddcb_requ_collect_debug_data(req)) {
                if (copy_to_user((struct genwqe_debug_data __user *)
                                 (unsigned long)cmd->ddata_addr,
                                 &req->debug_data,
                                 sizeof(struct genwqe_debug_data)))
                        return -EFAULT;
        }

        /*
         * Any RETC other than DDCB_RETC_COMPLETE (e.g. a killed
         * command) is reported to the caller as -EBADMSG.
         */
        if (cmd->retc != DDCB_RETC_COMPLETE) {
                /* This might happen e.g. in case of CMD_KILLED */
                rc = -EBADMSG;
        }

        return rc;

err_exit:
        __genwqe_purge_ddcb(cd, req);

        if (ddcb_requ_collect_debug_data(req)) {
                if (copy_to_user((struct genwqe_debug_data __user *)
                                 (unsigned long)cmd->ddata_addr,
                                 &req->debug_data,
                                 sizeof(struct genwqe_debug_data)))
                        return -EFAULT;
        }
        return rc;
}

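/*
 * genwqe_next_ddcb_ready() - Figure out if the next DDCB is already finished
 *
 * Used as wait condition by the queue worker thread.
 */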
static int genwqe_next_ddcb_ready(struct genwqe_dev *cd)
{
        unsigned long flags;
        struct ddcb *pddcb;
        struct ddcb_queue *queue = &cd->queue;

        spin_lock_irqsave(&queue->ddcb_lock, flags);

        if (queue_empty(queue)) {
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);
                return 0;
        }

        pddcb = &queue->ddcb_vaddr[queue->ddcb_act];
        if (pddcb->icrc_hsi_shi_32 & DDCB_COMPLETED_BE32) {
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);
                return 1;
        }

        spin_unlock_irqrestore(&queue->ddcb_lock, flags);
        return 0;
}

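/*
 * genwqe_ddcbs_in_flight() - Check how many DDCBs are in flight
 *
 * Used for statistics as well as for the drain condition in
 * genwqe_finish_queue().
 */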
int genwqe_ddcbs_in_flight(struct genwqe_dev *cd)
{
        unsigned long flags;
        int ddcbs_in_flight = 0;
        struct ddcb_queue *queue = &cd->queue;

        spin_lock_irqsave(&queue->ddcb_lock, flags);
        ddcbs_in_flight += queue->ddcbs_in_flight;
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);

        return ddcbs_in_flight;
}

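/*
 * setup_ddcb_queue() - Allocate DDCBs and make them ready for execution
 *
 * All DDCBs are pre-marked COMPLETED so that the very first request
 * can be appended; finally the hardware queue registers (segment
 * base, initial sequence number, wrap position) are programmed.
 */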
static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
        int rc, i;
        struct ddcb *pddcb;
        u64 val64;
        unsigned int queue_size;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (GENWQE_DDCB_MAX < 2)
                return -EINVAL;

        queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);

        queue->ddcbs_in_flight = 0;     /* statistics */
        queue->ddcbs_max_in_flight = 0;
        queue->ddcbs_completed = 0;
        queue->return_on_busy = 0;
        queue->wait_on_busy = 0;

        queue->ddcb_seq = 0x100;        /* start sequence number */
        queue->ddcb_max = GENWQE_DDCB_MAX;
        queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
                                                      &queue->ddcb_daddr);
        if (queue->ddcb_vaddr == NULL) {
                dev_err(&pci_dev->dev,
                        "[%s] **err: could not allocate DDCB **\n", __func__);
                return -ENOMEM;
        }
        queue->ddcb_req = kcalloc(queue->ddcb_max, sizeof(struct ddcb_requ *),
                                  GFP_KERNEL);
        if (!queue->ddcb_req) {
                rc = -ENOMEM;
                goto free_ddcbs;
        }

        queue->ddcb_waitqs = kcalloc(queue->ddcb_max,
                                     sizeof(wait_queue_head_t),
                                     GFP_KERNEL);
        if (!queue->ddcb_waitqs) {
                rc = -ENOMEM;
                goto free_requs;
        }

        for (i = 0; i < queue->ddcb_max; i++) {
                pddcb = &queue->ddcb_vaddr[i];
                pddcb->icrc_hsi_shi_32 = DDCB_COMPLETED_BE32;
                pddcb->retc_16 = cpu_to_be16(0xfff);

                queue->ddcb_req[i] = NULL;
                init_waitqueue_head(&queue->ddcb_waitqs[i]);
        }

        queue->ddcb_act = 0;
        queue->ddcb_next = 0;

        spin_lock_init(&queue->ddcb_lock);
        init_waitqueue_head(&queue->busy_waitq);

        val64 = ((u64)(queue->ddcb_max - 1) << 8); /* wrap position */
        __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07);
        __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
        __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
        __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
        return 0;

free_requs:
        kfree(queue->ddcb_req);
        queue->ddcb_req = NULL;
free_ddcbs:
        __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
                                 queue->ddcb_daddr);
        queue->ddcb_vaddr = NULL;
        queue->ddcb_daddr = 0ull;
        return rc;
}

static int ddcb_queue_initialized(struct ddcb_queue *queue)
{
        return queue->ddcb_vaddr != NULL;
}

static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue)
{
        unsigned int queue_size;

        queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);

        kfree(queue->ddcb_req);
        queue->ddcb_req = NULL;

        /* free the waitqueue array as well; it was allocated in
         * setup_ddcb_queue() and would otherwise leak */
        kfree(queue->ddcb_waitqs);
        queue->ddcb_waitqs = NULL;

        if (queue->ddcb_vaddr) {
                __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,
                                         queue->ddcb_daddr);
                queue->ddcb_vaddr = NULL;
                queue->ddcb_daddr = 0ull;
        }
}

static irqreturn_t genwqe_pf_isr(int irq, void *dev_id)
{
        u64 gfir;
        struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;
        struct pci_dev *pci_dev = cd->pci_dev;

        /*
         * In case of a fatal FIR error the queue is stopped, such
         * that we can safely check it without risking anything.
         */
        cd->irqs_processed++;
        wake_up_interruptible(&cd->queue_waitq);

        /*
         * Checking for errors before kicking the queue might be
         * safer, but slower for the good case ... See above.
         */
        gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
        if (((gfir & GFIR_ERR_TRIGGER) != 0x0) &&
            !pci_channel_offline(pci_dev)) {

                if (cd->use_platform_recovery) {
                        /*
                         * Since we use raw accessors, EEH errors won't
                         * be detected by the platform until we do a
                         * non-raw MMIO or config space read
                         */
                        readq(cd->mmio + IO_SLC_CFGREG_GFIR);

                        /* Don't do anything if the PCI channel is frozen */
                        if (pci_channel_offline(pci_dev))
                                goto exit;
                }

                wake_up_interruptible(&cd->health_waitq);

                /*
                 * By default GFIRs cause recovery actions. This
                 * count is just for debug when recovery is masked.
                 */
                dev_err_ratelimited(&pci_dev->dev,
                                    "[%s] GFIR=%016llx\n",
                                    __func__, gfir);
        }

exit:
        return IRQ_HANDLED;
}

static irqreturn_t genwqe_vf_isr(int irq, void *dev_id)
{
        struct genwqe_dev *cd = (struct genwqe_dev *)dev_id;

        cd->irqs_processed++;
        wake_up_interruptible(&cd->queue_waitq);

        return IRQ_HANDLED;
}

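/*
 * genwqe_card_thread() - Work thread for the DDCB queue
 *
 * Checks for finished DDCBs, processes them and wakes up the
 * applications waiting for them.
 */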
static int genwqe_card_thread(void *data)
{
        int should_stop = 0;
        struct genwqe_dev *cd = (struct genwqe_dev *)data;

        while (!kthread_should_stop()) {

                genwqe_check_ddcb_queue(cd, &cd->queue);

                if (GENWQE_POLLING_ENABLED) {
                        wait_event_interruptible_timeout(
                                cd->queue_waitq,
                                genwqe_ddcbs_in_flight(cd) ||
                                (should_stop = kthread_should_stop()), 1);
                } else {
                        wait_event_interruptible_timeout(
                                cd->queue_waitq,
                                genwqe_next_ddcb_ready(cd) ||
                                (should_stop = kthread_should_stop()), HZ);
                }
                if (should_stop)
                        break;

                /*
                 * Avoid soft lockups on heavy loads; we do not want
                 * to disable our interrupts.
                 */
                cond_resched();
        }
        return 0;
}

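/*
 * genwqe_setup_service_layer() - Setup DDCB queue
 * @cd: pointer to genwqe device descriptor
 *
 * Allocates DDCBs for the queue, programs the queue registers, starts
 * the queue worker thread and installs the (shared) interrupt
 * handler.
 *
 * Return: 0 success
 */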
int genwqe_setup_service_layer(struct genwqe_dev *cd)
{
        int rc;
        struct ddcb_queue *queue;
        struct pci_dev *pci_dev = cd->pci_dev;

        if (genwqe_is_privileged(cd)) {
                rc = genwqe_card_reset(cd);
                if (rc < 0) {
                        dev_err(&pci_dev->dev,
                                "[%s] err: reset failed.\n", __func__);
                        return rc;
                }
                genwqe_read_softreset(cd);
        }

        queue = &cd->queue;
        queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG;
        queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS;
        queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT;
        queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN;
        queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET;
        queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP;
        queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME;
        queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS;
        queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW;

        rc = setup_ddcb_queue(cd, queue);
        if (rc != 0) {
                rc = -ENODEV;
                goto err_out;
        }

        init_waitqueue_head(&cd->queue_waitq);
        cd->card_thread = kthread_run(genwqe_card_thread, cd,
                                      GENWQE_DEVNAME "%d_thread",
                                      cd->card_idx);
        if (IS_ERR(cd->card_thread)) {
                rc = PTR_ERR(cd->card_thread);
                cd->card_thread = NULL;
                goto stop_free_queue;
        }

        rc = genwqe_set_interrupt_capability(cd, GENWQE_MSI_IRQS);
        if (rc)
                goto stop_kthread;

        /*
         * We must have all wait-queues initialized when we enable
         * the interrupts. Otherwise we might crash if we get an
         * early irq.
         */
        init_waitqueue_head(&cd->health_waitq);

        if (genwqe_is_privileged(cd)) {
                rc = request_irq(pci_dev->irq, genwqe_pf_isr, IRQF_SHARED,
                                 GENWQE_DEVNAME, cd);
        } else {
                rc = request_irq(pci_dev->irq, genwqe_vf_isr, IRQF_SHARED,
                                 GENWQE_DEVNAME, cd);
        }
        if (rc < 0) {
                dev_err(&pci_dev->dev, "irq %d not free.\n", pci_dev->irq);
                goto stop_irq_cap;
        }

        cd->card_state = GENWQE_CARD_USED;
        return 0;

stop_irq_cap:
        genwqe_reset_interrupt_capability(cd);
stop_kthread:
        kthread_stop(cd->card_thread);
        cd->card_thread = NULL;
stop_free_queue:
        free_ddcb_queue(cd, queue);
err_out:
        return rc;
}

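/*
 * queue_wake_up_all() - Handles the fatal error case
 *
 * The PCI device became unusable; wake up every waiter so that
 * pending requests can be aborted as fast as possible.
 */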
static int queue_wake_up_all(struct genwqe_dev *cd)
{
        unsigned int i;
        unsigned long flags;
        struct ddcb_queue *queue = &cd->queue;

        spin_lock_irqsave(&queue->ddcb_lock, flags);

        /* wake the waiter of each DDCB slot, not just ddcb_act */
        for (i = 0; i < queue->ddcb_max; i++)
                wake_up_interruptible(&queue->ddcb_waitqs[i]);

        wake_up_interruptible(&queue->busy_waitq);
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);

        return 0;
}

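/*
 * genwqe_finish_queue() - Stop the queue and drain in-flight DDCBs
 *
 * Marks the card unused so no new requests are accepted, wakes all
 * waiters, and then waits up to the software timeout until no DDCBs
 * are in flight anymore.
 *
 * This function must be robust enough to be called twice.
 */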
int genwqe_finish_queue(struct genwqe_dev *cd)
{
        int i, rc = 0, in_flight;
        int waitmax = GENWQE_DDCB_SOFTWARE_TIMEOUT;
        struct pci_dev *pci_dev = cd->pci_dev;
        struct ddcb_queue *queue = &cd->queue;

        if (!ddcb_queue_initialized(queue))
                return 0;

        /* Do not wipe out the error state. */
        if (cd->card_state == GENWQE_CARD_USED)
                cd->card_state = GENWQE_CARD_UNUSED;

        /* Wake up all requests in the DDCB queue such that they
           should be removed nicely. */
        queue_wake_up_all(cd);

        /* We must wait to get rid of the DDCBs in flight */
        for (i = 0; i < waitmax; i++) {
                in_flight = genwqe_ddcbs_in_flight(cd);

                if (in_flight == 0)
                        break;

                dev_dbg(&pci_dev->dev,
                        "  DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n",
                        i, waitmax, in_flight);

                /*
                 * The hardware may need several seconds to drain
                 * outstanding work, so poll once per second up to
                 * the software timeout.
                 */
                msleep(1000);
        }
        if (i == waitmax) {
                dev_err(&pci_dev->dev, "  [%s] err: queue is not empty!!\n",
                        __func__);
                rc = -EIO;
        }
        return rc;
}

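/*
 * genwqe_release_service_layer() - Shutdown DDCB queue
 * @cd: genwqe device descriptor
 *
 * Frees the interrupt, stops the worker thread and releases the DDCB
 * queue memory. Safe to call again once the queue is gone.
 */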
int genwqe_release_service_layer(struct genwqe_dev *cd)
{
        struct pci_dev *pci_dev = cd->pci_dev;

        if (!ddcb_queue_initialized(&cd->queue))
                return 1;

        free_irq(pci_dev->irq, cd);
        genwqe_reset_interrupt_capability(cd);

        if (cd->card_thread != NULL) {
                kthread_stop(cd->card_thread);
                cd->card_thread = NULL;
        }

        free_ddcb_queue(cd, &cd->queue);
        return 0;
}