0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045 #include <linux/io.h>
0046 #include <linux/pci.h>
0047 #include <linux/poll.h>
0048 #include <linux/vmalloc.h>
0049 #include <linux/export.h>
0050 #include <linux/fs.h>
0051 #include <linux/uaccess.h>
0052
0053 #include "qib.h"
0054 #include "qib_common.h"
0055
0056 #undef pr_fmt
0057 #define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
0058
0059
0060
0061
0062
0063
/*
 * Per-client lifecycle: UNUSED (on free pool) -> OPENED (open()) ->
 * INIT (first successful read) -> READY (first successful write).
 */
enum diag_state { UNUSED = 0, OPENED, INIT, READY };
0065
0066
/*
 * Diag client bookkeeping. Freed clients are kept on a simple singly
 * linked free pool (client_pool) for reuse; per-device active clients
 * hang off dd->diag_client. NOTE(review): accesses appear to be
 * serialized by qib_mutex (see qib_diag_open/release) — confirm.
 */
static struct qib_diag_client {
	struct qib_diag_client *next;	/* next on device list or free pool */
	struct qib_devdata *dd;		/* device this client has open */
	pid_t pid;			/* opener's pid; enforced on read/write */
	enum diag_state state;
} *client_pool;
0073
0074
0075
0076
0077
0078 static struct qib_diag_client *get_client(struct qib_devdata *dd)
0079 {
0080 struct qib_diag_client *dc;
0081
0082 dc = client_pool;
0083 if (dc)
0084
0085 client_pool = dc->next;
0086 else
0087
0088 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
0089
0090 if (dc) {
0091 dc->next = NULL;
0092 dc->dd = dd;
0093 dc->pid = current->pid;
0094 dc->state = OPENED;
0095 }
0096 return dc;
0097 }
0098
0099
0100
0101
/*
 * Unlink @dc from its device's active-client list and return it to the
 * free pool. If @dc is not found on the list (rdc stays NULL), the
 * structure is deliberately left untouched rather than pooled twice.
 */
static void return_client(struct qib_diag_client *dc)
{
	struct qib_devdata *dd = dc->dd;
	struct qib_diag_client *tdc, *rdc;

	rdc = NULL;
	if (dc == dd->diag_client) {
		/* head of the list: simple unlink */
		dd->diag_client = dc->next;
		rdc = dc;
	} else {
		/* walk the list looking for the predecessor of dc */
		tdc = dc->dd->diag_client;
		while (tdc) {
			if (dc == tdc->next) {
				tdc->next = dc->next;
				rdc = dc;
				break;
			}
			tdc = tdc->next;
		}
	}
	if (rdc) {
		/* scrub and push onto the free pool */
		rdc->state = UNUSED;
		rdc->dd = NULL;
		rdc->pid = 0;
		rdc->next = client_pool;
		client_pool = rdc;
	}
}
0130
/* Forward declarations for the per-unit diag char device entry points */
static int qib_diag_open(struct inode *in, struct file *fp);
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);

/* file_operations for /dev/ipath_diag<N>; seekable (offset = reg offset) */
static const struct file_operations diag_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release,
	.llseek = default_llseek,
};
0146
/*
 * The diagpkt device is shared across all units; diagpkt_count tracks
 * how many devices are registered so the cdev is created on the first
 * qib_diag_add() and destroyed on the last qib_diag_remove().
 */
static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;

static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);

/* write-only device: one qib_diag_xpkt per write(), no seeking */
static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diagpkt_write,
	.llseek = noop_llseek,
};
0159
0160 int qib_diag_add(struct qib_devdata *dd)
0161 {
0162 char name[16];
0163 int ret = 0;
0164
0165 if (atomic_inc_return(&diagpkt_count) == 1) {
0166 ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
0167 &diagpkt_file_ops, &diagpkt_cdev,
0168 &diagpkt_device);
0169 if (ret)
0170 goto done;
0171 }
0172
0173 snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
0174 ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
0175 &diag_file_ops, &dd->diag_cdev,
0176 &dd->diag_device);
0177 done:
0178 return ret;
0179 }
0180
0181 static void qib_unregister_observers(struct qib_devdata *dd);
0182
static void qib_unregister_observers(struct qib_devdata *dd);

/*
 * Tear down diag state for @dd: destroy the shared diagpkt device if
 * this was the last unit, destroy this unit's diag cdev, return all of
 * this unit's active clients to the pool, then free the entire free
 * pool and unregister observers.
 */
void qib_diag_remove(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	if (atomic_dec_and_test(&diagpkt_count))
		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);

	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);

	/*
	 * Return all clients of this device to the pool first, so the
	 * pool sweep below frees them too.
	 */
	while (dd->diag_client)
		return_client(dd->diag_client);

	/* free the entire free pool (shared across units) */
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
		kfree(dc);
	}

	qib_unregister_observers(dd);
}
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
/*
 * qib_remap_ioaddr32 - translate a diag "flat" chip offset to a kernel
 * virtual __iomem address.
 * @dd: device
 * @offset: 32-bit offset into the chip's unified diag address space
 * @cntp: if non-NULL, receives the number of bytes addressable from the
 *        returned pointer before the end of that region
 *
 * The diag address space is pieced together from up to four BAR
 * mappings: the kernel register space (kregbase), the per-context user
 * registers (userbase), the 2K/4K PIO send buffers (piobase), and the
 * VL15 buffers (piovl15base). Returns NULL (and *cntp = 0) if @offset
 * falls in none of them.
 */
static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
				       u32 *cntp)
{
	u32 kreglen;
	u32 snd_bottom, snd_lim = 0;
	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
	u32 __iomem *map = NULL;
	u32 cnt = 0;
	u32 tot4k, offs4k;

	/* kernel register space starts at offset 0 */
	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
		cnt = kreglen - offset;
		goto mapped;
	}

	/*
	 * User register space, when mapped separately. When there is no
	 * separate VL15 BAR, the send-buffer region is bounded above by
	 * uregbase (snd_lim set here, used below).
	 */
	if (dd->userbase) {
		/* limit of user-register region in the flat space */
		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;

		if (!dd->piovl15base)
			snd_lim = dd->uregbase;
		krb32 = (u32 __iomem *)dd->userbase;
		if (offset >= dd->uregbase && offset < ulim) {
			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
			cnt = ulim - offset;
			goto mapped;
		}
	}

	/*
	 * PIO send buffers. Compute the [snd_bottom, snd_lim) window in
	 * the flat space covering both 2K and (contiguous) 4K buffers.
	 */
	snd_bottom = dd->pio2k_bufbase;
	if (snd_lim == 0) {
		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);

		snd_lim = snd_bottom + tot2k;
	}

	/* extend/adjust the window for the 4K buffers, if any */
	tot4k = dd->piobcnt4k * dd->align4k;
	offs4k = dd->piobufbase >> 32;
	if (dd->piobcnt4k) {
		if (snd_bottom > offs4k)
			snd_bottom = offs4k;
		else {
			/* 4K buffers sit above 2K; raise the limit
			 * unless the user/VL15 layout already capped it */
			if (!dd->userbase || dd->piovl15base)
				snd_lim = offs4k + tot4k;
		}
	}

	/*
	 * NOTE(review): cnt here is snd_lim minus the *rebased* offset,
	 * matching the historical behavior — confirm intent before
	 * changing.
	 */
	if (offset >= snd_bottom && offset < snd_lim) {
		offset -= snd_bottom;
		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
		cnt = snd_lim - offset;
	}

	/* separately-mapped VL15 buffers (2 buffers of align4k each) */
	if (!map && offs4k && dd->piovl15base) {
		snd_lim = offs4k + tot4k + 2 * dd->align4k;
		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
			map = (u32 __iomem *)dd->piovl15base +
				((offset - (offs4k + tot4k)) / sizeof(u32));
			cnt = snd_lim - offset;
		}
	}

mapped:
	if (cntp)
		*cntp = cnt;
	return map;
}
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339 static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
0340 u32 regoffs, size_t count)
0341 {
0342 const u64 __iomem *reg_addr;
0343 const u64 __iomem *reg_end;
0344 u32 limit;
0345 int ret;
0346
0347 reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
0348 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
0349 ret = -EINVAL;
0350 goto bail;
0351 }
0352 if (count >= limit)
0353 count = limit;
0354 reg_end = reg_addr + (count / sizeof(u64));
0355
0356
0357 while (reg_addr < reg_end) {
0358 u64 data = readq(reg_addr);
0359
0360 if (copy_to_user(uaddr, &data, sizeof(u64))) {
0361 ret = -EFAULT;
0362 goto bail;
0363 }
0364 reg_addr++;
0365 uaddr += sizeof(u64);
0366 }
0367 ret = 0;
0368 bail:
0369 return ret;
0370 }
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
0382
0383 static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
0384 const void __user *uaddr, size_t count)
0385 {
0386 u64 __iomem *reg_addr;
0387 const u64 __iomem *reg_end;
0388 u32 limit;
0389 int ret;
0390
0391 reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
0392 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
0393 ret = -EINVAL;
0394 goto bail;
0395 }
0396 if (count >= limit)
0397 count = limit;
0398 reg_end = reg_addr + (count / sizeof(u64));
0399
0400
0401 while (reg_addr < reg_end) {
0402 u64 data;
0403
0404 if (copy_from_user(&data, uaddr, sizeof(data))) {
0405 ret = -EFAULT;
0406 goto bail;
0407 }
0408 writeq(data, reg_addr);
0409
0410 reg_addr++;
0411 uaddr += sizeof(u64);
0412 }
0413 ret = 0;
0414 bail:
0415 return ret;
0416 }
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426
0427
0428 static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
0429 u32 regoffs, size_t count)
0430 {
0431 const u32 __iomem *reg_addr;
0432 const u32 __iomem *reg_end;
0433 u32 limit;
0434 int ret;
0435
0436 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
0437 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
0438 ret = -EINVAL;
0439 goto bail;
0440 }
0441 if (count >= limit)
0442 count = limit;
0443 reg_end = reg_addr + (count / sizeof(u32));
0444
0445
0446 while (reg_addr < reg_end) {
0447 u32 data = readl(reg_addr);
0448
0449 if (copy_to_user(uaddr, &data, sizeof(data))) {
0450 ret = -EFAULT;
0451 goto bail;
0452 }
0453
0454 reg_addr++;
0455 uaddr += sizeof(u32);
0456
0457 }
0458 ret = 0;
0459 bail:
0460 return ret;
0461 }
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474 static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
0475 const void __user *uaddr, size_t count)
0476 {
0477 u32 __iomem *reg_addr;
0478 const u32 __iomem *reg_end;
0479 u32 limit;
0480 int ret;
0481
0482 reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
0483 if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
0484 ret = -EINVAL;
0485 goto bail;
0486 }
0487 if (count >= limit)
0488 count = limit;
0489 reg_end = reg_addr + (count / sizeof(u32));
0490
0491 while (reg_addr < reg_end) {
0492 u32 data;
0493
0494 if (copy_from_user(&data, uaddr, sizeof(data))) {
0495 ret = -EFAULT;
0496 goto bail;
0497 }
0498 writel(data, reg_addr);
0499
0500 reg_addr++;
0501 uaddr += sizeof(u32);
0502 }
0503 ret = 0;
0504 bail:
0505 return ret;
0506 }
0507
/*
 * open() for /dev/ipath_diag<N>: look up the unit from the minor
 * number, verify the device is present and mapped, and attach a client
 * structure to both the device's client list and the file. Serialized
 * by qib_mutex.
 */
static int qib_diag_open(struct inode *in, struct file *fp)
{
	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
	struct qib_devdata *dd;
	struct qib_diag_client *dc;
	int ret;

	mutex_lock(&qib_mutex);

	dd = qib_lookup(unit);

	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
	    !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}

	dc = get_client(dd);
	if (!dc) {
		ret = -ENOMEM;
		goto bail;
	}
	/* push onto the device's active-client list */
	dc->next = dd->diag_client;
	dd->diag_client = dc;
	fp->private_data = dc;
	ret = 0;
bail:
	mutex_unlock(&qib_mutex);

	return ret;
}
0539
0540
0541
0542
0543
0544
0545
0546
/*
 * qib_diagpkt_write - inject a raw packet through a PIO send buffer.
 *
 * Userspace writes exactly one struct qib_diag_xpkt describing the
 * target unit/port, the packet payload (user pointer + length), and an
 * optional PBC word. The payload is staged in a kernel buffer, a send
 * buffer is claimed, and the packet is pushed out with the PBC first,
 * honoring the chip's write-combining flush and special-trigger quirks.
 *
 * Returns sizeof(struct qib_diag_xpkt) on success or a negative errno.
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, pbufn, maxlen_reserve;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	/* exactly one descriptor per write() */
	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* payload must be dword-aligned */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	/* ports are 1-based in the descriptor */
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/*
	 * Bound the payload: reserve room for the trailing words added
	 * around the payload so the total cannot exceed ibmaxlen.
	 */
	maxlen_reserve = 2 * sizeof(u32);
	if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
		ret = -EINVAL;
		goto bail;
	}

	/* total = PBC word + payload, in bytes */
	plen = sizeof(u32) + dp.len;

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   u64_to_user_ptr(dp.data),
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* now in dwords */

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just in case */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header/packet checking while we own the buffer */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	/* PBC goes out first */
	writeq(dp.pbc_wd, piobuf);

	/*
	 * On chips needing explicit write-combining flushes, the last
	 * payload dword must be written separately after a flush so the
	 * hardware sees a complete packet.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, plen);

	/* some chips launch on a magic write to a special offset */
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	/*
	 * Flush, release the buffer, and re-enable checking only after
	 * the packet is fully pushed to the chip.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
0675
/*
 * release() for the diag device: return the client to the pool under
 * qib_mutex and drop the file's reference to it.
 */
static int qib_diag_release(struct inode *in, struct file *fp)
{
	mutex_lock(&qib_mutex);
	return_client(fp->private_data);
	fp->private_data = NULL;
	mutex_unlock(&qib_mutex);
	return 0;
}
0684
0685
0686
0687
0688
/*
 * Node of the per-device observer list (dd->diag_observer_list);
 * traversal and modification are protected by dd->qib_diag_trans_lock.
 */
struct diag_observer_list_elt {
	struct diag_observer_list_elt *next;
	const struct diag_observer *op;
};
0693
0694 int qib_register_observer(struct qib_devdata *dd,
0695 const struct diag_observer *op)
0696 {
0697 struct diag_observer_list_elt *olp;
0698 unsigned long flags;
0699
0700 if (!dd || !op)
0701 return -EINVAL;
0702 olp = vmalloc(sizeof(*olp));
0703 if (!olp)
0704 return -ENOMEM;
0705
0706 spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
0707 olp->op = op;
0708 olp->next = dd->diag_observer_list;
0709 dd->diag_observer_list = olp;
0710 spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
0711
0712 return 0;
0713 }
0714
0715
/* Remove all registered observers for @dd, freeing their list nodes. */
static void qib_unregister_observers(struct qib_devdata *dd)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp = dd->diag_observer_list;
	while (olp) {
		/*
		 * Pop the head, then drop the lock for vfree() (which
		 * may not be called with IRQs disabled), and reacquire
		 * it to re-read the head for the next iteration.
		 */
		dd->diag_observer_list = olp->next;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		vfree(olp);

		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp = dd->diag_observer_list;
	}
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
}
0734
0735
0736
0737
0738
0739
0740 static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
0741 u32 addr)
0742 {
0743 struct diag_observer_list_elt *olp;
0744 const struct diag_observer *op = NULL;
0745
0746 olp = dd->diag_observer_list;
0747 while (olp) {
0748 op = olp->op;
0749 if (addr >= op->bottom && addr <= op->top)
0750 break;
0751 olp = olp->next;
0752 }
0753 if (!olp)
0754 op = NULL;
0755
0756 return op;
0757 }
0758
/*
 * read() for the diag device. *off is interpreted as a flat chip
 * offset. Reads are dispatched first to a matching observer hook, and
 * fall through to raw MMIO reads (32- or 64-bit depending on the
 * alignment of count and offset) when no observer claims the range.
 * The first successful read moves the client from OPENED to INIT.
 */
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	/* only the opening task may use the client */
	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY && (*off || count != 8))
		/* until fully initialized, only an 8-byte read at 0 is allowed */
		ret = -EINVAL;
	else {
		unsigned long flags;
		u64 data64 = 0;
		int use_32;
		const struct diag_observer *op;

		/* 32-bit access when either count or offset is not qword-aligned */
		use_32 = (count % 8) || (*off % 8);
		ret = -1;	/* sentinel: no observer result yet */
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		/*
		 * Check for observer on this address range and, if one
		 * matches, let its hook produce the data under the lock.
		 */
		op = diag_get_observer(dd, *off);
		if (op) {
			u32 offset = *off;

			ret = op->hook(dd, op, offset, &data64, 0, use_32);
		}
		/*
		 * Lock is dropped before the (possibly sleeping) user
		 * copies and MMIO path below.
		 */
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		if (!op) {
			if (use_32)
				/*
				 * Address or length not dword-aligned:
				 * must do 32-bit MMIO accesses.
				 */
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
			else
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
			/* observer produced the full request; copy it out */
			ret = copy_to_user(data, &data64, use_32 ?
					   sizeof(u32) : sizeof(u64));
			if (ret)
				ret = -EFAULT;
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == OPENED)
			dc->state = INIT;
	}
bail:
	return ret;
}
0832
/*
 * write() for the diag device. Mirrors qib_diag_read(): *off is a flat
 * chip offset, observers get first claim on 4- or 8-byte transactions,
 * and everything else goes to raw MMIO writes. The first successful
 * write moves the client from INIT to READY.
 */
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	/* only the opening task may use the client */
	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY &&
		 ((*off || count != 8) || dc->state != INIT))
		/* No writes except an 8-byte write at 0, and only from INIT */
		ret = -EINVAL;
	else {
		unsigned long flags;
		const struct diag_observer *op = NULL;
		int use_32 = (count % 8) || (*off % 8);

		/*
		 * Observers are only consulted for single 32- or 64-bit
		 * transactions; the data is fetched from userspace
		 * before taking the lock, since copy_from_user may
		 * sleep.
		 */
		if (count == 4 || count == 8) {
			u64 data64;
			u32 offset = *off;

			ret = copy_from_user(&data64, data, count);
			if (ret) {
				ret = -EFAULT;
				goto bail;
			}
			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
			op = diag_get_observer(dd, *off);
			if (op)
				/* all-ones mask: full-width write */
				ret = op->hook(dd, op, offset, &data64, ~0Ull,
					       use_32);
			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		}

		if (!op) {
			if (use_32)
				/*
				 * Address or length not dword-aligned:
				 * must do 32-bit MMIO accesses.
				 */
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
			else
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == INIT)
			dc->state = READY;
	}
bail:
	return ret;
}