// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

#define RST_POLL_INVL 10
#define RST_POLL_TIMEOUT 1000

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable Port by clearing the port soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable function should only be used after __afu_port_disable
 * function.
 *
 * The caller needs to hold lock for protection.
 */
int __afu_port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW clears the ack bit to indicate that the port is fully out
	 * of reset.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       !(v & PORT_CTRL_SFTRST_ACK),
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, failure to enable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * __afu_port_disable - disable a port by holding reset
 * @pdev: port platform device.
 *
 * Disable Port by setting the port soft reset bit; it puts the port into reset.
 *
 * The caller needs to hold lock for protection.
 */
int __afu_port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls the ack bit to determine if reset is done.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, failure to disable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) via
 * __afu_port_disable() and __afu_port_enable() (set the port soft reset bit
 * and then clear it). Userspace can do a Port reset at any time, e.g. during
 * DMA or Partial Reconfiguration. It should never cause any system level
 * issue, only functional failure (e.g. DMA or PR operation failure) that is
 * recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = __afu_port_disable(pdev);
	if (ret)
		return ret;

	return __afu_port_enable(pdev);
}

static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	umode_t mode = attr->mode;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * userclk sysfs interfaces are only visible when the port
		 * header revision is 0, as hardware with revision >0 doesn't
		 * support this feature.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs = port_hdr_attrs,
	.is_visible = port_hdr_attrs_visible,
};
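
/*
 * Illustrative sketch (not part of the driver): reading one of the port
 * header attributes published through port_hdr_group from userspace. The
 * attributes sit under the port platform device in sysfs; the exact path
 * (assumed here to be /sys/bus/platform/devices/dfl-port.0/) depends on how
 * the port was enumerated on a given system.
 *
 *	#include <stdio.h>
 *
 *	int read_power_state(char *buf, size_t len)
 *	{
 *		FILE *f = fopen("/sys/bus/platform/devices/dfl-port.0/power_state", "r");
 *
 *		if (!f)
 *			return -1;
 *		if (!fgets(buf, len, f)) {
 *			fclose(f);
 *			return -1;
 *		}
 *		fclose(f);
 *		return 0;
 *	}
 */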

static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}
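
/*
 * Illustrative sketch (not part of the driver): how userspace can exercise
 * the DFL_FPGA_PORT_RESET path handled by port_hdr_ioctl() above. The ioctl
 * definitions come from <linux/fpga-dfl.h>; the device node path
 * /dev/dfl-port.0 is an assumption and depends on the enumerated port index.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int reset_port(void)
 *	{
 *		int fd = open("/dev/dfl-port.0", O_RDWR);
 *		int ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		// The reset ioctl takes no argument; a non-zero arg is
 *		// rejected with -EINVAL by port_hdr_ioctl().
 *		ret = ioctl(fd, DFL_FPGA_PORT_RESET, 0);
 *		close(fd);
 *		return ret;
 *	}
 */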

static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs = port_afu_attrs,
	.is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static long
port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_PORT_UINT_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

static const struct dfl_feature_id port_uint_id_table[] = {
	{.id = PORT_FEATURE_ID_UINT,},
	{0,}
};

static const struct dfl_feature_ops port_uint_ops = {
	.ioctl = port_uint_ioctl,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	},
	{
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	},
	{
		.id_table = port_uint_id_table,
		.ops = &port_uint_ops,
	},
	{
		.ops = NULL,
	}
};

static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata)) {
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
		__port_reset(pdev);
		afu_dma_region_destroy(pdata);
	}
	mutex_unlock(&pdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}
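
/*
 * Illustrative sketch (not part of the driver): querying the port and its
 * AFU MMIO region from userspace through DFL_FPGA_PORT_GET_INFO and
 * DFL_FPGA_PORT_GET_REGION_INFO, matching the argsz/padding checks performed
 * by afu_ioctl_get_info() and afu_ioctl_get_region_info() above. "fd" is
 * assumed to be an open port device node (e.g. /dev/dfl-port.0).
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int query_afu_region(int fd, struct dfl_fpga_port_region_info *rinfo)
 *	{
 *		struct dfl_fpga_port_info info;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.argsz = sizeof(info);
 *		if (ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info))
 *			return -1;
 *
 *		memset(rinfo, 0, sizeof(*rinfo));
 *		rinfo->argsz = sizeof(*rinfo);
 *		rinfo->index = DFL_PORT_REGION_INDEX_AFU;
 *		// padding must stay zero or the driver returns -EINVAL
 *		return ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, rinfo);
 *	}
 */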

static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}
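
/*
 * Illustrative sketch (not part of the driver): mapping a page-aligned user
 * buffer for AFU DMA and unmapping it again through the
 * DFL_FPGA_PORT_DMA_MAP/UNMAP ioctls implemented above. The buffer must stay
 * allocated while mapped; "fd" is assumed to be an open port device node.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int map_buffer(int fd, void *buf, __u64 len, __u64 *iova)
 *	{
 *		struct dfl_fpga_port_dma_map map = {
 *			.argsz = sizeof(map),
 *			.user_addr = (__u64)(uintptr_t)buf,
 *			.length = len,	// page aligned
 *		};
 *
 *		if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map))
 *			return -1;
 *		*iova = map.iova;	// bus address to program into the AFU
 *		return 0;
 *	}
 *
 *	int unmap_buffer(int fd, __u64 iova)
 *	{
 *		struct dfl_fpga_port_dma_unmap unmap = {
 *			.argsz = sizeof(unmap),
 *			.iova = iova,
 *		};
 *
 *		return ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 */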

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd.
		 * The sub-feature's ioctl returns -ENODEV when the cmd is
		 * not handled by that sub feature, and another error code
		 * if the cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}
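
/*
 * Illustrative sketch (not part of the driver): mmap()ing the AFU MMIO region
 * through the checks in afu_mmap() above. The offset passed to mmap() is the
 * region offset reported by DFL_FPGA_PORT_GET_REGION_INFO, and the mapping
 * must be MAP_SHARED because afu_mmap() rejects private mappings.
 *
 *	#include <sys/mman.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	void *map_afu_mmio(int fd, const struct dfl_fpga_port_region_info *rinfo)
 *	{
 *		return mmap(NULL, rinfo->size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, rinfo->offset);
 *	}
 */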

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	if (enable)
		ret = __afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver = {
		.name = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe = afu_probe,
	.remove = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");