0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/dfl.h>
0014 #include <linux/fpga-dfl.h>
0015 #include <linux/module.h>
0016 #include <linux/uaccess.h>
0017
0018 #include "dfl.h"
0019
/* Serializes all feature-dev id allocation/free across the dfl_devs idrs. */
static DEFINE_MUTEX(dfl_id_mutex);
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
/* Index into dfl_chrdevs[]: one char-device region per feature device type. */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};
0039
/*
 * Per-device-type lockdep classes for pdata->lock, so lockdep can tell
 * FME-pdata locks apart from PORT-pdata locks (they nest differently).
 */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

/* Human-readable lockdep class names, indexed like dfl_pdata_keys[]. */
static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
0046
0047
0048
0049
0050
0051
0052
0053
/**
 * struct dfl_dev_info - dfl feature device type information
 * @name: name of the feature platform device (also used for matching)
 * @dfh_id: FIU id value read from the Device Feature Header register
 * @id: idr handing out per-type device instance ids
 * @devt_type: which dfl_chrdevs[] region supplies this type's dev_t
 */
struct dfl_dev_info {
	const char *name;
	u16 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* Supported feature device types, indexed by enum dfl_id_type. */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
0068
0069
0070
0071
0072
0073
/**
 * struct dfl_chardev_info - dfl char-device region information
 * @name: region name passed to alloc_chrdev_region()
 * @devt: base dev_t of the region; MKDEV(0, 0) while unregistered
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* Char-device regions, indexed by enum dfl_fpga_devt_type. */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
0084
0085 static void dfl_ids_init(void)
0086 {
0087 int i;
0088
0089 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
0090 idr_init(&dfl_devs[i].id);
0091 }
0092
0093 static void dfl_ids_destroy(void)
0094 {
0095 int i;
0096
0097 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
0098 idr_destroy(&dfl_devs[i].id);
0099 }
0100
0101 static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
0102 {
0103 int id;
0104
0105 WARN_ON(type >= DFL_ID_MAX);
0106 mutex_lock(&dfl_id_mutex);
0107 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
0108 mutex_unlock(&dfl_id_mutex);
0109
0110 return id;
0111 }
0112
0113 static void dfl_id_free(enum dfl_id_type type, int id)
0114 {
0115 WARN_ON(type >= DFL_ID_MAX);
0116 mutex_lock(&dfl_id_mutex);
0117 idr_remove(&dfl_devs[type].id, id);
0118 mutex_unlock(&dfl_id_mutex);
0119 }
0120
0121 static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
0122 {
0123 int i;
0124
0125 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
0126 if (!strcmp(dfl_devs[i].name, pdev->name))
0127 return i;
0128
0129 return DFL_ID_MAX;
0130 }
0131
0132 static enum dfl_id_type dfh_id_to_type(u16 id)
0133 {
0134 int i;
0135
0136 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
0137 if (dfl_devs[i].dfh_id == id)
0138 return i;
0139
0140 return DFL_ID_MAX;
0141 }
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
/* Registered port ops, protected by dfl_port_ops_mutex. */
static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
0155
0156
0157
0158
0159
0160
0161
0162
0163 struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
0164 {
0165 struct dfl_fpga_port_ops *ops = NULL;
0166
0167 mutex_lock(&dfl_port_ops_mutex);
0168 if (list_empty(&dfl_port_ops_list))
0169 goto done;
0170
0171 list_for_each_entry(ops, &dfl_port_ops_list, node) {
0172
0173 if (!strcmp(pdev->name, ops->name)) {
0174 if (!try_module_get(ops->owner))
0175 ops = NULL;
0176 goto done;
0177 }
0178 }
0179
0180 ops = NULL;
0181 done:
0182 mutex_unlock(&dfl_port_ops_mutex);
0183 return ops;
0184 }
0185 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
0186
0187
0188
0189
0190
0191 void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
0192 {
0193 if (ops && ops->owner)
0194 module_put(ops->owner);
0195 }
0196 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
0197
0198
0199
0200
0201
0202 void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
0203 {
0204 mutex_lock(&dfl_port_ops_mutex);
0205 list_add_tail(&ops->node, &dfl_port_ops_list);
0206 mutex_unlock(&dfl_port_ops_mutex);
0207 }
0208 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
0209
0210
0211
0212
0213
0214 void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
0215 {
0216 mutex_lock(&dfl_port_ops_mutex);
0217 list_del(&ops->node);
0218 mutex_unlock(&dfl_port_ops_mutex);
0219 }
0220 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
0221
0222
0223
0224
0225
0226
0227
0228
0229 int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
0230 {
0231 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0232 struct dfl_fpga_port_ops *port_ops;
0233
0234 if (pdata->id != FEATURE_DEV_ID_UNUSED)
0235 return pdata->id == *(int *)pport_id;
0236
0237 port_ops = dfl_fpga_port_ops_get(pdev);
0238 if (!port_ops || !port_ops->get_id)
0239 return 0;
0240
0241 pdata->id = port_ops->get_id(pdev);
0242 dfl_fpga_port_ops_put(port_ops);
0243
0244 return pdata->id == *(int *)pport_id;
0245 }
0246 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
0247
/* Global instance ids for dfl devices on the dfl bus (dfl_dev.%d names). */
static DEFINE_IDA(dfl_device_ida);
0249
0250 static const struct dfl_device_id *
0251 dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
0252 {
0253 if (id->type == ddev->type && id->feature_id == ddev->feature_id)
0254 return id;
0255
0256 return NULL;
0257 }
0258
0259 static int dfl_bus_match(struct device *dev, struct device_driver *drv)
0260 {
0261 struct dfl_device *ddev = to_dfl_dev(dev);
0262 struct dfl_driver *ddrv = to_dfl_drv(drv);
0263 const struct dfl_device_id *id_entry;
0264
0265 id_entry = ddrv->id_table;
0266 if (id_entry) {
0267 while (id_entry->feature_id) {
0268 if (dfl_match_one_device(id_entry, ddev)) {
0269 ddev->id_entry = id_entry;
0270 return 1;
0271 }
0272 id_entry++;
0273 }
0274 }
0275
0276 return 0;
0277 }
0278
0279 static int dfl_bus_probe(struct device *dev)
0280 {
0281 struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
0282 struct dfl_device *ddev = to_dfl_dev(dev);
0283
0284 return ddrv->probe(ddev);
0285 }
0286
0287 static void dfl_bus_remove(struct device *dev)
0288 {
0289 struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
0290 struct dfl_device *ddev = to_dfl_dev(dev);
0291
0292 if (ddrv->remove)
0293 ddrv->remove(ddev);
0294 }
0295
0296 static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
0297 {
0298 struct dfl_device *ddev = to_dfl_dev(dev);
0299
0300 return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
0301 ddev->type, ddev->feature_id);
0302 }
0303
0304 static ssize_t
0305 type_show(struct device *dev, struct device_attribute *attr, char *buf)
0306 {
0307 struct dfl_device *ddev = to_dfl_dev(dev);
0308
0309 return sprintf(buf, "0x%x\n", ddev->type);
0310 }
0311 static DEVICE_ATTR_RO(type);
0312
0313 static ssize_t
0314 feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
0315 {
0316 struct dfl_device *ddev = to_dfl_dev(dev);
0317
0318 return sprintf(buf, "0x%x\n", ddev->feature_id);
0319 }
0320 static DEVICE_ATTR_RO(feature_id);
0321
/* Default sysfs attributes for every device on the dfl bus. */
static struct attribute *dfl_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_feature_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dfl_dev);
0328
/* The dfl bus: feature devices are enumerated here and bound to drivers. */
static struct bus_type dfl_bus_type = {
	.name = "dfl",
	.match = dfl_bus_match,
	.probe = dfl_bus_probe,
	.remove = dfl_bus_remove,
	.uevent = dfl_bus_uevent,
	.dev_groups = dfl_dev_groups,
};
0337
/*
 * Device release callback: frees everything dfl_dev_add() allocated.
 * Runs when the last reference to the dfl device is dropped, so it must
 * undo the mmio resource insertion, the ida id and the irq array.
 */
static void release_dfl_dev(struct device *dev)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	/* only release the resource if insert_resource() succeeded */
	if (ddev->mmio_res.parent)
		release_resource(&ddev->mmio_res);

	ida_free(&dfl_device_ida, ddev->id);
	kfree(ddev->irqs);
	kfree(ddev);
}
0349
/*
 * dfl_dev_add - create and register one dfl device for @feature
 * @pdata: platform data of the feature platform device
 * @feature: the sub-feature to expose on the dfl bus
 *
 * Returns the new device or an ERR_PTR. After device_initialize(), all
 * failure cleanup goes through put_device() -> release_dfl_dev().
 */
static struct dfl_device *
dfl_dev_add(struct dfl_feature_platform_data *pdata,
	    struct dfl_feature *feature)
{
	struct platform_device *pdev = pdata->dev;
	struct resource *parent_res;
	struct dfl_device *ddev;
	int id, i, ret;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	id = ida_alloc(&dfl_device_ida, GFP_KERNEL);
	if (id < 0) {
		dev_err(&pdev->dev, "unable to get id\n");
		kfree(ddev);
		return ERR_PTR(id);
	}

	/* from here on, ddev (and its id) are freed by release_dfl_dev() */
	device_initialize(&ddev->dev);
	ddev->dev.parent = &pdev->dev;
	ddev->dev.bus = &dfl_bus_type;
	ddev->dev.release = release_dfl_dev;
	ddev->id = id;
	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
	if (ret)
		goto put_dev;

	ddev->type = feature_dev_id_type(pdev);
	ddev->feature_id = feature->id;
	ddev->revision = feature->revision;
	ddev->cdev = pdata->dfl_cdev;

	/* add mmio resource as a child of the feature dev's resource */
	parent_res = &pdev->resource[feature->resource_index];
	ddev->mmio_res.flags = IORESOURCE_MEM;
	ddev->mmio_res.start = parent_res->start;
	ddev->mmio_res.end = parent_res->end;
	ddev->mmio_res.name = dev_name(&ddev->dev);
	ret = insert_resource(parent_res, &ddev->mmio_res);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
			dev_name(&ddev->dev), &ddev->mmio_res);
		goto put_dev;
	}

	/* then add irq resource, copied from the feature's irq contexts */
	if (feature->nr_irqs) {
		ddev->irqs = kcalloc(feature->nr_irqs,
				     sizeof(*ddev->irqs), GFP_KERNEL);
		if (!ddev->irqs) {
			ret = -ENOMEM;
			goto put_dev;
		}

		for (i = 0; i < feature->nr_irqs; i++)
			ddev->irqs[i] = feature->irq_ctx[i].irq;

		ddev->num_irqs = feature->nr_irqs;
	}

	ret = device_add(&ddev->dev);
	if (ret)
		goto put_dev;

	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
	return ddev;

put_dev:
	/* release_dfl_dev() frees the id, irqs and ddev itself */
	put_device(&ddev->dev);
	return ERR_PTR(ret);
}
0425
0426 static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
0427 {
0428 struct dfl_feature *feature;
0429
0430 dfl_fpga_dev_for_each_feature(pdata, feature) {
0431 if (feature->ddev) {
0432 device_unregister(&feature->ddev->dev);
0433 feature->ddev = NULL;
0434 }
0435 }
0436 }
0437
0438 static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
0439 {
0440 struct dfl_feature *feature;
0441 struct dfl_device *ddev;
0442 int ret;
0443
0444 dfl_fpga_dev_for_each_feature(pdata, feature) {
0445 if (feature->ioaddr)
0446 continue;
0447
0448 if (feature->ddev) {
0449 ret = -EEXIST;
0450 goto err;
0451 }
0452
0453 ddev = dfl_dev_add(pdata, feature);
0454 if (IS_ERR(ddev)) {
0455 ret = PTR_ERR(ddev);
0456 goto err;
0457 }
0458
0459 feature->ddev = ddev;
0460 }
0461
0462 return 0;
0463
0464 err:
0465 dfl_devs_remove(pdata);
0466 return ret;
0467 }
0468
0469 int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
0470 {
0471 if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
0472 return -EINVAL;
0473
0474 dfl_drv->drv.owner = owner;
0475 dfl_drv->drv.bus = &dfl_bus_type;
0476
0477 return driver_register(&dfl_drv->drv);
0478 }
0479 EXPORT_SYMBOL(__dfl_driver_register);
0480
0481 void dfl_driver_unregister(struct dfl_driver *dfl_drv)
0482 {
0483 driver_unregister(&dfl_drv->drv);
0484 }
0485 EXPORT_SYMBOL(dfl_driver_unregister);
0486
/* True for the FIU header feature, which is mapped during enumeration. */
#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)
0488
0489
0490
0491
0492
0493 void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
0494 {
0495 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0496 struct dfl_feature *feature;
0497
0498 dfl_devs_remove(pdata);
0499
0500 dfl_fpga_dev_for_each_feature(pdata, feature) {
0501 if (feature->ops) {
0502 if (feature->ops->uinit)
0503 feature->ops->uinit(pdev, feature);
0504 feature->ops = NULL;
0505 }
0506 }
0507 }
0508 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
0509
0510 static int dfl_feature_instance_init(struct platform_device *pdev,
0511 struct dfl_feature_platform_data *pdata,
0512 struct dfl_feature *feature,
0513 struct dfl_feature_driver *drv)
0514 {
0515 void __iomem *base;
0516 int ret = 0;
0517
0518 if (!is_header_feature(feature)) {
0519 base = devm_platform_ioremap_resource(pdev,
0520 feature->resource_index);
0521 if (IS_ERR(base)) {
0522 dev_err(&pdev->dev,
0523 "ioremap failed for feature 0x%x!\n",
0524 feature->id);
0525 return PTR_ERR(base);
0526 }
0527
0528 feature->ioaddr = base;
0529 }
0530
0531 if (drv->ops->init) {
0532 ret = drv->ops->init(pdev, feature);
0533 if (ret)
0534 return ret;
0535 }
0536
0537 feature->ops = drv->ops;
0538
0539 return ret;
0540 }
0541
0542 static bool dfl_feature_drv_match(struct dfl_feature *feature,
0543 struct dfl_feature_driver *driver)
0544 {
0545 const struct dfl_feature_id *ids = driver->id_table;
0546
0547 if (ids) {
0548 while (ids->id) {
0549 if (ids->id == feature->id)
0550 return true;
0551 ids++;
0552 }
0553 }
0554 return false;
0555 }
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567 int dfl_fpga_dev_feature_init(struct platform_device *pdev,
0568 struct dfl_feature_driver *feature_drvs)
0569 {
0570 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0571 struct dfl_feature_driver *drv = feature_drvs;
0572 struct dfl_feature *feature;
0573 int ret;
0574
0575 while (drv->ops) {
0576 dfl_fpga_dev_for_each_feature(pdata, feature) {
0577 if (dfl_feature_drv_match(feature, drv)) {
0578 ret = dfl_feature_instance_init(pdev, pdata,
0579 feature, drv);
0580 if (ret)
0581 goto exit;
0582 }
0583 }
0584 drv++;
0585 }
0586
0587 ret = dfl_devs_add(pdata);
0588 if (ret)
0589 goto exit;
0590
0591 return 0;
0592 exit:
0593 dfl_fpga_dev_feature_uinit(pdev);
0594 return ret;
0595 }
0596 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
0597
0598 static void dfl_chardev_uinit(void)
0599 {
0600 int i;
0601
0602 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
0603 if (MAJOR(dfl_chrdevs[i].devt)) {
0604 unregister_chrdev_region(dfl_chrdevs[i].devt,
0605 MINORMASK + 1);
0606 dfl_chrdevs[i].devt = MKDEV(0, 0);
0607 }
0608 }
0609
0610 static int dfl_chardev_init(void)
0611 {
0612 int i, ret;
0613
0614 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
0615 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
0616 MINORMASK + 1, dfl_chrdevs[i].name);
0617 if (ret)
0618 goto exit;
0619 }
0620
0621 return 0;
0622
0623 exit:
0624 dfl_chardev_uinit();
0625 return ret;
0626 }
0627
0628 static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
0629 {
0630 if (type >= DFL_FPGA_DEVT_MAX)
0631 return 0;
0632
0633 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
0634 }
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
/**
 * dfl_fpga_dev_ops_register - register cdev ops for a feature device
 * @pdev: feature device
 * @fops: file operations for the device's char device
 * @owner: owning module of @fops
 *
 * Return: 0 on success, negative errno from cdev_add() on failure.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * Parent the cdev kobject to the feature device so the feature
	 * device's refcount is only dropped after the last cdev reference
	 * is gone, keeping it valid for the device file's whole lifetime.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
0665
0666
0667
0668
0669
0670 void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
0671 {
0672 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0673
0674 cdev_del(&pdata->cdev);
0675 }
0676 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
/**
 * struct build_feature_devs_info - enumeration-time bookkeeping
 * @dev: device performing the enumeration (owns devm allocations)
 * @cdev: the container device the enumerated feature devs attach to
 * @nr_irqs: number of entries in @irq_table
 * @irq_table: Linux irq numbers, indexed by local DFL irq index
 * @feature_dev: feature device currently being built, NULL once committed
 * @ioaddr: mapping of the current enumeration window
 * @start: physical start of the current enumeration window
 * @len: length of the current enumeration window
 * @sub_features: parsed dfl_feature_info entries for @feature_dev
 * @feature_num: number of entries on @sub_features
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	unsigned int nr_irqs;
	int *irq_table;

	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	resource_size_t start;
	resource_size_t len;
	struct list_head sub_features;
	int feature_num;
};
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
/**
 * struct dfl_feature_info - one parsed sub feature, pending commit
 * @fid: feature id
 * @revision: feature revision from the DFH
 * @mmio_res: physical mmio resource of this feature
 * @ioaddr: mapped address (unused until the feature is bound)
 * @node: entry on build_feature_devs_info.sub_features
 * @irq_base: first index into the enumeration irq table
 * @nr_irqs: number of irqs used by this feature
 */
struct dfl_feature_info {
	u16 fid;
	u8 revision;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
	unsigned int irq_base;
	unsigned int nr_irqs;
};
0726
0727 static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
0728 struct platform_device *port)
0729 {
0730 struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
0731
0732 mutex_lock(&cdev->lock);
0733 list_add(&pdata->node, &cdev->port_dev_list);
0734 get_device(&pdata->dev->dev);
0735 mutex_unlock(&cdev->lock);
0736 }
0737
0738
0739
0740
0741
0742
/*
 * build_info_commit_dev - finalize and register the current feature device
 * @binfo: enumeration bookkeeping holding the parsed sub features
 *
 * Allocates the platform data (with its trailing feature array), moves
 * every parsed sub feature into it, then registers the platform device.
 * On success, ownership of the device passes to the container (port list
 * or fme_dev) and binfo->feature_dev is cleared.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0, res_idx = 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * platform_data is freed with the platform device on its release,
	 * which is why the error paths below don't kfree() it explicitly.
	 */
	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	/* port id is resolved lazily (see dfl_fpga_check_port_id()) */
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	/* distinct lockdep class per device type (FME vs PORT) */
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/* a freshly created device must not start out disabled */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature could have its own mmio resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* move parsed sub features into the platform data's feature array */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index++];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->dev = fdev;
		feature->id = finfo->fid;
		feature->revision = finfo->revision;

		/*
		 * The FIU header is mapped right here against the
		 * enumeration device, so FIU drivers don't need to map it
		 * again; every other feature keeps a platform resource
		 * and is mapped later by dfl_feature_instance_init().
		 */
		if (is_header_feature(feature)) {
			feature->resource_index = -1;
			feature->ioaddr =
				devm_ioremap_resource(binfo->dev,
						      &finfo->mmio_res);
			if (IS_ERR(feature->ioaddr))
				return PTR_ERR(feature->ioaddr);
		} else {
			feature->resource_index = res_idx;
			fdev->resource[res_idx++] = finfo->mmio_res;
		}

		if (finfo->nr_irqs) {
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;

			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
				get_device(&binfo->feature_dev->dev);

		/*
		 * Ownership was handed to the container above, so clear
		 * the pointer to keep build_info_free() from releasing
		 * the device again.
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
0858
/*
 * Allocate a new (not yet registered) feature platform device of @type
 * and make it the current one being built in @binfo.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type)
{
	struct platform_device *fdev;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/*
	 * Use -ENODEV as the initial device id: it is an invalid id, so
	 * build_info_free() can tell whether dfl_id_alloc() below ever
	 * assigned a real one that needs freeing.
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;

	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
0890
/*
 * Release everything still owned by @binfo: an uncommitted feature
 * device (if any), its parsed-but-uncommitted sub features, and the
 * binfo itself.
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * A negative device id means dfl_id_alloc() never ran for this
	 * device (see build_info_create_dev()), so there is no id to free
	 * and no sub features were parsed for it either.
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* no-op when the device was already committed (pointer is NULL) */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
0913
0914 static inline u32 feature_size(u64 value)
0915 {
0916 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value);
0917
0918 return ofst ? ofst : 4096;
0919 }
0920
0921 static u16 feature_id(u64 value)
0922 {
0923 u16 id = FIELD_GET(DFH_ID, value);
0924 u8 type = FIELD_GET(DFH_TYPE, value);
0925
0926 if (type == DFH_TYPE_FIU)
0927 return FEATURE_ID_FIU_HEADER;
0928 else if (type == DFH_TYPE_PRIVATE)
0929 return id;
0930 else if (type == DFH_TYPE_AFU)
0931 return FEATURE_ID_AFU;
0932
0933 WARN_ON(1);
0934 return 0;
0935 }
0936
/*
 * parse_feature_irqs - work out which irq-table slots a feature uses
 * @binfo: enumeration bookkeeping (supplies the irq table)
 * @ofst: feature offset inside the current enumeration window
 * @fid: feature id
 * @irq_base: out, first irq-table index used by the feature
 * @nr_irqs: out, number of irqs used by the feature
 *
 * Only a few well-known features carry interrupts; their capability
 * registers encode the first vector and the vector count. Features
 * without interrupts get (0, 0).
 *
 * Return: 0 on success, -EINVAL when the capability registers reference
 * irq slots outside the enumeration irq table.
 */
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, u16 fid,
			      unsigned int *irq_base, unsigned int *nr_irqs)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	enum dfl_id_type type;
	int virq;
	u64 v;

	type = feature_dev_id_type(binfo->feature_dev);

	/*
	 * Ideally DFL framework should only read info from DFL header, but
	 * current version DFL only provides mmio resources information of
	 * each feature in DFL Header, no irq information, so the irq layout
	 * has to be read from the feature-specific capability registers
	 * below, per device type and feature id.
	 */
	if (type == PORT_ID) {
		switch (fid) {
		case PORT_FEATURE_ID_UINT:
			v = readq(base + PORT_UINT_CAP);
			ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
			inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
			break;
		case PORT_FEATURE_ID_ERROR:
			v = readq(base + PORT_ERROR_CAP);
			ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
			inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
			break;
		}
	} else if (type == FME_ID) {
		if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
			v = readq(base + FME_ERROR_CAP);
			ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
			inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
		}
	}

	/* feature has no interrupts */
	if (!inr) {
		*irq_base = 0;
		*nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	/* the claimed vector range must fit inside the enumeration table */
	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%x\n", fid);
		return -EINVAL;
	}

	/* sanity-check every referenced table entry */
	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%x\n",
				fid);
			return -EINVAL;
		}
	}

	*irq_base = ibase;
	*nr_irqs = inr;

	return 0;
}
1014
1015
1016
1017
1018
1019
1020
1021
/*
 * create_feature_instance - record one sub feature on the current device
 * @binfo: enumeration bookkeeping
 * @ofst: feature offset inside the current enumeration window
 * @size: feature mmio size; 0 means "read it from the DFH"
 * @fid: feature id; 0 means "read it from the DFH"
 *
 * Builds a dfl_feature_info (id, revision, mmio resource, irq range) and
 * queues it on binfo->sub_features for build_info_commit_dev().
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			resource_size_t ofst, resource_size_t size, u16 fid)
{
	unsigned int irq_base, nr_irqs;
	struct dfl_feature_info *finfo;
	u8 revision = 0;
	int ret;
	u64 v;

	/* AFU features have no DFH at @ofst; callers pass size/fid directly */
	if (fid != FEATURE_ID_AFU) {
		v = readq(binfo->ioaddr + ofst);
		revision = FIELD_GET(DFH_REVISION, v);

		/* read feature size and id if inputs are invalid */
		size = size ? size : feature_size(v);
		fid = fid ? fid : feature_id(v);
	}

	/* the feature must fit inside the current enumeration window */
	if (binfo->len - ofst < size)
		return -EINVAL;

	ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
	if (ret)
		return ret;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	finfo->fid = fid;
	finfo->revision = revision;
	finfo->mmio_res.start = binfo->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->irq_base = irq_base;
	finfo->nr_irqs = nr_irqs;

	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
1065
1066 static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
1067 resource_size_t ofst)
1068 {
1069 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
1070 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
1071
1072 WARN_ON(!size);
1073
1074 return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
1075 }
1076
/* True while a feature device is being built and not yet committed. */
#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)
1078
1079 static int parse_feature_afu(struct build_feature_devs_info *binfo,
1080 resource_size_t ofst)
1081 {
1082 if (!is_feature_dev_detected(binfo)) {
1083 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
1084 return -EINVAL;
1085 }
1086
1087 switch (feature_dev_id_type(binfo->feature_dev)) {
1088 case PORT_ID:
1089 return parse_feature_port_afu(binfo, ofst);
1090 default:
1091 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
1092 binfo->feature_dev->name);
1093 }
1094
1095 return 0;
1096 }
1097
1098 static int build_info_prepare(struct build_feature_devs_info *binfo,
1099 resource_size_t start, resource_size_t len)
1100 {
1101 struct device *dev = binfo->dev;
1102 void __iomem *ioaddr;
1103
1104 if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
1105 dev_err(dev, "request region fail, start:%pa, len:%pa\n",
1106 &start, &len);
1107 return -EBUSY;
1108 }
1109
1110 ioaddr = devm_ioremap(dev, start, len);
1111 if (!ioaddr) {
1112 dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
1113 &start, &len);
1114 return -ENOMEM;
1115 }
1116
1117 binfo->start = start;
1118 binfo->len = len;
1119 binfo->ioaddr = ioaddr;
1120
1121 return 0;
1122 }
1123
1124 static void build_info_complete(struct build_feature_devs_info *binfo)
1125 {
1126 devm_iounmap(binfo->dev, binfo->ioaddr);
1127 devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
1128 }
1129
/*
 * parse_feature_fiu - handle a FIU header found at @ofst
 * @binfo: enumeration bookkeeping
 * @ofst: FIU offset inside the current enumeration window
 *
 * A new FIU ends the previous feature device: that one is committed and
 * the enumeration window is re-prepared to start at the new FIU, so all
 * later offsets are relative to it. Then a new feature device of the
 * FIU's type is created, its header feature recorded, and any attached
 * AFU parsed.
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	int ret = 0;
	u32 offset;
	u16 id;
	u64 v;

	if (is_feature_dev_detected(binfo)) {
		/* close the window before committing the previous device */
		build_info_complete(binfo);

		ret = build_info_commit_dev(binfo);
		if (ret)
			return ret;

		/* re-open the window starting at this FIU */
		ret = build_info_prepare(binfo, binfo->start + ofst,
					 binfo->len - ofst);
		if (ret)
			return ret;
	}

	/* window now starts at the FIU, so its DFH sits at offset 0 */
	v = readq(binfo->ioaddr + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id));
	if (ret)
		return ret;

	/* the FIU header itself is the device's first feature */
	ret = create_feature_instance(binfo, 0, 0, 0);
	if (ret)
		return ret;

	/*
	 * check next feature if the fiu has one AFU attached; the NEXT_AFU
	 * register points at it (0 means none).
	 */
	v = readq(binfo->ioaddr + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
1176
1177 static int parse_feature_private(struct build_feature_devs_info *binfo,
1178 resource_size_t ofst)
1179 {
1180 if (!is_feature_dev_detected(binfo)) {
1181 dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
1182 feature_id(readq(binfo->ioaddr + ofst)));
1183 return -EINVAL;
1184 }
1185
1186 return create_feature_instance(binfo, ofst, 0, 0);
1187 }
1188
1189
1190
1191
1192
1193
1194
1195 static int parse_feature(struct build_feature_devs_info *binfo,
1196 resource_size_t ofst)
1197 {
1198 u64 v;
1199 u32 type;
1200
1201 v = readq(binfo->ioaddr + ofst + DFH);
1202 type = FIELD_GET(DFH_TYPE, v);
1203
1204 switch (type) {
1205 case DFH_TYPE_AFU:
1206 return parse_feature_afu(binfo, ofst);
1207 case DFH_TYPE_PRIVATE:
1208 return parse_feature_private(binfo, ofst);
1209 case DFH_TYPE_FIU:
1210 return parse_feature_fiu(binfo, ofst);
1211 default:
1212 dev_info(binfo->dev,
1213 "Feature Type %x is not supported.\n", type);
1214 }
1215
1216 return 0;
1217 }
1218
/*
 * parse_feature_list - walk one device feature list
 * @binfo: enumeration bookkeeping
 * @start: physical start of the list
 * @len: length of the list region
 *
 * Walks DFH headers following their next-offset links until the EOL bit
 * or a zero offset, then commits the last detected feature device.
 * Window cleanup on the early-error returns is handled by the devm
 * resources taken in build_info_prepare().
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH offset. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/* offsets passed down are relative to the current window */
		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing on EOL or a broken (zero) next offset */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}
1258
1259 struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
1260 {
1261 struct dfl_fpga_enum_info *info;
1262
1263 get_device(dev);
1264
1265 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1266 if (!info) {
1267 put_device(dev);
1268 return NULL;
1269 }
1270
1271 info->dev = dev;
1272 INIT_LIST_HEAD(&info->dfls);
1273
1274 return info;
1275 }
1276 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
1277
1278 void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
1279 {
1280 struct dfl_fpga_enum_dfl *tmp, *dfl;
1281 struct device *dev;
1282
1283 if (!info)
1284 return;
1285
1286 dev = info->dev;
1287
1288
1289 list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
1290 list_del(&dfl->node);
1291 devm_kfree(dev, dfl);
1292 }
1293
1294
1295 if (info->irq_table)
1296 devm_kfree(dev, info->irq_table);
1297
1298 devm_kfree(dev, info);
1299 put_device(dev);
1300 }
1301 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
1317 resource_size_t start, resource_size_t len)
1318 {
1319 struct dfl_fpga_enum_dfl *dfl;
1320
1321 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
1322 if (!dfl)
1323 return -ENOMEM;
1324
1325 dfl->start = start;
1326 dfl->len = len;
1327
1328 list_add_tail(&dfl->node, &info->dfls);
1329
1330 return 0;
1331 }
1332 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353 int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
1354 unsigned int nr_irqs, int *irq_table)
1355 {
1356 if (!nr_irqs || !irq_table)
1357 return -EINVAL;
1358
1359 if (info->irq_table)
1360 return -EEXIST;
1361
1362 info->irq_table = devm_kmemdup(info->dev, irq_table,
1363 sizeof(int) * nr_irqs, GFP_KERNEL);
1364 if (!info->irq_table)
1365 return -ENOMEM;
1366
1367 info->nr_irqs = nr_irqs;
1368
1369 return 0;
1370 }
1371 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
1372
1373 static int remove_feature_dev(struct device *dev, void *data)
1374 {
1375 struct platform_device *pdev = to_platform_device(dev);
1376 enum dfl_id_type type = feature_dev_id_type(pdev);
1377 int id = pdev->id;
1378
1379 platform_device_unregister(pdev);
1380
1381 dfl_id_free(type, id);
1382
1383 return 0;
1384 }
1385
1386 static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
1387 {
1388 device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
1389 }
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: enumeration info collected by the caller
 *
 * Creates the container device (with its FPGA region) and walks every
 * registered device feature list, creating feature platform devices for
 * each FIU found.
 *
 * Return: the container device, or an ERR_PTR on failure.
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	cdev->region = fpga_region_register(info->dev, NULL, NULL);
	if (IS_ERR(cdev->region)) {
		ret = PTR_ERR(cdev->region);
		goto free_cdev_exit;
	}

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices. Each device feature
	 * list was registered via dfl_fpga_enum_info_add_dfl(); any
	 * failure tears down all feature devices created so far.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
1464
1465
1466
1467
1468
1469
1470
1471
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature platform devices created
 * under it by dfl_fpga_feature_devs_enumerate().
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	mutex_lock(&cdev->lock);
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/*
		 * Released (unregistered) ports are not children of the
		 * region, so free their ids and drop the reference taken
		 * at creation here; registered ports are handled by
		 * remove_feature_devs() below.
		 */
		if (!device_is_registered(&port_dev->dev)) {
			dfl_id_free(feature_dev_id_type(port_dev),
				    port_dev->id);
			platform_device_put(port_dev);
		}

		list_del(&pdata->node);
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516 struct platform_device *
1517 __dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1518 int (*match)(struct platform_device *, void *))
1519 {
1520 struct dfl_feature_platform_data *pdata;
1521 struct platform_device *port_dev;
1522
1523 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1524 port_dev = pdata->dev;
1525
1526 if (match(port_dev, data) && get_device(&port_dev->dev))
1527 return port_dev;
1528 }
1529
1530 return NULL;
1531 }
1532 EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1533
1534 static int __init dfl_fpga_init(void)
1535 {
1536 int ret;
1537
1538 ret = bus_register(&dfl_bus_type);
1539 if (ret)
1540 return ret;
1541
1542 dfl_ids_init();
1543
1544 ret = dfl_chardev_init();
1545 if (ret) {
1546 dfl_ids_destroy();
1547 bus_unregister(&dfl_bus_type);
1548 }
1549
1550 return ret;
1551 }
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows the user to release a port platform device. The port
 * must not currently be in use (dfl_feature_dev_use_begin() with excl=true
 * must succeed) and must still be registered.
 *
 * Return: 0 on success, negative error code otherwise (-ENODEV if no such
 * port, -EBUSY if already released or in use).
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* already released */
	if (!device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	pdata = dev_get_platdata(&port_pdev->dev);

	/* grab exclusive use of the port so nobody else can open it */
	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, true);
	mutex_unlock(&pdata->lock);
	if (ret)
		goto put_dev_exit;

	/* del (not unregister): the device can be re-added on assign */
	platform_device_del(port_pdev);
	cdev->released_port_num++;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows the user to assign a previously released port back:
 * the port platform device is re-added and its exclusive-use marker
 * (taken in dfl_fpga_cdev_release_port()) is dropped.
 *
 * Return: 0 on success, negative error code otherwise (-ENODEV if no such
 * port, -EBUSY if the port was never released).
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* still registered, i.e. never released */
	if (device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	ret = platform_device_add(port_pdev);
	if (ret)
		goto put_dev_exit;

	pdata = dev_get_platdata(&port_pdev->dev);

	/* release the exclusive use taken at release time */
	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);
	mutex_unlock(&pdata->lock);

	cdev->released_port_num--;
put_dev_exit:
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
1645
/*
 * Set the access-control field of the given port's offset register in the
 * FME header to either VF or PF mode via read-modify-write of the MMIO
 * register.
 */
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}
1662
/* convenience wrappers for the two access modes */
#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674 void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1675 {
1676 struct dfl_feature_platform_data *pdata;
1677
1678 mutex_lock(&cdev->lock);
1679 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1680 if (device_is_registered(&pdata->dev->dev))
1681 continue;
1682
1683 config_port_pf_mode(cdev->fme_dev, pdata->id);
1684 }
1685 mutex_unlock(&cdev->lock);
1686 }
1687 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
/**
 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
 *
 * @cdev: parent container device.
 * @num_vfs: number of VFs to be configured.
 *
 * Switch every released (unregistered) port under @cdev to VF access mode.
 * The number of requested VFs must exactly match the number of released
 * ports.
 *
 * Return: 0 on success, -EINVAL if @num_vfs does not match the released
 * port count.
 */
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
	struct dfl_feature_platform_data *pdata;
	int ret = 0;

	mutex_lock(&cdev->lock);

	/*
	 * each VF maps to exactly one released port, so the requested VF
	 * count must equal the number of ports released so far.
	 */
	if (cdev->released_port_num != num_vfs) {
		ret = -EINVAL;
		goto done;
	}

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		/* only released ports change mode */
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_vf_mode(cdev->fme_dev, pdata->id);
	}
done:
	mutex_unlock(&cdev->lock);
	return ret;
}
1727
/*
 * Interrupt handler for DFL feature irqs: forward the interrupt to
 * userspace by signalling the eventfd bound in do_set_irq_trigger().
 */
static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger, 1);
	return IRQ_HANDLED;
}
1735
/*
 * Bind or unbind one feature irq to an eventfd.
 *
 * Any existing binding for irq_ctx[idx] is always torn down first. If
 * @fd is negative the call is an unbind and stops there; otherwise the
 * eventfd identified by @fd is looked up and request_irq() installs
 * dfl_irq_handler with the eventfd context as its argument.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	/* free any previously bound trigger before (re)binding */
	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	/* negative fd means unbind only */
	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	/* request_irq failed: unwind the eventfd ref and the name */
	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs. Unbind the related irq if fds[n] is
 *	 negative. Unbind "count" specified number of irqs if fds ptr is NULL.
 *
 * Bind given eventfds with irqs in this dfl sub feature. On partial failure,
 * the bindings made so far in this call are rolled back.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
			      unsigned int count, int32_t *fds)
{
	unsigned int i;
	int ret = 0;

	/* overflow */
	if (unlikely(start + count < start))
		return -EINVAL;

	/* exceeds nr_irqs */
	if (start + count > feature->nr_irqs)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int fd = fds ? fds[i] : -1;

		ret = do_set_irq_trigger(feature, start + i, fd);
		if (ret) {
			/* roll back the triggers already set in this call */
			while (i--)
				do_set_irq_trigger(feature, start + i, -1);
			break;
		}
	}

	return ret;
}
1824
1825
1826
1827
1828
1829
1830
1831
1832
/**
 * dfl_feature_ioctl_get_num_irqs - ioctl helper: return the number of irqs
 * of this feature to userspace.
 *
 * @pdev: the feature device (unused here).
 * @feature: the dfl sub feature.
 * @arg: ioctl argument, a __u32 __user pointer to receive the count.
 *
 * Return: 0 on success, -EFAULT if the user pointer is bad.
 */
long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
				    struct dfl_feature *feature,
				    unsigned long arg)
{
	return put_user(feature->nr_irqs, (__u32 __user *)arg);
}
1839 EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
1840
1841
1842
1843
1844
1845
1846
1847
1848
/**
 * dfl_feature_ioctl_set_irq - ioctl helper: bind eventfds to feature irqs.
 *
 * @pdev: the feature device.
 * @feature: the dfl sub feature.
 * @arg: ioctl argument: a struct dfl_fpga_irq_set header followed by
 *	 hdr.count eventfd descriptors (s32 each) in user memory.
 *
 * Return: 0 on success, negative error code otherwise (-ENOENT if the
 * feature has no irqs, -EFAULT/-EINVAL on bad user input).
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* reject empty, out-of-range, or overflowing [start, start+count) */
	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	/* the fd array follows the header in the user buffer */
	fds = memdup_user((void __user *)(arg + sizeof(hdr)),
			  hdr.count * sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&pdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&pdata->lock);

	kfree(fds);
	return ret;
}
1881
/* Module exit: tear down in reverse order of dfl_fpga_init(). */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
	bus_unregister(&dfl_bus_type);
}
1888
1889 module_init(dfl_fpga_init);
1890 module_exit(dfl_fpga_exit);
1891
1892 MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
1893 MODULE_AUTHOR("Intel Corporation");
1894 MODULE_LICENSE("GPL v2");