0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #include <linux/hwmon.h>
0018 #include <linux/hwmon-sysfs.h>
0019 #include <linux/kernel.h>
0020 #include <linux/module.h>
0021 #include <linux/uaccess.h>
0022 #include <linux/fpga-dfl.h>
0023
0024 #include "dfl.h"
0025 #include "dfl-fme.h"
0026
0027 static ssize_t ports_num_show(struct device *dev,
0028 struct device_attribute *attr, char *buf)
0029 {
0030 void __iomem *base;
0031 u64 v;
0032
0033 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0034
0035 v = readq(base + FME_HDR_CAP);
0036
0037 return scnprintf(buf, PAGE_SIZE, "%u\n",
0038 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
0039 }
0040 static DEVICE_ATTR_RO(ports_num);
0041
0042
0043
0044
0045
0046 static ssize_t bitstream_id_show(struct device *dev,
0047 struct device_attribute *attr, char *buf)
0048 {
0049 void __iomem *base;
0050 u64 v;
0051
0052 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0053
0054 v = readq(base + FME_HDR_BITSTREAM_ID);
0055
0056 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
0057 }
0058 static DEVICE_ATTR_RO(bitstream_id);
0059
0060
0061
0062
0063
0064 static ssize_t bitstream_metadata_show(struct device *dev,
0065 struct device_attribute *attr, char *buf)
0066 {
0067 void __iomem *base;
0068 u64 v;
0069
0070 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0071
0072 v = readq(base + FME_HDR_BITSTREAM_MD);
0073
0074 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
0075 }
0076 static DEVICE_ATTR_RO(bitstream_metadata);
0077
0078 static ssize_t cache_size_show(struct device *dev,
0079 struct device_attribute *attr, char *buf)
0080 {
0081 void __iomem *base;
0082 u64 v;
0083
0084 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0085
0086 v = readq(base + FME_HDR_CAP);
0087
0088 return sprintf(buf, "%u\n",
0089 (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
0090 }
0091 static DEVICE_ATTR_RO(cache_size);
0092
0093 static ssize_t fabric_version_show(struct device *dev,
0094 struct device_attribute *attr, char *buf)
0095 {
0096 void __iomem *base;
0097 u64 v;
0098
0099 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0100
0101 v = readq(base + FME_HDR_CAP);
0102
0103 return sprintf(buf, "%u\n",
0104 (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
0105 }
0106 static DEVICE_ATTR_RO(fabric_version);
0107
0108 static ssize_t socket_id_show(struct device *dev,
0109 struct device_attribute *attr, char *buf)
0110 {
0111 void __iomem *base;
0112 u64 v;
0113
0114 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
0115
0116 v = readq(base + FME_HDR_CAP);
0117
0118 return sprintf(buf, "%u\n",
0119 (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
0120 }
0121 static DEVICE_ATTR_RO(socket_id);
0122
/* sysfs attributes exported by the FME header sub-feature. */
static struct attribute *fme_hdr_attrs[] = {
	&dev_attr_ports_num.attr,
	&dev_attr_bitstream_id.attr,
	&dev_attr_bitstream_metadata.attr,
	&dev_attr_cache_size.attr,
	&dev_attr_fabric_version.attr,
	&dev_attr_socket_id.attr,
	NULL,
};

/* Attribute group added to the FME device via fme_dev_groups below. */
static const struct attribute_group fme_hdr_group = {
	.attrs = fme_hdr_attrs,
};
0136
0137 static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata,
0138 unsigned long arg)
0139 {
0140 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
0141 int port_id;
0142
0143 if (get_user(port_id, (int __user *)arg))
0144 return -EFAULT;
0145
0146 return dfl_fpga_cdev_release_port(cdev, port_id);
0147 }
0148
0149 static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata,
0150 unsigned long arg)
0151 {
0152 struct dfl_fpga_cdev *cdev = pdata->dfl_cdev;
0153 int port_id;
0154
0155 if (get_user(port_id, (int __user *)arg))
0156 return -EFAULT;
0157
0158 return dfl_fpga_cdev_assign_port(cdev, port_id);
0159 }
0160
0161 static long fme_hdr_ioctl(struct platform_device *pdev,
0162 struct dfl_feature *feature,
0163 unsigned int cmd, unsigned long arg)
0164 {
0165 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0166
0167 switch (cmd) {
0168 case DFL_FPGA_FME_PORT_RELEASE:
0169 return fme_hdr_ioctl_release_port(pdata, arg);
0170 case DFL_FPGA_FME_PORT_ASSIGN:
0171 return fme_hdr_ioctl_assign_port(pdata, arg);
0172 }
0173
0174 return -ENODEV;
0175 }
0176
/* Match table: this driver handles the FME header feature id. */
static const struct dfl_feature_id fme_hdr_id_table[] = {
	{.id = FME_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops fme_hdr_ops = {
	.ioctl = fme_hdr_ioctl,
};
0185
/* FME Thermal Sub Feature register offsets and field layouts */
#define FME_THERM_THRESHOLD	0x8
#define TEMP_THRESHOLD1		GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN	BIT_ULL(7)
#define TEMP_THRESHOLD2		GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN	BIT_ULL(15)
#define TRIP_THRESHOLD		GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS	BIT_ULL(32)	/* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS	BIT_ULL(33)	/* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle), 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY	BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1	0x10
#define FPGA_TEMPERATURE	GENMASK_ULL(6, 0)	/* degrees Celsius */

#define FME_THERM_CAP		0x20
#define THERM_NO_THROTTLE	BIT_ULL(0)	/* set if throttling unsupported */

#define MD_PRE_DEG
0204
0205 static bool fme_thermal_throttle_support(void __iomem *base)
0206 {
0207 u64 v = readq(base + FME_THERM_CAP);
0208
0209 return FIELD_GET(THERM_NO_THROTTLE, v) ? false : true;
0210 }
0211
0212 static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
0213 enum hwmon_sensor_types type,
0214 u32 attr, int channel)
0215 {
0216 const struct dfl_feature *feature = drvdata;
0217
0218
0219 if (attr == hwmon_temp_input)
0220 return 0444;
0221
0222 return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
0223 }
0224
0225 static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
0226 u32 attr, int channel, long *val)
0227 {
0228 struct dfl_feature *feature = dev_get_drvdata(dev);
0229 u64 v;
0230
0231 switch (attr) {
0232 case hwmon_temp_input:
0233 v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
0234 *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
0235 break;
0236 case hwmon_temp_max:
0237 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0238 *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
0239 break;
0240 case hwmon_temp_crit:
0241 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0242 *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
0243 break;
0244 case hwmon_temp_emergency:
0245 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0246 *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
0247 break;
0248 case hwmon_temp_max_alarm:
0249 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0250 *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
0251 break;
0252 case hwmon_temp_crit_alarm:
0253 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0254 *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
0255 break;
0256 default:
0257 return -EOPNOTSUPP;
0258 }
0259
0260 return 0;
0261 }
0262
/* Read-only thermal hwmon device: no write callback is provided. */
static const struct hwmon_ops thermal_hwmon_ops = {
	.is_visible = thermal_hwmon_attrs_visible,
	.read = thermal_hwmon_read,
};

/* One temperature channel with input, thresholds and alarms. */
static const struct hwmon_channel_info *thermal_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
				 HWMON_T_MAX   | HWMON_T_MAX_ALARM |
				 HWMON_T_CRIT  | HWMON_T_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
	.ops = &thermal_hwmon_ops,
	.info = thermal_hwmon_info,
};
0279
0280 static ssize_t temp1_max_policy_show(struct device *dev,
0281 struct device_attribute *attr, char *buf)
0282 {
0283 struct dfl_feature *feature = dev_get_drvdata(dev);
0284 u64 v;
0285
0286 v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
0287
0288 return sprintf(buf, "%u\n",
0289 (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
0290 }
0291
0292 static DEVICE_ATTR_RO(temp1_max_policy);
0293
/* Non-standard thermal attributes attached to the hwmon device. */
static struct attribute *thermal_extra_attrs[] = {
	&dev_attr_temp1_max_policy.attr,
	NULL,
};
0298
0299 static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
0300 struct attribute *attr, int index)
0301 {
0302 struct device *dev = kobj_to_dev(kobj);
0303 struct dfl_feature *feature = dev_get_drvdata(dev);
0304
0305 return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
0306 }
0307
static const struct attribute_group thermal_extra_group = {
	.attrs		= thermal_extra_attrs,
	.is_visible	= thermal_extra_attrs_visible,
};
/* generates thermal_extra_groups[] for hwmon registration */
__ATTRIBUTE_GROUPS(thermal_extra);
0313
0314 static int fme_thermal_mgmt_init(struct platform_device *pdev,
0315 struct dfl_feature *feature)
0316 {
0317 struct device *hwmon;
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337 hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
0338 "dfl_fme_thermal", feature,
0339 &thermal_hwmon_chip_info,
0340 thermal_extra_groups);
0341 if (IS_ERR(hwmon)) {
0342 dev_err(&pdev->dev, "Fail to register thermal hwmon\n");
0343 return PTR_ERR(hwmon);
0344 }
0345
0346 return 0;
0347 }
0348
/* Match table and ops for the thermal management sub-feature. */
static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_THERMAL_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
	.init = fme_thermal_mgmt_init,
};
0357
/* FME Power Sub Feature register offsets and field layouts */
#define FME_PWR_STATUS		0x8
#define FME_LATENCY_TOLERANCE	BIT_ULL(18)
#define PWR_CONSUMED		GENMASK_ULL(17, 0)	/* watts */

#define FME_PWR_THRESHOLD	0x10
#define PWR_THRESHOLD1		GENMASK_ULL(6, 0)	/* in watts */
#define PWR_THRESHOLD2		GENMASK_ULL(14, 8)	/* in watts */
#define PWR_THRESHOLD_MAX	0x7f			/* in watts */
#define PWR_THRESHOLD1_STATUS	BIT_ULL(16)	/* threshold1 reached */
#define PWR_THRESHOLD2_STATUS	BIT_ULL(17)	/* threshold2 reached */

#define FME_PWR_XEON_LIMIT	0x18
#define XEON_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 watts */
#define XEON_PWR_EN		BIT_ULL(15)	/* limit valid if set */
#define FME_PWR_FPGA_LIMIT	0x20
#define FPGA_PWR_LIMIT		GENMASK_ULL(14, 0)	/* in 0.1 watts */
#define FPGA_PWR_EN		BIT_ULL(15)	/* limit valid if set */
0375
0376 static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
0377 u32 attr, int channel, long *val)
0378 {
0379 struct dfl_feature *feature = dev_get_drvdata(dev);
0380 u64 v;
0381
0382 switch (attr) {
0383 case hwmon_power_input:
0384 v = readq(feature->ioaddr + FME_PWR_STATUS);
0385 *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
0386 break;
0387 case hwmon_power_max:
0388 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0389 *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
0390 break;
0391 case hwmon_power_crit:
0392 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0393 *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
0394 break;
0395 case hwmon_power_max_alarm:
0396 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0397 *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
0398 break;
0399 case hwmon_power_crit_alarm:
0400 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0401 *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
0402 break;
0403 default:
0404 return -EOPNOTSUPP;
0405 }
0406
0407 return 0;
0408 }
0409
0410 static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
0411 u32 attr, int channel, long val)
0412 {
0413 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev->parent);
0414 struct dfl_feature *feature = dev_get_drvdata(dev);
0415 int ret = 0;
0416 u64 v;
0417
0418 val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
0419
0420 mutex_lock(&pdata->lock);
0421
0422 switch (attr) {
0423 case hwmon_power_max:
0424 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0425 v &= ~PWR_THRESHOLD1;
0426 v |= FIELD_PREP(PWR_THRESHOLD1, val);
0427 writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
0428 break;
0429 case hwmon_power_crit:
0430 v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
0431 v &= ~PWR_THRESHOLD2;
0432 v |= FIELD_PREP(PWR_THRESHOLD2, val);
0433 writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
0434 break;
0435 default:
0436 ret = -EOPNOTSUPP;
0437 break;
0438 }
0439
0440 mutex_unlock(&pdata->lock);
0441
0442 return ret;
0443 }
0444
0445 static umode_t power_hwmon_attrs_visible(const void *drvdata,
0446 enum hwmon_sensor_types type,
0447 u32 attr, int channel)
0448 {
0449 switch (attr) {
0450 case hwmon_power_input:
0451 case hwmon_power_max_alarm:
0452 case hwmon_power_crit_alarm:
0453 return 0444;
0454 case hwmon_power_max:
0455 case hwmon_power_crit:
0456 return 0644;
0457 }
0458
0459 return 0;
0460 }
0461
static const struct hwmon_ops power_hwmon_ops = {
	.is_visible = power_hwmon_attrs_visible,
	.read = power_hwmon_read,
	.write = power_hwmon_write,
};

/* One power channel with input, writable thresholds and alarms. */
static const struct hwmon_channel_info *power_hwmon_info[] = {
	HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
				  HWMON_P_MAX  | HWMON_P_MAX_ALARM |
				  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
	NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
	.ops = &power_hwmon_ops,
	.info = power_hwmon_info,
};
0479
0480 static ssize_t power1_xeon_limit_show(struct device *dev,
0481 struct device_attribute *attr, char *buf)
0482 {
0483 struct dfl_feature *feature = dev_get_drvdata(dev);
0484 u16 xeon_limit = 0;
0485 u64 v;
0486
0487 v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);
0488
0489 if (FIELD_GET(XEON_PWR_EN, v))
0490 xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);
0491
0492 return sprintf(buf, "%u\n", xeon_limit * 100000);
0493 }
0494
0495 static ssize_t power1_fpga_limit_show(struct device *dev,
0496 struct device_attribute *attr, char *buf)
0497 {
0498 struct dfl_feature *feature = dev_get_drvdata(dev);
0499 u16 fpga_limit = 0;
0500 u64 v;
0501
0502 v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);
0503
0504 if (FIELD_GET(FPGA_PWR_EN, v))
0505 fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);
0506
0507 return sprintf(buf, "%u\n", fpga_limit * 100000);
0508 }
0509
0510 static ssize_t power1_ltr_show(struct device *dev,
0511 struct device_attribute *attr, char *buf)
0512 {
0513 struct dfl_feature *feature = dev_get_drvdata(dev);
0514 u64 v;
0515
0516 v = readq(feature->ioaddr + FME_PWR_STATUS);
0517
0518 return sprintf(buf, "%u\n",
0519 (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
0520 }
0521
static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

/* Non-standard power attributes attached to the hwmon device. */
static struct attribute *power_extra_attrs[] = {
	&dev_attr_power1_xeon_limit.attr,
	&dev_attr_power1_fpga_limit.attr,
	&dev_attr_power1_ltr.attr,
	NULL
};

/* defines power_extra_group and power_extra_groups[] */
ATTRIBUTE_GROUPS(power_extra);
0534
0535 static int fme_power_mgmt_init(struct platform_device *pdev,
0536 struct dfl_feature *feature)
0537 {
0538 struct device *hwmon;
0539
0540 hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
0541 "dfl_fme_power", feature,
0542 &power_hwmon_chip_info,
0543 power_extra_groups);
0544 if (IS_ERR(hwmon)) {
0545 dev_err(&pdev->dev, "Fail to register power hwmon\n");
0546 return PTR_ERR(hwmon);
0547 }
0548
0549 return 0;
0550 }
0551
/* Match table and ops for the power management sub-feature. */
static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_POWER_MGMT,},
	{0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
	.init = fme_power_mgmt_init,
};
0560
0561 static struct dfl_feature_driver fme_feature_drvs[] = {
0562 {
0563 .id_table = fme_hdr_id_table,
0564 .ops = &fme_hdr_ops,
0565 },
0566 {
0567 .id_table = fme_pr_mgmt_id_table,
0568 .ops = &fme_pr_mgmt_ops,
0569 },
0570 {
0571 .id_table = fme_global_err_id_table,
0572 .ops = &fme_global_err_ops,
0573 },
0574 {
0575 .id_table = fme_thermal_mgmt_id_table,
0576 .ops = &fme_thermal_mgmt_ops,
0577 },
0578 {
0579 .id_table = fme_power_mgmt_id_table,
0580 .ops = &fme_power_mgmt_ops,
0581 },
0582 {
0583 .id_table = fme_perf_id_table,
0584 .ops = &fme_perf_ops,
0585 },
0586 {
0587 .ops = NULL,
0588 },
0589 };
0590
/*
 * DFL_FPGA_CHECK_EXTENSION handler: no extensions are currently supported,
 * so always report 0.  Parameters are kept for future extension queries.
 */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
0597
/*
 * open() for the FME character device.  Takes pdata->lock while acquiring
 * the device use count; O_EXCL requests exclusive open, which
 * dfl_feature_dev_use_begin() rejects if the device is already opened.
 * private_data is only set on success.
 */
static int fme_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
	int ret;

	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = pdata;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}
0618
/*
 * release() for the FME character device.  Drops the use count and, on the
 * last close, disarms every sub-feature's irq triggers so no stale eventfds
 * remain registered after userspace goes away.
 */
static int fme_release(struct inode *inode, struct file *filp)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	/* reset irq triggers of all sub features on last close */
	if (!dfl_feature_dev_use_count(pdata))
		dfl_fpga_dev_for_each_feature(pdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}
0638
/*
 * Top-level ioctl dispatcher for the FME character device.  Generic DFL
 * commands are handled here; everything else is offered to each
 * sub-feature in turn.
 */
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return fme_ioctl_check_extension(pdata, arg);
	default:
		/*
		 * Let sub-feature's ioctl function handle the cmd.  A
		 * sub-feature returns -ENODEV when the cmd is not for it,
		 * in which case we keep iterating; any other return value
		 * (success or error) ends the dispatch.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f) {
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
		}
	}

	/* no handler claimed the command */
	return -EINVAL;
}
0671
0672 static int fme_dev_init(struct platform_device *pdev)
0673 {
0674 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0675 struct dfl_fme *fme;
0676
0677 fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
0678 if (!fme)
0679 return -ENOMEM;
0680
0681 fme->pdata = pdata;
0682
0683 mutex_lock(&pdata->lock);
0684 dfl_fpga_pdata_set_private(pdata, fme);
0685 mutex_unlock(&pdata->lock);
0686
0687 return 0;
0688 }
0689
0690 static void fme_dev_destroy(struct platform_device *pdev)
0691 {
0692 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
0693
0694 mutex_lock(&pdata->lock);
0695 dfl_fpga_pdata_set_private(pdata, NULL);
0696 mutex_unlock(&pdata->lock);
0697 }
0698
/* file_operations for the FME character device node. */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl	= fme_ioctl,
};
0705
/*
 * Probe: set up private state, initialize every matched sub-feature, then
 * expose the char device.  Unwinds in strict reverse order via the goto
 * cleanup chain on failure.
 */
static int fme_probe(struct platform_device *pdev)
{
	int ret;

	ret = fme_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
	if (ret)
		goto feature_uinit;

	return 0;

feature_uinit:
	dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
	fme_dev_destroy(pdev);
exit:
	return ret;
}
0731
/* Remove: tear down in exact reverse order of fme_probe(). */
static int fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);

	return 0;
}
0740
/* sysfs groups attached to the FME platform device at bind time. */
static const struct attribute_group *fme_dev_groups[] = {
	&fme_hdr_group,
	&fme_global_err_group,
	NULL
};
0746
/* Platform driver bound by name to the DFL-enumerated FME feature device. */
static struct platform_driver fme_driver = {
	.driver	= {
		.name		= DFL_FPGA_FEATURE_DEV_FME,
		.dev_groups	= fme_dev_groups,
	},
	.probe	= fme_probe,
	.remove	= fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");