// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Error Management
 */
#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Restore the normal mask; keep MBP_ERROR masked on revision 0 hardware. */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * The error sysfs entries are only visible if the global error
	 * private feature was enumerated for this device.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* On revision 0 hardware, MBP_ERROR stays masked even when unmasking. */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};