
0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
0004  *
0005  * Copyright 2019 Intel Corporation, Inc.
0006  *
0007  * Authors:
0008  *   Wu Hao <hao.wu@linux.intel.com>
0009  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
0010  *   Joseph Grecco <joe.grecco@intel.com>
0011  *   Enno Luebbers <enno.luebbers@intel.com>
0012  *   Tim Whisonant <tim.whisonant@intel.com>
0013  *   Ananda Ravuri <ananda.ravuri@intel.com>
0014  *   Mitchel Henry <henry.mitchel@intel.com>
0015  */
0016 
0017 #include <linux/fpga-dfl.h>
0018 #include <linux/uaccess.h>
0019 
0020 #include "dfl-afu.h"
0021 
/* Register offsets within the port error private feature MMIO region. */
#define PORT_ERROR_MASK     0x8
#define PORT_ERROR      0x10
#define PORT_FIRST_ERROR    0x18
#define PORT_MALFORMED_REQ0 0x20
#define PORT_MALFORMED_REQ1 0x28

/* All-ones value: written to PORT_ERROR_MASK it masks every error bit. */
#define ERROR_MASK      GENMASK_ULL(63, 0)
0029 
0030 /* mask or unmask port errors by the error mask register. */
0031 static void __afu_port_err_mask(struct device *dev, bool mask)
0032 {
0033     void __iomem *base;
0034 
0035     base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
0036 
0037     writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
0038 }
0039 
/*
 * Locked wrapper around __afu_port_err_mask(): takes pdata->lock so the
 * mask register write does not race with afu_port_err_clear().
 */
static void afu_port_err_mask(struct device *dev, bool mask)
{
    struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);

    mutex_lock(&pdata->lock);
    __afu_port_err_mask(dev, mask);
    mutex_unlock(&pdata->lock);
}
0048 
0049 /* clear port errors. */
/*
 * Clear port errors.
 *
 * @dev: the port device.
 * @err: the error value userspace expects to clear; must match the current
 *       contents of the PORT_ERROR register, otherwise -EINVAL is returned
 *       and nothing is cleared (guards against clearing errors that arrived
 *       after userspace read the register).
 *
 * Returns 0 on success, -EBUSY if the device is stuck in the AP6 power
 * state or the port could not be halted, -EINVAL on a stale @err value.
 * A failure to re-enable the port takes precedence over earlier errors.
 */
static int afu_port_err_clear(struct device *dev, u64 err)
{
    struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
    struct platform_device *pdev = to_platform_device(dev);
    void __iomem *base_err, *base_hdr;
    int enable_ret = 0, ret = -EBUSY;
    u64 v;

    base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
    base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

    mutex_lock(&pdata->lock);

    /*
     * clear Port Errors
     *
     * - Check for AP6 State
     * - Halt Port by keeping Port in reset
     * - Set PORT Error mask to all 1 to mask errors
     * - Clear all errors
     * - Set Port mask to all 0 to enable errors
     * - All errors start capturing new errors
     * - Enable Port by pulling the port out of reset
     */

    /* if device is still in AP6 power state, can not clear any error. */
    v = readq(base_hdr + PORT_HDR_STS);
    if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
        dev_err(dev, "Could not clear errors, device in AP6 state.\n");
        goto done;
    }

    /* Halt Port by keeping Port in reset */
    ret = __afu_port_disable(pdev);
    if (ret)
        goto done;

    /* Mask all errors */
    __afu_port_err_mask(dev, true);

    /* Clear errors if err input matches with current port errors.*/
    v = readq(base_err + PORT_ERROR);

    if (v == err) {
        /* error registers are write-1-to-clear: write back what we read */
        writeq(v, base_err + PORT_ERROR);

        v = readq(base_err + PORT_FIRST_ERROR);
        writeq(v, base_err + PORT_FIRST_ERROR);
    } else {
        dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
             __func__, v, err);
        ret = -EINVAL;
    }

    /* Clear mask */
    __afu_port_err_mask(dev, false);

    /* Enable the Port by clearing the reset */
    enable_ret = __afu_port_enable(pdev);

done:
    mutex_unlock(&pdata->lock);
    /* a port stuck in reset is the more severe condition — report it first */
    return enable_ret ? enable_ret : ret;
}
0114 
0115 static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
0116                char *buf)
0117 {
0118     struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
0119     void __iomem *base;
0120     u64 error;
0121 
0122     base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
0123 
0124     mutex_lock(&pdata->lock);
0125     error = readq(base + PORT_ERROR);
0126     mutex_unlock(&pdata->lock);
0127 
0128     return sprintf(buf, "0x%llx\n", (unsigned long long)error);
0129 }
0130 
0131 static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
0132                 const char *buff, size_t count)
0133 {
0134     u64 value;
0135     int ret;
0136 
0137     if (kstrtou64(buff, 0, &value))
0138         return -EINVAL;
0139 
0140     ret = afu_port_err_clear(dev, value);
0141 
0142     return ret ? ret : count;
0143 }
0144 static DEVICE_ATTR_RW(errors);
0145 
0146 static ssize_t first_error_show(struct device *dev,
0147                 struct device_attribute *attr, char *buf)
0148 {
0149     struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
0150     void __iomem *base;
0151     u64 error;
0152 
0153     base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
0154 
0155     mutex_lock(&pdata->lock);
0156     error = readq(base + PORT_FIRST_ERROR);
0157     mutex_unlock(&pdata->lock);
0158 
0159     return sprintf(buf, "0x%llx\n", (unsigned long long)error);
0160 }
0161 static DEVICE_ATTR_RO(first_error);
0162 
0163 static ssize_t first_malformed_req_show(struct device *dev,
0164                     struct device_attribute *attr,
0165                     char *buf)
0166 {
0167     struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
0168     void __iomem *base;
0169     u64 req0, req1;
0170 
0171     base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
0172 
0173     mutex_lock(&pdata->lock);
0174     req0 = readq(base + PORT_MALFORMED_REQ0);
0175     req1 = readq(base + PORT_MALFORMED_REQ1);
0176     mutex_unlock(&pdata->lock);
0177 
0178     return sprintf(buf, "0x%016llx%016llx\n",
0179                (unsigned long long)req1, (unsigned long long)req0);
0180 }
0181 static DEVICE_ATTR_RO(first_malformed_req);
0182 
/* sysfs attributes exposed under the "errors" group. */
static struct attribute *port_err_attrs[] = {
    &dev_attr_errors.attr,
    &dev_attr_first_error.attr,
    &dev_attr_first_malformed_req.attr,
    NULL,
};
0189 
0190 static umode_t port_err_attrs_visible(struct kobject *kobj,
0191                       struct attribute *attr, int n)
0192 {
0193     struct device *dev = kobj_to_dev(kobj);
0194 
0195     /*
0196      * sysfs entries are visible only if related private feature is
0197      * enumerated.
0198      */
0199     if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
0200         return 0;
0201 
0202     return attr->mode;
0203 }
0204 
/* "errors" sysfs group; visible only when the error feature exists. */
const struct attribute_group port_err_group = {
    .name       = "errors",
    .attrs      = port_err_attrs,
    .is_visible = port_err_attrs_visible,
};
0210 
/* Feature init: unmask all port errors so capture starts immediately. */
static int port_err_init(struct platform_device *pdev,
             struct dfl_feature *feature)
{
    afu_port_err_mask(&pdev->dev, false);

    return 0;
}
0218 
/* Feature teardown: mask all port errors before the feature goes away. */
static void port_err_uinit(struct platform_device *pdev,
               struct dfl_feature *feature)
{
    afu_port_err_mask(&pdev->dev, true);
}
0224 
0225 static long
0226 port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
0227            unsigned int cmd, unsigned long arg)
0228 {
0229     switch (cmd) {
0230     case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
0231         return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
0232     case DFL_FPGA_PORT_ERR_SET_IRQ:
0233         return dfl_feature_ioctl_set_irq(pdev, feature, arg);
0234     default:
0235         dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
0236         return -ENODEV;
0237     }
0238 }
0239 
/* Feature IDs this driver binds to; zero-terminated. */
const struct dfl_feature_id port_err_id_table[] = {
    {.id = PORT_FEATURE_ID_ERROR,},
    {0,}
};
0244 
/* Operations registered for the port error private feature. */
const struct dfl_feature_ops port_err_ops = {
    .init = port_err_init,
    .uinit = port_err_uinit,
    .ioctl = port_err_ioctl,
};