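// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor thermal device mailbox interface for workload type hints.
 */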
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "processor_thermal_device.h"

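/* Mailbox command IDs for reading and writing the workload type hint */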
#define MBOX_CMD_WORKLOAD_TYPE_READ	0x0E
#define MBOX_CMD_WORKLOAD_TYPE_WRITE	0x0F

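/* MMIO offsets of the mailbox data and interface registers, relative to mmio_base */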
#define MBOX_OFFSET_DATA	0x5810
#define MBOX_OFFSET_INTERFACE	0x5818

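/* Bit 31 of the interface register is the run/busy bit; poll it at most MBOX_RETRY_COUNT times */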
#define MBOX_BUSY_BIT		31
#define MBOX_RETRY_COUNT	100

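/* Workload type write payload: bit 31 marks the hint valid, bit 30 is the AC/DC bit */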
#define MBOX_DATA_BIT_VALID	31
#define MBOX_DATA_BIT_AC_DC	30

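/* Serializes all accesses to the mailbox MMIO registers */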
static DEFINE_MUTEX(mbox_lock);

static int wait_for_mbox_ready(struct proc_thermal_device *proc_priv)
{
	u32 retries, data;
	int ret;

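	/* Poll the interface register until the run/busy bit clears or retries run out */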
	retries = MBOX_RETRY_COUNT;
	do {
		data = readl(proc_priv->mmio_base + MBOX_OFFSET_INTERFACE);
		if (data & BIT_ULL(MBOX_BUSY_BIT)) {
			ret = -EBUSY;
			continue;
		}
		ret = 0;
		break;
	} while (--retries);

	return ret;
}

static int send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data)
{
	struct proc_thermal_device *proc_priv;
	u32 reg_data;
	int ret;

	proc_priv = pci_get_drvdata(pdev);

	mutex_lock(&mbox_lock);

	ret = wait_for_mbox_ready(proc_priv);
	if (ret)
		goto unlock_mbox;

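	/* Write the payload, then the command ID with the busy bit set to start the transaction */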
	writel(data, (proc_priv->mmio_base + MBOX_OFFSET_DATA));

	reg_data = BIT_ULL(MBOX_BUSY_BIT) | id;
	writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));

	ret = wait_for_mbox_ready(proc_priv);

unlock_mbox:
	mutex_unlock(&mbox_lock);
	return ret;
}

static int send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp)
{
	struct proc_thermal_device *proc_priv;
	u32 reg_data;
	int ret;

	proc_priv = pci_get_drvdata(pdev);

	mutex_lock(&mbox_lock);

	ret = wait_for_mbox_ready(proc_priv);
	if (ret)
		goto unlock_mbox;

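	/* Start the transaction: write the command ID with the busy bit set */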
	reg_data = BIT_ULL(MBOX_BUSY_BIT) | id;
	writel(reg_data, (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));

	ret = wait_for_mbox_ready(proc_priv);
	if (ret)
		goto unlock_mbox;

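	/* The workload type response is a 32-bit value; other command IDs return 64 bits of data */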
	if (id == MBOX_CMD_WORKLOAD_TYPE_READ)
		*resp = readl(proc_priv->mmio_base + MBOX_OFFSET_DATA);
	else
		*resp = readq(proc_priv->mmio_base + MBOX_OFFSET_DATA);

unlock_mbox:
	mutex_unlock(&mbox_lock);
	return ret;
}

int processor_thermal_send_mbox_read_cmd(struct pci_dev *pdev, u16 id, u64 *resp)
{
	return send_mbox_read_cmd(pdev, id, resp);
}
EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_read_cmd, INT340X_THERMAL);

int processor_thermal_send_mbox_write_cmd(struct pci_dev *pdev, u16 id, u32 data)
{
	return send_mbox_write_cmd(pdev, id, data);
}
EXPORT_SYMBOL_NS_GPL(processor_thermal_send_mbox_write_cmd, INT340X_THERMAL);

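/* Supported workload type hints; the array index is the value exchanged with the mailbox */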
static const char * const workload_types[] = {
	"none",
	"idle",
	"semi_active",
	"bursty",
	"sustained",
	"battery_life",
	NULL
};

static ssize_t workload_available_types_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	int i = 0;
	int ret = 0;

	while (workload_types[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", workload_types[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

static DEVICE_ATTR_RO(workload_available_types);

static ssize_t workload_type_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	char str_preference[15];
	u32 data = 0;
	ssize_t ret;

	ret = sscanf(buf, "%14s", str_preference);
	if (ret != 1)
		return -EINVAL;

	ret = match_string(workload_types, -1, str_preference);
	if (ret < 0)
		return ret;

	ret &= 0xff;

	/* Any type other than "none" is sent with the valid and AC/DC bits set */
	if (ret)
		data = BIT(MBOX_DATA_BIT_VALID) | BIT(MBOX_DATA_BIT_AC_DC);

	data |= ret;

	ret = send_mbox_write_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_WRITE, data);
	if (ret)
		return ret;

	return count;
}

static ssize_t workload_type_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u64 cmd_resp;
	int ret;

	ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp);
	if (ret)
		return ret;

	cmd_resp &= 0xff;

	/* Exclude the NULL terminator from the valid index range */
	if (cmd_resp >= ARRAY_SIZE(workload_types) - 1)
		return -EINVAL;

	return sprintf(buf, "%s\n", workload_types[cmd_resp]);
}

static DEVICE_ATTR_RW(workload_type);

static struct attribute *workload_req_attrs[] = {
	&dev_attr_workload_available_types.attr,
	&dev_attr_workload_type.attr,
	NULL
};

static const struct attribute_group workload_req_attribute_group = {
	.attrs = workload_req_attrs,
	.name = "workload_request"
};

static bool workload_req_created;

int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
{
	u64 cmd_resp;
	int ret;

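	/* Probe for workload type support; if the read fails, skip the sysfs group without failing the add */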
	ret = send_mbox_read_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, &cmd_resp);
	if (ret)
		return 0;

	ret = sysfs_create_group(&pdev->dev.kobj, &workload_req_attribute_group);
	if (ret)
		return ret;

	workload_req_created = true;

	return 0;
}
EXPORT_SYMBOL_GPL(proc_thermal_mbox_add);

void proc_thermal_mbox_remove(struct pci_dev *pdev)
{
	if (workload_req_created)
		sysfs_remove_group(&pdev->dev.kobj, &workload_req_attribute_group);

	workload_req_created = false;
}
EXPORT_SYMBOL_GPL(proc_thermal_mbox_remove);

MODULE_LICENSE("GPL v2");