// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2021 Intel Corporation

#include <linux/bug.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/peci.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/unaligned.h>

#include "internal.h"

#define PECI_GET_DIB_CMD 0xf7
#define PECI_GET_DIB_WR_LEN 1
#define PECI_GET_DIB_RD_LEN 8

#define PECI_GET_TEMP_CMD 0x01
#define PECI_GET_TEMP_WR_LEN 1
#define PECI_GET_TEMP_RD_LEN 2

#define PECI_RDPKGCFG_CMD 0xa1
#define PECI_RDPKGCFG_WR_LEN 5
#define PECI_RDPKGCFG_RD_LEN_BASE 1
#define PECI_WRPKGCFG_CMD 0xa5
#define PECI_WRPKGCFG_WR_LEN_BASE 6
#define PECI_WRPKGCFG_RD_LEN 1

#define PECI_RDIAMSR_CMD 0xb1
#define PECI_RDIAMSR_WR_LEN 5
#define PECI_RDIAMSR_RD_LEN 9
#define PECI_WRIAMSR_CMD 0xb5
#define PECI_RDIAMSREX_CMD 0xd1
#define PECI_RDIAMSREX_WR_LEN 6
#define PECI_RDIAMSREX_RD_LEN 9

#define PECI_RDPCICFG_CMD 0x61
#define PECI_RDPCICFG_WR_LEN 6
#define PECI_RDPCICFG_RD_LEN 5
#define PECI_RDPCICFG_RD_LEN_MAX 24
#define PECI_WRPCICFG_CMD 0x65

#define PECI_RDPCICFGLOCAL_CMD 0xe1
#define PECI_RDPCICFGLOCAL_WR_LEN 5
#define PECI_RDPCICFGLOCAL_RD_LEN_BASE 1
#define PECI_WRPCICFGLOCAL_CMD 0xe5
#define PECI_WRPCICFGLOCAL_WR_LEN_BASE 6
#define PECI_WRPCICFGLOCAL_RD_LEN 1

#define PECI_ENDPTCFG_TYPE_LOCAL_PCI 0x03
#define PECI_ENDPTCFG_TYPE_PCI 0x04
#define PECI_ENDPTCFG_TYPE_MMIO 0x05
#define PECI_ENDPTCFG_ADDR_TYPE_PCI 0x04
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_D 0x05
#define PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q 0x06
#define PECI_RDENDPTCFG_CMD 0xc1
#define PECI_RDENDPTCFG_PCI_WR_LEN 12
#define PECI_RDENDPTCFG_MMIO_WR_LEN_BASE 10
#define PECI_RDENDPTCFG_MMIO_D_WR_LEN 14
#define PECI_RDENDPTCFG_MMIO_Q_WR_LEN 18
#define PECI_RDENDPTCFG_RD_LEN_BASE 1
#define PECI_WRENDPTCFG_CMD 0xc5
#define PECI_WRENDPTCFG_PCI_WR_LEN_BASE 13
#define PECI_WRENDPTCFG_MMIO_D_WR_LEN_BASE 15
#define PECI_WRENDPTCFG_MMIO_Q_WR_LEN_BASE 19
#define PECI_WRENDPTCFG_RD_LEN 1

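/* Device Specific Completion Code (CC) definitions */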
#define PECI_CC_SUCCESS 0x40
#define PECI_CC_NEED_RETRY 0x80
#define PECI_CC_OUT_OF_RESOURCE 0x81
#define PECI_CC_UNAVAIL_RESOURCE 0x82
#define PECI_CC_INVALID_REQ 0x90
#define PECI_CC_MCA_ERROR 0x91
#define PECI_CC_CATASTROPHIC_MCA_ERROR 0x93
#define PECI_CC_FATAL_MCA_ERROR 0x94
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB 0x98
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR 0x9B
#define PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA 0x9C

#define PECI_RETRY_BIT BIT(0)

#define PECI_RETRY_TIMEOUT msecs_to_jiffies(700)
#define PECI_RETRY_INTERVAL_MIN msecs_to_jiffies(1)
#define PECI_RETRY_INTERVAL_MAX msecs_to_jiffies(128)

static u8 peci_request_data_cc(struct peci_request *req)
{
	return req->rx.buf[0];
}

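/**
 * peci_request_status() - return -errno based on PECI completion code
 * @req: the PECI request that contains response data with completion code
 *
 * It can't be used for Ping(), GetDIB() and GetTemp() - for those commands we
 * don't expect a completion code in the response.
 *
 * Return: 0 if the device reported success, -errno otherwise.
 */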
int peci_request_status(struct peci_request *req)
{
	u8 cc = peci_request_data_cc(req);

	if (cc != PECI_CC_SUCCESS)
		dev_dbg(&req->device->dev, "ret: %#02x\n", cc);

	switch (cc) {
	case PECI_CC_SUCCESS:
		return 0;
	case PECI_CC_NEED_RETRY:
	case PECI_CC_OUT_OF_RESOURCE:
	case PECI_CC_UNAVAIL_RESOURCE:
		return -EAGAIN;
	case PECI_CC_INVALID_REQ:
		return -EINVAL;
	case PECI_CC_MCA_ERROR:
	case PECI_CC_CATASTROPHIC_MCA_ERROR:
	case PECI_CC_FATAL_MCA_ERROR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_IERR:
	case PECI_CC_PARITY_ERR_GPSB_OR_PMSB_MCA:
		return -EIO;
	}

	WARN_ONCE(1, "Unknown PECI completion code: %#02x\n", cc);

	return -EIO;
}
EXPORT_SYMBOL_NS_GPL(peci_request_status, PECI);

static int peci_request_xfer(struct peci_request *req)
{
	struct peci_device *device = req->device;
	struct peci_controller *controller = to_peci_controller(device->dev.parent);
	int ret;

	mutex_lock(&controller->bus_lock);
	ret = controller->ops->xfer(controller, device->addr, req);
	mutex_unlock(&controller->bus_lock);

	return ret;
}

static int peci_request_xfer_retry(struct peci_request *req)
{
	long wait_interval = PECI_RETRY_INTERVAL_MIN;
	struct peci_device *device = req->device;
	struct peci_controller *controller = to_peci_controller(device->dev.parent);
	unsigned long start = jiffies;
	int ret;

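	/* A request with no TX data is a caller bug - warn and return early. */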
	if (WARN_ON(req->tx.len == 0))
		return 0;

	do {
		ret = peci_request_xfer(req);
		if (ret) {
			dev_dbg(&controller->dev, "xfer error: %d\n", ret);
			return ret;
		}

		if (peci_request_status(req) != -EAGAIN)
			return 0;

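		/* Set the retry bit to indicate a retry attempt */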
		req->tx.buf[1] |= PECI_RETRY_BIT;

		if (schedule_timeout_interruptible(wait_interval))
			return -ERESTARTSYS;

		wait_interval = min_t(long, wait_interval * 2, PECI_RETRY_INTERVAL_MAX);
	} while (time_before(jiffies, start + PECI_RETRY_TIMEOUT));

	dev_dbg(&controller->dev, "request timed out\n");

	return -ETIMEDOUT;
}

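/**
 * peci_request_alloc() - allocate &struct peci_request
 * @device: PECI device to which the request is going to be sent
 * @tx_len: TX length
 * @rx_len: RX length
 *
 * Return: A pointer to a newly allocated and initialized peci_request on
 *	success, or NULL in case of failure.
 */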
struct peci_request *peci_request_alloc(struct peci_device *device, u8 tx_len, u8 rx_len)
{
	struct peci_request *req;

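	/*
	 * The TX and RX buffers are fixed-size members of struct peci_request,
	 * so the check below only catches misbehaving callers.
	 */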
	if (WARN_ON_ONCE(tx_len > PECI_REQUEST_MAX_BUF_SIZE || rx_len > PECI_REQUEST_MAX_BUF_SIZE))
		return NULL;

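	/* The buffers are embedded in the request - one allocation covers everything. */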
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->device = device;
	req->tx.len = tx_len;
	req->rx.len = rx_len;

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_request_alloc, PECI);

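/**
 * peci_request_free() - free peci_request
 * @req: the PECI request to be freed
 */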
void peci_request_free(struct peci_request *req)
{
	kfree(req);
}
EXPORT_SYMBOL_NS_GPL(peci_request_free, PECI);

struct peci_request *peci_xfer_get_dib(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_DIB_WR_LEN, PECI_GET_DIB_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_DIB_CMD;

	ret = peci_request_xfer(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_dib, PECI);

struct peci_request *peci_xfer_get_temp(struct peci_device *device)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_GET_TEMP_WR_LEN, PECI_GET_TEMP_RD_LEN);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_GET_TEMP_CMD;

	ret = peci_request_xfer(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}
EXPORT_SYMBOL_NS_GPL(peci_xfer_get_temp, PECI);

static struct peci_request *
__pkg_cfg_read(struct peci_device *device, u8 index, u16 param, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, PECI_RDPKGCFG_WR_LEN, PECI_RDPKGCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDPKGCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = index;
	put_unaligned_le16(param, &req->tx.buf[3]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

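/*
 * Encode bus/device/function/register into the PCI configuration address
 * layout used by the RdPCIConfig* commands: register in bits [11:0],
 * device/function in bits [19:12], bus in bits [27:20].
 */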
static u32 __get_pci_addr(u8 bus, u8 dev, u8 func, u16 reg)
{
	return reg | PCI_DEVID(bus, PCI_DEVFN(dev, func)) << 12;
}

static struct peci_request *
__pci_cfg_local_read(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	u32 pci_addr;
	int ret;

	req = peci_request_alloc(device, PECI_RDPCICFGLOCAL_WR_LEN,
				 PECI_RDPCICFGLOCAL_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	pci_addr = __get_pci_addr(bus, dev, func, reg);

	req->tx.buf[0] = PECI_RDPCICFGLOCAL_CMD;
	req->tx.buf[1] = 0;
	put_unaligned_le24(pci_addr, &req->tx.buf[2]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

static struct peci_request *
__ep_pci_cfg_read(struct peci_device *device, u8 msg_type, u8 seg,
		  u8 bus, u8 dev, u8 func, u16 reg, u8 len)
{
	struct peci_request *req;
	u32 pci_addr;
	int ret;

	req = peci_request_alloc(device, PECI_RDENDPTCFG_PCI_WR_LEN,
				 PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	pci_addr = __get_pci_addr(bus, dev, func, reg);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = msg_type;
	req->tx.buf[3] = 0;
	req->tx.buf[4] = 0;
	req->tx.buf[5] = 0;
	req->tx.buf[6] = PECI_ENDPTCFG_ADDR_TYPE_PCI;
	req->tx.buf[7] = seg;
	put_unaligned_le32(pci_addr, &req->tx.buf[8]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

static struct peci_request *
__ep_mmio_read(struct peci_device *device, u8 bar, u8 addr_type, u8 seg,
	       u8 bus, u8 dev, u8 func, u64 offset, u8 tx_len, u8 len)
{
	struct peci_request *req;
	int ret;

	req = peci_request_alloc(device, tx_len, PECI_RDENDPTCFG_RD_LEN_BASE + len);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->tx.buf[0] = PECI_RDENDPTCFG_CMD;
	req->tx.buf[1] = 0;
	req->tx.buf[2] = PECI_ENDPTCFG_TYPE_MMIO;
	req->tx.buf[3] = 0;
	req->tx.buf[4] = 0;
	req->tx.buf[5] = bar;
	req->tx.buf[6] = addr_type;
	req->tx.buf[7] = seg;
	req->tx.buf[8] = PCI_DEVFN(dev, func);
	req->tx.buf[9] = bus;

	if (addr_type == PECI_ENDPTCFG_ADDR_TYPE_MMIO_D)
		put_unaligned_le32(offset, &req->tx.buf[10]);
	else
		put_unaligned_le64(offset, &req->tx.buf[10]);

	ret = peci_request_xfer_retry(req);
	if (ret) {
		peci_request_free(req);
		return ERR_PTR(ret);
	}

	return req;
}

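/*
 * For commands that return a completion code, rx.buf[0] holds the CC and the
 * payload starts at rx.buf[1]. GetDIB() and GetTemp() responses carry no CC,
 * so their data starts at rx.buf[0].
 */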
u8 peci_request_data_readb(struct peci_request *req)
{
	return req->rx.buf[1];
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readb, PECI);

u16 peci_request_data_readw(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readw, PECI);

u32 peci_request_data_readl(struct peci_request *req)
{
	return get_unaligned_le32(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readl, PECI);

u64 peci_request_data_readq(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[1]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_data_readq, PECI);

u64 peci_request_dib_read(struct peci_request *req)
{
	return get_unaligned_le64(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_dib_read, PECI);

s16 peci_request_temp_read(struct peci_request *req)
{
	return get_unaligned_le16(&req->rx.buf[0]);
}
EXPORT_SYMBOL_NS_GPL(peci_request_temp_read, PECI);

#define __read_pkg_config(x, type) \
struct peci_request *peci_xfer_pkg_cfg_##x(struct peci_device *device, u8 index, u16 param) \
{ \
	return __pkg_cfg_read(device, index, param, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pkg_cfg_##x, PECI)

__read_pkg_config(readb, u8);
__read_pkg_config(readw, u16);
__read_pkg_config(readl, u32);
__read_pkg_config(readq, u64);

#define __read_pci_config_local(x, type) \
struct peci_request * \
peci_xfer_pci_cfg_local_##x(struct peci_device *device, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __pci_cfg_local_read(device, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_pci_cfg_local_##x, PECI)

__read_pci_config_local(readb, u8);
__read_pci_config_local(readw, u16);
__read_pci_config_local(readl, u32);

#define __read_ep_pci_config(x, msg_type, type) \
struct peci_request * \
peci_xfer_ep_pci_cfg_##x(struct peci_device *device, u8 seg, u8 bus, u8 dev, u8 func, u16 reg) \
{ \
	return __ep_pci_cfg_read(device, msg_type, seg, bus, dev, func, reg, sizeof(type)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_pci_cfg_##x, PECI)

__read_ep_pci_config(local_readb, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u8);
__read_ep_pci_config(local_readw, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u16);
__read_ep_pci_config(local_readl, PECI_ENDPTCFG_TYPE_LOCAL_PCI, u32);
__read_ep_pci_config(readb, PECI_ENDPTCFG_TYPE_PCI, u8);
__read_ep_pci_config(readw, PECI_ENDPTCFG_TYPE_PCI, u16);
__read_ep_pci_config(readl, PECI_ENDPTCFG_TYPE_PCI, u32);

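/*
 * type1 selects the width of the MMIO offset carried in the request (and thus
 * the write length), type2 the width of the register being read back.
 */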
#define __read_ep_mmio(x, y, addr_type, type1, type2) \
struct peci_request *peci_xfer_ep_mmio##y##_##x(struct peci_device *device, u8 bar, u8 seg, \
						u8 bus, u8 dev, u8 func, u64 offset) \
{ \
	return __ep_mmio_read(device, bar, addr_type, seg, bus, dev, func, \
			      offset, PECI_RDENDPTCFG_MMIO_WR_LEN_BASE + sizeof(type1), \
			      sizeof(type2)); \
} \
EXPORT_SYMBOL_NS_GPL(peci_xfer_ep_mmio##y##_##x, PECI)

__read_ep_mmio(readl, 32, PECI_ENDPTCFG_ADDR_TYPE_MMIO_D, u32, u32);
__read_ep_mmio(readl, 64, PECI_ENDPTCFG_ADDR_TYPE_MMIO_Q, u64, u32);