0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define dev_fmt(fmt) "DOE: " fmt
0014
0015 #include <linux/bitfield.h>
0016 #include <linux/delay.h>
0017 #include <linux/jiffies.h>
0018 #include <linux/mutex.h>
0019 #include <linux/pci.h>
0020 #include <linux/pci-doe.h>
0021 #include <linux/workqueue.h>
0022
0023 #define PCI_DOE_PROTOCOL_DISCOVERY 0
0024
0025
0026 #define PCI_DOE_TIMEOUT HZ
0027 #define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
0028
0029 #define PCI_DOE_FLAG_CANCEL 0
0030 #define PCI_DOE_FLAG_DEAD 1
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
/**
 * struct pci_doe_mb - State for a single DOE mailbox
 *
 * This state is used to manage a single DOE mailbox capability.  All fields
 * should be considered opaque to the consumers and the structure passed into
 * the helpers below after being created by pcim_doe_create_mb().
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset of this DOE mailbox
 * @prots: Array of protocols supported (encoded as xarray values)
 * @wq: Wait queue for work item
 * @work_queue: Queue of pci_doe_work items
 * @flags: Bit array of PCI_DOE_FLAG_* flags
 */
struct pci_doe_mb {
	struct pci_dev *pdev;
	u16 cap_offset;
	struct xarray prots;

	wait_queue_head_t wq;
	struct workqueue_struct *work_queue;
	unsigned long flags;
};
0055
0056 static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
0057 {
0058 if (wait_event_timeout(doe_mb->wq,
0059 test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
0060 timeout))
0061 return -EIO;
0062 return 0;
0063 }
0064
0065 static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
0066 {
0067 struct pci_dev *pdev = doe_mb->pdev;
0068 int offset = doe_mb->cap_offset;
0069
0070 pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
0071 }
0072
/*
 * Issue an abort to reset the mailbox, then poll the status register until
 * both ERROR and BUSY are clear or PCI_DOE_TIMEOUT elapses.  Returns 0 on
 * success, -EIO if the wait was cancelled or the abort timed out.
 */
static int pci_doe_abort(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;

	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);

	do {
		int rc;
		u32 val;

		/* Poll at intervals; pci_doe_wait() also honors cancellation */
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc)
			return rc;
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);

		/* Abort success! */
		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
			return 0;

	} while (!time_after(jiffies, timeout_jiffies));

	/* Abort has timed out; callers treat this mailbox as unusable */
	pci_err(pdev, "[%x] ABORT timed out\n", offset);
	return -EIO;
}
0104
/*
 * Write the request object (2 header dwords + payload) into the mailbox and
 * set the GO bit.  Returns 0 on success, -EBUSY if the mailbox reports busy
 * (possibly another entity is using it), -EIO on a pre-existing error.
 */
static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
			    struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;
	int i;

	/*
	 * Check the DOE busy bit is not set.  If it is set, this could
	 * indicate someone other than Linux (e.g. firmware) is using the
	 * mailbox — NOTE(review): that interpretation is an assumption;
	 * the register read only shows the BUSY bit itself.
	 */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
		return -EBUSY;

	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	/* Write DOE Header 1: vendor ID and data object type */
	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
		FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	/* Header 2: length in dwords, including the 2 header dwords */
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
					  2 + task->request_pl_sz /
						sizeof(u32)));
	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
				       task->request_pl[i]);

	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

	return 0;
}
0143
0144 static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
0145 {
0146 struct pci_dev *pdev = doe_mb->pdev;
0147 int offset = doe_mb->cap_offset;
0148 u32 val;
0149
0150 pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
0151 if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
0152 return true;
0153 return false;
0154 }
0155
0156 static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
0157 {
0158 struct pci_dev *pdev = doe_mb->pdev;
0159 int offset = doe_mb->cap_offset;
0160 size_t length, payload_length;
0161 u32 val;
0162 int i;
0163
0164
0165 pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
0166 if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
0167 (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
0168 dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
0169 doe_mb->cap_offset, task->prot.vid, task->prot.type,
0170 FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
0171 FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
0172 return -EIO;
0173 }
0174
0175 pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
0176
0177 pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
0178 pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
0179
0180 length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
0181 if (length > SZ_1M || length < 2)
0182 return -EIO;
0183
0184
0185 length -= 2;
0186 payload_length = min(length, task->response_pl_sz / sizeof(u32));
0187
0188 for (i = 0; i < payload_length; i++) {
0189 pci_read_config_dword(pdev, offset + PCI_DOE_READ,
0190 &task->response_pl[i]);
0191
0192 if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
0193 return -EIO;
0194 pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
0195 }
0196
0197
0198 for (; i < length; i++) {
0199 pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
0200 pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
0201 }
0202
0203
0204 pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
0205 if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
0206 return -EIO;
0207
0208 return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
0209 }
0210
/* Record the result in the task and invoke its completion callback. */
static void signal_task_complete(struct pci_doe_task *task, int rv)
{
	task->rv = rv;
	task->complete(task);
}
0216
/*
 * Abort the mailbox to return it to a known state, then complete the task
 * with @rv.  If the abort itself fails there is no further way to recover
 * the hardware, so the mailbox is marked dead and all future tasks fail.
 */
static void signal_task_abort(struct pci_doe_task *task, int rv)
{
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;

	if (pci_doe_abort(doe_mb)) {
		/*
		 * If the device can't process an abort, set the mailbox
		 * dead: no more submissions will be accepted.
		 */
		pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
			doe_mb->cap_offset);
		set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
	}
	signal_task_complete(task, rv);
}
0233
/*
 * Workqueue handler that drives one DOE exchange: send the request, poll
 * for Data Object Ready (bounded by PCI_DOE_TIMEOUT), read the response,
 * and complete the task.  Any failure path aborts the mailbox via
 * signal_task_abort().
 */
static void doe_statemachine_work(struct work_struct *work)
{
	struct pci_doe_task *task = container_of(work, struct pci_doe_task,
						 work);
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;
	u32 val;
	int rc;

	/* A dead mailbox fails every task immediately, without touching HW */
	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
		signal_task_complete(task, -EIO);
		return;
	}

	/* Send request */
	rc = pci_doe_send_req(doe_mb, task);
	if (rc) {
		/*
		 * -EBUSY means the mailbox reported busy before our send;
		 * another entity may be issuing conflicting requests.
		 * Either way the exchange cannot proceed, so abort.
		 */
		if (rc == -EBUSY)
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
		return;
	}

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;

	/* Poll for response */
retry_resp:
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
		return;
	}

	if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
		if (time_after(jiffies, timeout_jiffies)) {
			signal_task_abort(task, -EIO);
			return;
		}
		/* Not ready yet; sleep one interval (or until cancelled) */
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc) {
			signal_task_abort(task, rc);
			return;
		}
		goto retry_resp;
	}

	rc = pci_doe_recv_resp(doe_mb, task);
	if (rc < 0) {
		signal_task_abort(task, rc);
		return;
	}

	signal_task_complete(task, rc);
}
0297
/* Completion callback used by pci_doe_discovery() to wake its waiter. */
static void pci_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}
0302
/*
 * Issue a single synchronous DOE Discovery request at @index.
 *
 * On success, fills in the @vid and @protocol supported at that index and
 * updates @index to the next index to query (0 means no more entries).
 * Returns 0 on success, -EIO if the response is not exactly one dword, or
 * a negative errno from pci_doe_submit_task().
 */
static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
			     u8 *protocol)
{
	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
				    *index);
	u32 response_pl;
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = PCI_VENDOR_ID_PCI_SIG,
		.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
		.request_pl = &request_pl,
		.request_pl_sz = sizeof(request_pl),
		.response_pl = &response_pl,
		.response_pl_sz = sizeof(response_pl),
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc < 0)
		return rc;

	/* Wait for the workqueue to run the task to completion */
	wait_for_completion(&c);

	/* task.rv is the number of response payload bytes received */
	if (task.rv != sizeof(response_pl))
		return -EIO;

	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
			      response_pl);
	*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
			   response_pl);

	return 0;
}
0339
0340 static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
0341 {
0342 return xa_mk_value((vid << 8) | prot);
0343 }
0344
/*
 * Walk the DOE Discovery protocol and record every (vid, protocol) pair the
 * mailbox advertises into doe_mb->prots.  Iteration follows the next-index
 * value returned by each discovery response and stops when it wraps to 0.
 * Returns 0 on success or a negative errno.
 */
static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
	u8 index = 0;
	u8 xa_idx = 0;

	do {
		int rc;
		u16 vid;
		u8 prot;

		rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
		if (rc)
			return rc;

		pci_dbg(doe_mb->pdev,
			"[%x] Found protocol %d vid: %x prot: %x\n",
			doe_mb->cap_offset, xa_idx, vid, prot);

		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
		if (rc)
			return rc;
	} while (index);

	return 0;
}
0371
0372 static void pci_doe_xa_destroy(void *mb)
0373 {
0374 struct pci_doe_mb *doe_mb = mb;
0375
0376 xa_destroy(&doe_mb->prots);
0377 }
0378
0379 static void pci_doe_destroy_workqueue(void *mb)
0380 {
0381 struct pci_doe_mb *doe_mb = mb;
0382
0383 destroy_workqueue(doe_mb->work_queue);
0384 }
0385
/*
 * devm action callback run on device teardown: stop new submissions, wake
 * any task waiting in pci_doe_wait(), then drain the workqueue.  The flag
 * order matters: DEAD must be set before CANCEL wakes the waiters so no
 * new task can slip in.
 */
static void pci_doe_flush_mb(void *mb)
{
	struct pci_doe_mb *doe_mb = mb;

	/* Stop all task submission */
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);

	/* Cancel an in-progress task and wake anyone sleeping in pci_doe_wait() */
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
	wake_up(&doe_mb->wq);

	/* Flush all tasks queued on the workqueue */
	flush_workqueue(doe_mb->work_queue);
}
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
/**
 * pcim_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
 *
 * Create a single mailbox object to manage the mailbox protocol at the
 * cap_offset specified.  All resources are device-managed (devm) and are
 * released automatically on driver detach.
 *
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
{
	struct pci_doe_mb *doe_mb;
	struct device *dev = &pdev->dev;
	int rc;

	doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);

	doe_mb->pdev = pdev;
	doe_mb->cap_offset = cap_offset;
	init_waitqueue_head(&doe_mb->wq);

	xa_init(&doe_mb->prots);
	rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	/* Ordered workqueue serializes all tasks for this mailbox */
	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
						     dev_driver_string(&pdev->dev),
						     pci_name(pdev),
						     doe_mb->cap_offset);
	if (!doe_mb->work_queue) {
		pci_err(pdev, "[%x] failed to allocate work queue\n",
			doe_mb->cap_offset);
		return ERR_PTR(-ENOMEM);
	}
	rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	/* Reset the mailbox by issuing an abort */
	rc = pci_doe_abort(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
			doe_mb->cap_offset, rc);
		return ERR_PTR(rc);
	}

	/*
	 * The flush action must be registered only after the abort
	 * succeeds, so that teardown never flushes a mailbox that was
	 * never brought to a known state.
	 */
	rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
	if (rc)
		return ERR_PTR(rc);

	rc = pci_doe_cache_protocols(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to cache protocols : %d\n",
			doe_mb->cap_offset, rc);
		return ERR_PTR(rc);
	}

	return doe_mb;
}
0470 EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480
/**
 * pci_doe_supports_prot() - Return if the DOE instance supports the given
 *			     protocol
 * @doe_mb: DOE mailbox capability to query
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
	unsigned long index;
	void *entry;

	/* The discovery protocol must always be supported */
	if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
		return true;

	xa_for_each(&doe_mb->prots, index, entry)
		if (entry == pci_doe_xa_prot_entry(vid, type))
			return true;

	return false;
}
0496 EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
0497
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514
/**
 * pci_doe_submit_task() - Submit a task to be processed by the state machine
 *
 * @doe_mb: DOE mailbox capability to submit to
 * @task: task to be queued
 *
 * Submit a DOE task (request/response) to the DOE mailbox to be processed.
 * Returns upon queueing the task object; the caller is notified via
 * task->complete() when processing finishes, with the result in task->rv.
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
		return -EINVAL;

	/*
	 * DOE requests must be a whole number of DW and the response needs
	 * to be big enough for at least 1 DW.
	 */
	if (task->request_pl_sz % sizeof(u32) ||
	    task->response_pl_sz < sizeof(u32))
		return -EINVAL;

	/* A dead mailbox accepts no further work */
	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
		return -EIO;

	task->doe_mb = doe_mb;
	INIT_WORK(&task->work, doe_statemachine_work);
	queue_work(doe_mb->work_queue, &task->work);
	return 0;
}
0536 EXPORT_SYMBOL_GPL(pci_doe_submit_task);