0001
0002
0003
0004
0005
0006
0007 #include <linux/export.h>
0008 #include <linux/kthread.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/fs.h>
0011 #include <linux/jiffies.h>
0012 #include <linux/slab.h>
0013 #include <linux/pm_runtime.h>
0014
0015 #include <linux/mei.h>
0016
0017 #include "mei_dev.h"
0018 #include "hbm.h"
0019 #include "client.h"
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029 void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
0030 {
0031 struct mei_cl_cb *cb, *next;
0032 struct mei_cl *cl;
0033
0034 list_for_each_entry_safe(cb, next, cmpl_list, list) {
0035 cl = cb->cl;
0036 list_del_init(&cb->list);
0037
0038 dev_dbg(dev->dev, "completing call back.\n");
0039 mei_cl_complete(cl, cb);
0040 }
0041 }
0042 EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052 static inline int mei_cl_hbm_equal(struct mei_cl *cl,
0053 struct mei_msg_hdr *mei_hdr)
0054 {
0055 return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
0056 mei_cl_me_id(cl) == mei_hdr->me_addr;
0057 }
0058
0059
0060
0061
0062
0063
0064
0065
/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 * @discard_len: the length of the message to discard (excluding headers)
 */
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
				size_t discard_len)
{
	if (hdr->dma_ring) {
		/* payload lives in the dma ring; consume it there.
		 * extension[rd_msg_hdr_count - 2] holds the dma payload
		 * length (see the dma_ring handling in the read handler) */
		mei_dma_ring_read(dev, NULL,
				  hdr->extension[dev->rd_msg_hdr_count - 2]);
		discard_len = 0;
	}

	/* drain the remaining slots into the scratch buffer;
	 * NOTE(review): assumes rd_msg_buf can hold discard_len bytes —
	 * presumably guaranteed by header validation; confirm */
	mei_read_slots(dev, dev->rd_msg_buf, discard_len);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
/**
 * mei_cl_irq_read_msg - process an incoming client message
 *
 * @cl: reading client
 * @mei_hdr: header of the mei client message
 * @meta: extended meta header (valid only when mei_hdr->extended is set)
 * @cmpl_list: completion list; the cb is moved here on completion or error
 *
 * Return: always 0 (errors are reported via cb->status)
 */
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_ext_meta_hdr *meta,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	size_t buf_sz;
	u32 length;
	int ext_len;

	length = mei_hdr->length;
	ext_len = 0;
	if (mei_hdr->extended) {
		/* the header length covers the extended headers too;
		 * subtract them to get the payload length */
		ext_len = sizeof(*meta) + mei_slots2data(meta->size);
		length -= ext_len;
	}

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		/* fixed-address clients may receive messages with no
		 * read pending; allocate a cb on the fly */
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (mei_hdr->extended) {
		struct mei_ext_hdr *ext;
		struct mei_ext_hdr_vtag *vtag_hdr = NULL;

		/* scan the extended header list; only a vtag header is
		 * understood, anything else marks the cb as protocol error */
		ext = mei_ext_begin(meta);
		do {
			switch (ext->type) {
			case MEI_EXT_HDR_VTAG:
				vtag_hdr = (struct mei_ext_hdr_vtag *)ext;
				break;
			case MEI_EXT_HDR_NONE:
				fallthrough;
			default:
				cb->status = -EPROTO;
				break;
			}

			ext = mei_ext_next(ext);
		} while (!mei_ext_last(meta, ext));

		if (!vtag_hdr) {
			cl_dbg(dev, cl, "vtag not found in extended header.\n");
			cb->status = -EPROTO;
			goto discard;
		}

		cl_dbg(dev, cl, "vtag: %d\n", vtag_hdr->vtag);
		/* a cb that already carries a vtag must match the message */
		if (cb->vtag && cb->vtag != vtag_hdr->vtag) {
			cl_err(dev, cl, "mismatched tag: %d != %d\n",
			       cb->vtag, vtag_hdr->vtag);
			cb->status = -EPROTO;
			goto discard;
		}
		cb->vtag = vtag_hdr->vtag;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	/* for dma transfers the actual payload length is carried in the
	 * header extension word right after the extended headers */
	if (mei_hdr->dma_ring)
		length = mei_hdr->extension[mei_data2slots(ext_len)];

	buf_sz = length + cb->buf_idx;
	/* size_t overflow check on the accumulated buffer index */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (mei_hdr->dma_ring) {
		mei_dma_ring_read(dev, cb->buf.data + cb->buf_idx, length);
		/* zero-length slot read — NOTE(review): presumably keeps the
		 * read window/credits in sync after a dma transfer; confirm */
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, 0);
	} else {
		mei_read_slots(dev, cb->buf.data + cb->buf_idx, length);
	}

	cb->buf_idx += length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		/* more fragments expected; keep the device awake */
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	/* complete the cb with its error status (if one was found)
	 * and drop the message payload from the hardware */
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr, length);
	return 0;
}
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221 static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
0222 struct list_head *cmpl_list)
0223 {
0224 struct mei_device *dev = cl->dev;
0225 u32 msg_slots;
0226 int slots;
0227 int ret;
0228
0229 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_response));
0230 slots = mei_hbuf_empty_slots(dev);
0231 if (slots < 0)
0232 return -EOVERFLOW;
0233
0234 if ((u32)slots < msg_slots)
0235 return -EMSGSIZE;
0236
0237 ret = mei_hbm_cl_disconnect_rsp(dev, cl);
0238 list_move_tail(&cb->list, cmpl_list);
0239
0240 return ret;
0241 }
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
0253 static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
0254 struct list_head *cmpl_list)
0255 {
0256 struct mei_device *dev = cl->dev;
0257 u32 msg_slots;
0258 int slots;
0259 int ret;
0260
0261 if (!list_empty(&cl->rd_pending))
0262 return 0;
0263
0264 msg_slots = mei_hbm2slots(sizeof(struct hbm_flow_control));
0265 slots = mei_hbuf_empty_slots(dev);
0266 if (slots < 0)
0267 return -EOVERFLOW;
0268
0269 if ((u32)slots < msg_slots)
0270 return -EMSGSIZE;
0271
0272 ret = mei_hbm_cl_flow_control_req(dev, cl);
0273 if (ret) {
0274 cl->status = ret;
0275 cb->buf_idx = 0;
0276 list_move_tail(&cb->list, cmpl_list);
0277 return ret;
0278 }
0279
0280 pm_runtime_mark_last_busy(dev->dev);
0281 pm_request_autosuspend(dev->dev);
0282
0283 list_move_tail(&cb->list, &cl->rd_pending);
0284
0285 return 0;
0286 }
0287
0288 static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
0289 {
0290 return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
0291 }
0292
0293 static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
0294 {
0295 return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
0296 }
0297
0298 static inline int hdr_is_valid(u32 msg_hdr)
0299 {
0300 struct mei_msg_hdr *mei_hdr;
0301 u32 expected_len = 0;
0302
0303 mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
0304 if (!msg_hdr || mei_hdr->reserved)
0305 return -EBADMSG;
0306
0307 if (mei_hdr->dma_ring)
0308 expected_len += MEI_SLOT_SIZE;
0309 if (mei_hdr->extended)
0310 expected_len += MEI_SLOT_SIZE;
0311 if (mei_hdr->length < expected_len)
0312 return -EBADMSG;
0313
0314 return 0;
0315 }
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
/**
 * mei_irq_read_handler - bottom half read routine after ISR;
 *	reads and dispatches one message from the device read fifo
 *
 * @dev: the device structure
 * @cmpl_list: list to append completed callbacks to
 * @slots: available read slots; decremented as header words are consumed
 *	and recomputed from hardware after a message is fully handled
 *
 * Return: 0 on success, negative errno otherwise
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_ext_meta_hdr *meta_hdr = NULL;
	struct mei_cl *cl;
	int ret;
	u32 hdr_size_left;
	u32 hdr_size_ext;
	int i;
	int ext_hdr_end;

	/* read the base header word unless a previous call already
	 * stashed it in dev->rd_msg_hdr (partial read resumption) */
	if (!dev->rd_msg_hdr[0]) {
		dev->rd_msg_hdr[0] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count = 1;
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr[0]);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr[0]);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	/* the whole body must already be in the fifo */
	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	ext_hdr_end = 1;
	hdr_size_left = mei_hdr->length;

	if (mei_hdr->extended) {
		/* first extension word is the extended meta header */
		if (!dev->rd_msg_hdr[1]) {
			dev->rd_msg_hdr[1] = mei_read_hdr(dev);
			dev->rd_msg_hdr_count++;
			(*slots)--;
			dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
		}
		meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
		if (check_add_overflow((u32)sizeof(*meta_hdr),
				       mei_slots2data(meta_hdr->size),
				       &hdr_size_ext)) {
			dev_err(dev->dev, "extended message size too big %d\n",
				meta_hdr->size);
			return -EBADMSG;
		}
		/* extended headers must fit within the declared length */
		if (hdr_size_left < hdr_size_ext) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}
		hdr_size_left -= hdr_size_ext;

		/* pull the remaining extended header words; rd_msg_hdr[0]
		 * is the base header and [1] the meta header, hence +2 */
		ext_hdr_end = meta_hdr->size + 2;
		for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
			dev->rd_msg_hdr[i] = mei_read_hdr(dev);
			dev_dbg(dev->dev, "extended header %d is %08x\n", i,
				dev->rd_msg_hdr[i]);
			dev->rd_msg_hdr_count++;
			(*slots)--;
		}
	}

	if (mei_hdr->dma_ring) {
		/* with dma the only in-fifo payload is one word carrying
		 * the dma payload length */
		if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
			dev_err(dev->dev, "corrupted message header len %d\n",
				mei_hdr->length);
			return -EBADMSG;
		}

		dev->rd_msg_hdr[ext_hdr_end] = mei_read_hdr(dev);
		dev->rd_msg_hdr_count++;
		(*slots)--;
		mei_hdr->length -= sizeof(dev->rd_msg_hdr[ext_hdr_end]);
	}

	/*  HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list);
			goto reset_slots;
		}
	}

	/* if no recipient cl was found we assume corrupted header:
	 * fixed-address messages without a matching client, or anything
	 * arriving while powering down, are silently discarded */
	if (hdr_is_fixed(mei_hdr) ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length);
		ret = 0;
		goto reset_slots;
	}
	dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
	ret = -EBADMSG;
	goto end;

reset_slots:
	/* reset the number of slots and header */
	memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
	dev->rd_msg_hdr_count = 0;
	*slots = mei_count_full_read_slots(dev);
	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
/**
 * mei_irq_write_handler - dispatch write related operations:
 *	complete finished writes and push queued control/data requests
 *	to the device while host buffer slots are available
 *
 * @dev: the device structure
 * @cmpl_list: list to append completed callbacks to
 *
 * Return: 0 on success, negative errno otherwise
 */
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

	/* nothing to do when the host buffer is not ours to write */
	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if (slots == 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_DMA_MAP:
			ret = mei_cl_irq_dma_map(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		case MEI_FOP_DMA_UNMAP:
			ret = mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete  write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
0571
0572
0573
0574
0575
0576
0577
0578 static void mei_connect_timeout(struct mei_cl *cl)
0579 {
0580 struct mei_device *dev = cl->dev;
0581
0582 if (cl->state == MEI_FILE_CONNECTING) {
0583 if (dev->hbm_f_dot_supported) {
0584 cl->state = MEI_FILE_DISCONNECT_REQUIRED;
0585 wake_up(&cl->wait);
0586 return;
0587 }
0588 }
0589 mei_reset(dev);
0590 }
0591
0592 #define MEI_STALL_TIMER_FREQ (2 * HZ)
0593
0594
0595
0596
0597
0598
0599
/**
 * mei_schedule_stall_timer - re-arm the stall watchdog work
 *	to fire after MEI_STALL_TIMER_FREQ
 *
 * @dev: the device structure
 */
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
0604
0605
0606
0607
0608
0609
0610
0611 void mei_timer(struct work_struct *work)
0612 {
0613 struct mei_cl *cl;
0614 struct mei_device *dev = container_of(work,
0615 struct mei_device, timer_work.work);
0616 bool reschedule_timer = false;
0617
0618 mutex_lock(&dev->device_lock);
0619
0620
0621 if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
0622 dev->hbm_state != MEI_HBM_IDLE) {
0623
0624 if (dev->init_clients_timer) {
0625 if (--dev->init_clients_timer == 0) {
0626 dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
0627 dev->hbm_state);
0628 mei_reset(dev);
0629 goto out;
0630 }
0631 reschedule_timer = true;
0632 }
0633 }
0634
0635 if (dev->dev_state != MEI_DEV_ENABLED)
0636 goto out;
0637
0638
0639 list_for_each_entry(cl, &dev->file_list, link) {
0640 if (cl->timer_count) {
0641 if (--cl->timer_count == 0) {
0642 dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
0643 mei_connect_timeout(cl);
0644 goto out;
0645 }
0646 reschedule_timer = true;
0647 }
0648 }
0649
0650 out:
0651 if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
0652 mei_schedule_stall_timer(dev);
0653
0654 mutex_unlock(&dev->device_lock);
0655 }