// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
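/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */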
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

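/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */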
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *	called when the last reference is dropped
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

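/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */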
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

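/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->me_clients_rwsem" lock
 */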
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->me_clients_rwsem" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->me_clients_rwsem" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 *
 * Locking: called under "dev->device_lock" lock
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * @cb: mei callback struct to dequeue and free
 *
 * Locking: called under "dev->device_lock" lock
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 */
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
				  const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->fp == fp) {
			cl_vtag->pending_read = true;
			return;
		}
	}
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head:  an instance of our list structure
 * @cl:    host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}

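/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */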
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

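/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */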
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}

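/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */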
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

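/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */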
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

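/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */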
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (cl->state == MEI_FILE_UNINITIALIZED)
		return 0;

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - initialize client devices
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

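/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */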
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake all reading, writing, event and ctrl waiters
 *   of a given client
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

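/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *   associated states and resources
 *
 * @cl: host client
 */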
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

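/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */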
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connected, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

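/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Typical caller flow (sketch): obtain a linked host client via
 * mei_cl_alloc_linked(), connect with mei_cl_connect(), and tear down
 * with mei_cl_disconnect() followed by mei_cl_unlink().
 *
 * Return: 0 on success, <0 on failure.
 */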
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

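/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */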
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}

/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag for vtag in the list
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		if (vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *                           in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}

/**
 * mei_cl_vt_support_check - check if client support vtags
 *
 * @cl: host client
 *
 * Return:
 *	0 - supported, or not connected at all
 *	-EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *                           and vtag check
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error code otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

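/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error code otherwise.
 */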
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error code otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

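/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */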
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
	struct mei_ext_hdr_vtag *vtag_hdr = ext;

	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
	vtag_hdr->vtag = vtag;
	vtag_hdr->reserved = 0;
	return vtag_hdr->hdr.length;
}

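/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */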
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_vtag;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_ext = is_vtag;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
	}
out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

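/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */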
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/**
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

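/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */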
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}

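/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */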
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);

static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error code otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error code otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}

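/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error code on failure
 */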
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	if (rets)
		mei_cl_dma_free(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error code on failure
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}