// SPDX-License-Identifier: GPL-2.0-only
/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include "hbm.h"
#include "client.h"

/**
 * ishtp_cl_get_tx_free_buffer_size() - Report free TX buffer space
 * @cl: Client instance
 *
 * Return: Number of free bytes across all of the client's TX ring buffers
 */
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
{
        unsigned long tx_free_flags;
        int size;

        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);

        return size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);

/**
 * ishtp_cl_get_tx_free_rings() - Report number of free TX ring buffers
 * @cl: Client instance
 *
 * Return: Number of free TX ring buffers
 */
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
{
        return cl->tx_ring_free_size;
}
EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Remove all pending read buffers belonging to @cl from the device read list.
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *next;
        unsigned long flags;

        spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
        list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
                if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                }
        spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Remove all queued buffers for a client. This is called when a client device
 * needs a reset due to an error, on S3 resume or during module removal.
 *
 * Return: 0 on success, -EINVAL if the client or device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        ishtp_read_list_flush(cl);

        return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initialize the client device fields: spinlocks, queues, ring sizes and
 * TX path bookkeeping. Called during new client creation.
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
        memset(cl, 0, sizeof(struct ishtp_cl));
        init_waitqueue_head(&cl->wait_ctrl_res);
        spin_lock_init(&cl->free_list_spinlock);
        spin_lock_init(&cl->in_process_spinlock);
        spin_lock_init(&cl->tx_list_spinlock);
        spin_lock_init(&cl->tx_free_list_spinlock);
        spin_lock_init(&cl->fc_spinlock);
        INIT_LIST_HEAD(&cl->link);
        cl->dev = dev;

        INIT_LIST_HEAD(&cl->free_rb_list.list);
        INIT_LIST_HEAD(&cl->tx_list.list);
        INIT_LIST_HEAD(&cl->tx_free_list.list);
        INIT_LIST_HEAD(&cl->in_process_list.list);

        cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
        cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
        cl->tx_ring_free_size = cl->tx_ring_size;

        /* dma */
        cl->last_tx_path = CL_TX_PATH_IPC;
        cl->last_dma_acked = 1;
        cl->last_dma_addr = NULL;
        cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - Allocate and initialize a client instance
 * @cl_device: ishtp client device
 *
 * Allocate memory for a new client device and initialize each field.
 *
 * Return: The allocated client instance, or NULL on allocation failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl;

        cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        ishtp_cl_init(cl, cl_device->ishtp_dev);
        return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Free a client device
 * @cl: client device instance
 *
 * Free the client's RX and TX rings and then the client structure itself.
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        unsigned long flags;

        if (!cl)
                return;

        dev = cl->dev;
        if (!dev)
                return;

        spin_lock_irqsave(&dev->cl_list_lock, flags);
        ishtp_cl_free_rx_ring(cl);
        ishtp_cl_free_tx_ring(cl);
        kfree(cl);
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 *
 * Allocate a single bit in the host clients map, which also bounds the number
 * of client sessions that may be open at the same time. Once allocated, the
 * client instance is added to the device's client list.
 *
 * Return: 0 on success, or a negative error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        unsigned long flags, flags_cl;
        int id, ret = 0;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        spin_lock_irqsave(&dev->device_lock, flags);

        if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
                ret = -EMFILE;
                goto unlock_dev;
        }

        id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);

        if (id >= ISHTP_CLIENTS_MAX) {
                spin_unlock_irqrestore(&dev->device_lock, flags);
                dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
                return -ENOENT;
        }

        dev->open_handle_count++;
        cl->host_client_id = id;
        spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                ret = -ENODEV;
                goto unlock_cl;
        }
        list_add_tail(&cl->link, &dev->cl_list);
        set_bit(id, dev->host_clients_map);
        cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
        spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
        spin_unlock_irqrestore(&dev->device_lock, flags);
        return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);

/**
 * ishtp_cl_unlink() - Remove a client from the device client list
 * @cl: client device instance
 *
 * Release the client's host id and unlink a previously linked client
 * instance from the ishtp device.
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl *pos;
        unsigned long flags;

        /* don't shout on error exit path */
        if (!cl || !cl->dev)
                return;

        dev = cl->dev;

        spin_lock_irqsave(&dev->device_lock, flags);
        if (dev->open_handle_count > 0) {
                clear_bit(cl->host_client_id, dev->host_clients_map);
                dev->open_handle_count--;
        }
        spin_unlock_irqrestore(&dev->device_lock, flags);

        /*
         * This checks that 'cl' is actually linked into the device's
         * structure before attempting 'list_del'
         */
        spin_lock_irqsave(&dev->cl_list_lock, flags);
        list_for_each_entry(pos, &dev->cl_list, link)
                if (cl->host_client_id == pos->host_client_id) {
                        list_del_init(&pos->link);
                        break;
                }
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for the client to firmware and wait until the
 * firmware confirms the disconnect or the device is no longer enabled.
 *
 * Return: 0 on success, else a negative error code
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

        if (cl->state != ISHTP_CL_DISCONNECTING) {
                dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
                return 0;
        }

        if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
                dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
                dev_err(&cl->device->dev, "failed to disconnect.\n");
                return -ENODEV;
        }

        wait_event_interruptible_timeout(cl->wait_ctrl_res,
                        (dev->dev_state != ISHTP_DEV_ENABLED ||
                        cl->state == ISHTP_CL_DISCONNECTED),
                        ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

        /*
         * If FW reset arrived, this will happen. Don't check cl->,
         * as 'cl' may be freed already
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
                               __func__);
                return -ENODEV;
        }

        if (cl->state == ISHTP_CL_DISCONNECTED) {
                dev->print_log(dev, "%s() successful\n", __func__);
                return 0;
        }

        return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);

/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Check whether a different client instance with the same fw client id is
 * currently in the connecting state.
 *
 * Return: true if another client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl *pos;
        unsigned long flags;

        if (WARN_ON(!cl || !cl->dev))
                return false;

        dev = cl->dev;
        spin_lock_irqsave(&dev->cl_list_lock, flags);
        list_for_each_entry(pos, &dev->cl_list, link) {
                if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
                                cl->fw_client_id == pos->fw_client_id) {
                        spin_unlock_irqrestore(&dev->cl_list_lock, flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&dev->cl_list_lock, flags);

        return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for the client to firmware. On success, bind the
 * client to its bus device, allocate the RX and TX ring buffers and start
 * the first read (which also grants the firmware a flow-control credit).
 *
 * Return: 0 on success, else a negative error code
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

        if (ishtp_cl_is_other_connecting(cl)) {
                dev->print_log(dev, "%s() Busy\n", __func__);
                return -EBUSY;
        }

        if (ishtp_hbm_cl_connect_req(dev, cl)) {
                dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
                return -ENODEV;
        }

        rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
                                (dev->dev_state == ISHTP_DEV_ENABLED &&
                                (cl->state == ISHTP_CL_CONNECTED ||
                                 cl->state == ISHTP_CL_DISCONNECTED)),
                                ishtp_secs_to_jiffies(
                                        ISHTP_CL_CONNECT_TIMEOUT));
        /*
         * If FW reset arrived, this will happen. Don't check cl->,
         * as 'cl' may be freed already
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
                               __func__);
                return -EFAULT;
        }

        if (cl->state != ISHTP_CL_CONNECTED) {
                dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
                               __func__);
                return -EFAULT;
        }

        rets = cl->status;
        if (rets) {
                dev->print_log(dev, "%s() Invalid status\n", __func__);
                return rets;
        }

        rets = ishtp_cl_device_bind(cl);
        if (rets) {
                dev->print_log(dev, "%s() Bind error\n", __func__);
                ishtp_cl_disconnect(cl);
                return rets;
        }

        rets = ishtp_cl_alloc_rx_ring(cl);
        if (rets) {
                dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
                /* if allocation failed, disconnect */
                ishtp_cl_disconnect(cl);
                return rets;
        }

        rets = ishtp_cl_alloc_tx_ring(cl);
        if (rets) {
                dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
                /* if allocation failed, disconnect */
                ishtp_cl_free_rx_ring(cl);
                ishtp_cl_disconnect(cl);
                return rets;
        }

        /* Upon successful connection and allocation, start flow-control */
        rets = ishtp_cl_read_start(cl);

        dev->print_log(dev, "%s() successful\n", __func__);

        return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
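
/*
 * Example: a minimal connect sequence as a client bus driver's probe might
 * perform it, using the API above. A sketch only, kept out of the build with
 * #if 0: my_probe() and the fw client lookup are hypothetical and not part
 * of this file, and error handling is abbreviated.
 */
#if 0
static int my_probe(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl;
        int fw_client_id;       /* from ishtp_fw_cl_get_client() lookup */
        int rv;

        cl = ishtp_cl_allocate(cl_device);      /* kmalloc + ishtp_cl_init() */
        if (!cl)
                return -ENOMEM;

        rv = ishtp_cl_link(cl);                 /* reserve a host client id */
        if (rv)
                goto out_free;

        ishtp_cl_set_fw_client_id(cl, fw_client_id);
        ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

        rv = ishtp_cl_connect(cl);              /* HBM connect + ring alloc */
        if (rv)
                goto out_unlink;
        return 0;

out_unlink:
        ishtp_cl_unlink(cl);
out_free:
        ishtp_cl_free(cl);
        return rv;
}
#endif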

/**
 * ishtp_cl_read_start() - Prepare to read a client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers, queue it on the
 * device read list and send a flow-control request to firmware so that it
 * may send the next message.
 *
 * Return: 0 on success, else a negative error code
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
        struct ishtp_device *dev;
        struct ishtp_cl_rb *rb;
        int rets;
        int i;
        unsigned long flags;
        unsigned long dev_flags;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (cl->state != ISHTP_CL_CONNECTED)
                return -ENODEV;

        if (dev->dev_state != ISHTP_DEV_ENABLED)
                return -ENODEV;

        i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
        if (i < 0) {
                dev_err(&cl->device->dev, "no such fw client %d\n",
                        cl->fw_client_id);
                return -ENODEV;
        }

        /* The current rb is the head of the free rb list */
        spin_lock_irqsave(&cl->free_list_spinlock, flags);
        if (list_empty(&cl->free_rb_list.list)) {
                dev_warn(&cl->device->dev,
                         "[ishtp-ish] Rx buffers pool is empty\n");
                rets = -ENOMEM;
                rb = NULL;
                spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
                goto out;
        }
        rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
        list_del_init(&rb->list);
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

        rb->cl = cl;
        rb->buf_idx = 0;

        INIT_LIST_HEAD(&rb->list);
        rets = 0;

        /*
         * This must be BEFORE sending flow control -
         * response in ISR may come too fast...
         */
        spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
        list_add_tail(&rb->list, &dev->read_list.list);
        spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
        if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
                rets = -ENODEV;
                goto out;
        }
out:
        /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
        if (rets && rb) {
                spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
                list_del(&rb->list);
                spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

                spin_lock_irqsave(&cl->free_list_spinlock, flags);
                list_add_tail(&rb->list, &cl->free_rb_list.list);
                spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
        }
        return rets;
}

/**
 * ishtp_cl_send() - Queue a message for sending to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send, take a buffer from the TX
 * free ring, copy the message into it, queue it on the TX list and, if no
 * other message is pending, kick off transmission via ishtp_cl_send_msg().
 *
 * Return: 0 on success, else a negative error code
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
        struct ishtp_device *dev;
        int id;
        struct ishtp_cl_tx_ring *cl_msg;
        int have_msg_to_send = 0;
        unsigned long tx_flags, tx_free_flags;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (cl->state != ISHTP_CL_CONNECTED) {
                ++cl->err_send_msg;
                return -EPIPE;
        }

        if (dev->dev_state != ISHTP_DEV_ENABLED) {
                ++cl->err_send_msg;
                return -ENODEV;
        }

        /* Check if we have fw client device */
        id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
        if (id < 0) {
                ++cl->err_send_msg;
                return -ENOENT;
        }

        if (length > dev->fw_clients[id].props.max_msg_length) {
                ++cl->err_send_msg;
                return -EMSGSIZE;
        }

        /* No free TX buffers */
        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        if (list_empty(&cl->tx_free_list.list)) {
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                                       tx_free_flags);
                ++cl->err_send_msg;
                return -ENOMEM;
        }

        cl_msg = list_first_entry(&cl->tx_free_list.list,
                                  struct ishtp_cl_tx_ring, list);
        if (!cl_msg->send_buf.data) {
                spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                                       tx_free_flags);
                return -EIO;
                /* Should not happen, as free list is pre-allocated */
        }
        /*
         * This is safe, as 'length' is already checked for not exceeding
         * max ISHTP message size per client
         */
        list_del_init(&cl_msg->list);
        --cl->tx_ring_free_size;

        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
        memcpy(cl_msg->send_buf.data, buf, length);
        cl_msg->send_buf.size = length;
        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        have_msg_to_send = !list_empty(&cl->tx_list.list);
        list_add_tail(&cl_msg->list, &cl->tx_list.list);
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
                ishtp_cl_send_msg(dev, cl);

        return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);
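
/*
 * Example: backpressure-aware send built on ishtp_cl_send() and
 * ishtp_cl_get_tx_free_buffer_size(). A sketch only, kept out of the build
 * with #if 0: my_send_one() and its retry policy are hypothetical, not part
 * of this file.
 */
#if 0
static int my_send_one(struct ishtp_cl *cl, uint8_t *data, size_t len)
{
        /* Don't attempt a send that cannot fit in the free TX rings */
        if ((size_t)ishtp_cl_get_tx_free_buffer_size(cl) < len)
                return -EAGAIN; /* caller may retry after TX completions */

        return ishtp_cl_send(cl, data, len);
}
#endif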

/**
 * ishtp_cl_read_complete() - Handle a completely received message
 * @rb: pointer to the completed request block
 *
 * Move the completed read buffer to the client's in-process list and, if the
 * list was empty, schedule the bus RX event work to process it.
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
        unsigned long flags;
        int schedule_work_flag = 0;
        struct ishtp_cl *cl = rb->cl;

        spin_lock_irqsave(&cl->in_process_spinlock, flags);
        /*
         * if the in-process list is empty, then we need to schedule
         * the processing thread
         */
        schedule_work_flag = list_empty(&cl->in_process_list.list);
        list_add_tail(&rb->list, &cl->in_process_list.list);
        spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

        if (schedule_work_flag)
                ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_send() - IPC TX send function
 * @prm: pointer to client device instance
 *
 * Send a message over IPC. The message is split into fragments if it is
 * larger than the IPC MTU, and the fragments are written one by one.
 */
static void ipc_tx_send(void *prm)
{
        struct ishtp_cl *cl = prm;
        struct ishtp_cl_tx_ring *cl_msg;
        size_t rem;
        struct ishtp_device *dev = (cl ? cl->dev : NULL);
        struct ishtp_msg_hdr ishtp_hdr;
        unsigned long tx_flags, tx_free_flags;
        unsigned char *pmsg;

        if (!dev)
                return;

        /*
         * Other conditions if some critical error has
         * occurred before this callback is called
         */
        if (dev->dev_state != ISHTP_DEV_ENABLED)
                return;

        if (cl->state != ISHTP_CL_CONNECTED)
                return;

        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        if (list_empty(&cl->tx_list.list)) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        if (!cl->sending) {
                --cl->ishtp_flow_ctrl_creds;
                cl->last_ipc_acked = 0;
                cl->last_tx_path = CL_TX_PATH_IPC;
                cl->sending = 1;
        }

        cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
                            list);
        rem = cl_msg->send_buf.size - cl->tx_offs;

        while (rem > 0) {
                ishtp_hdr.host_addr = cl->host_client_id;
                ishtp_hdr.fw_addr = cl->fw_client_id;
                ishtp_hdr.reserved = 0;
                pmsg = cl_msg->send_buf.data + cl->tx_offs;

                if (rem <= dev->mtu) {
                        /* Last fragment or only one packet */
                        ishtp_hdr.length = rem;
                        ishtp_hdr.msg_complete = 1;
                        /* Submit to IPC queue with no callback */
                        ishtp_write_message(dev, &ishtp_hdr, pmsg);
                        cl->tx_offs = 0;
                        cl->sending = 0;

                        break;
                } else {
                        /* Send an IPC fragment */
                        ishtp_hdr.length = dev->mtu;
                        ishtp_hdr.msg_complete = 0;
                        /* All fragments submitted to IPC queue with no callback */
                        ishtp_write_message(dev, &ishtp_hdr, pmsg);
                        cl->tx_offs += dev->mtu;
                        rem = cl_msg->send_buf.size - cl->tx_offs;
                }
        }

        list_del_init(&cl_msg->list);
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
        ++cl->tx_ring_free_size;
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
                               tx_free_flags);
}

/**
 * ishtp_cl_send_msg_ipc() - Send message over IPC
 * @dev: ISHTP device instance
 * @cl: pointer to client device instance
 *
 * Send a message over IPC, not using DMA.
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
                                  struct ishtp_cl *cl)
{
        /* If we're in the middle of a DMA transfer, can't send over IPC */
        if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
                return;

        cl->tx_offs = 0;
        ipc_tx_send(cl);
        ++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: pointer to client device instance
 *
 * Copy the message into the host DMA TX buffer and send a DMA_XFER HBM
 * message describing it; fall back to IPC if no DMA buffer is available.
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
                                  struct ishtp_cl *cl)
{
        struct ishtp_msg_hdr hdr;
        struct dma_xfer_hbm dma_xfer;
        unsigned char *msg_addr;
        int off;
        struct ishtp_cl_tx_ring *cl_msg;
        unsigned long tx_flags, tx_free_flags;

        /* If we're in the middle of an IPC transfer, can't send over DMA */
        if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
                return;

        spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
        if (list_empty(&cl->tx_list.list)) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                return;
        }

        cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
                            list);

        msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
        if (!msg_addr) {
                spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
                if (dev->transfer_path == CL_TX_PATH_DEFAULT)
                        ishtp_cl_send_msg_ipc(dev, cl);
                return;
        }

        list_del_init(&cl_msg->list);
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

        --cl->ishtp_flow_ctrl_creds;
        cl->last_dma_acked = 0;
        cl->last_dma_addr = msg_addr;
        cl->last_tx_path = CL_TX_PATH_DMA;

        /* write msg to the dma buf */
        memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

        /*
         * if the current fw doesn't support cache snooping, the driver
         * has to flush the cache manually.
         */
        if (dev->ops->dma_no_cache_snooping &&
            dev->ops->dma_no_cache_snooping(dev))
                clflush_cache_range(msg_addr, cl_msg->send_buf.size);

        /* send dma_xfer hbm msg */
        off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
        ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
        dma_xfer.hbm = DMA_XFER;
        dma_xfer.fw_client_id = cl->fw_client_id;
        dma_xfer.host_client_id = cl->host_client_id;
        dma_xfer.reserved = 0;
        dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
        dma_xfer.msg_length = cl_msg->send_buf.size;
        dma_xfer.reserved2 = 0;
        ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
        spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
        list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
        ++cl->tx_ring_free_size;
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
        ++cl->send_msg_cnt_dma;
}

/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: pointer to client device instance
 *
 * Send a message using DMA or IPC, based on the device's transfer_path.
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
        if (dev->transfer_path == CL_TX_PATH_DMA)
                ishtp_cl_send_msg_dma(dev, cl);
        else
                ishtp_cl_send_msg_ipc(dev, cl);
}

/**
 * recv_ishtp_cl_msg() - Receive client message over IPC
 * @dev: ISHTP device instance
 * @ishtp_hdr: pointer to the received message header
 *
 * Receive and dispatch an ISHTP client message fragment. This function
 * executes in ISR or work queue context.
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
                       struct ishtp_msg_hdr *ishtp_hdr)
{
        struct ishtp_cl *cl;
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *new_rb;
        unsigned char *buffer = NULL;
        struct ishtp_cl_rb *complete_rb = NULL;
        unsigned long flags;
        int rb_count;

        if (ishtp_hdr->reserved) {
                dev_err(dev->devc, "corrupted message header.\n");
                goto eoi;
        }

        if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
                dev_err(dev->devc,
                        "ISHTP message length in hdr exceeds IPC MTU\n");
                goto eoi;
        }

        spin_lock_irqsave(&dev->read_list_spinlock, flags);
        rb_count = -1;
        list_for_each_entry(rb, &dev->read_list.list, list) {
                ++rb_count;
                cl = rb->cl;
                if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
                                cl->fw_client_id == ishtp_hdr->fw_addr) ||
                                !(cl->state == ISHTP_CL_CONNECTED))
                        continue;

                /* If no Rx buffer is allocated, disband the rb */
                if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock,
                                               flags);
                        dev_err(&cl->device->dev,
                                "Rx buffer is not allocated.\n");
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                        cl->status = -ENOMEM;
                        goto eoi;
                }

                /*
                 * If the message overflows the buffer (exceeds max. client
                 * msg size), drop the message and return the buffer to the
                 * free pool. Do we need to disconnect such a client? (We
                 * don't send back FC, so communication will be stuck anyway)
                 */
                if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock,
                                               flags);
                        dev_err(&cl->device->dev,
                                "message overflow. size %d len %d idx %ld\n",
                                rb->buffer.size, ishtp_hdr->length,
                                rb->buf_idx);
                        list_del(&rb->list);
                        ishtp_cl_io_rb_recycle(rb);
                        cl->status = -EIO;
                        goto eoi;
                }

                buffer = rb->buffer.data + rb->buf_idx;
                dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

                rb->buf_idx += ishtp_hdr->length;
                if (ishtp_hdr->msg_complete) {
                        /* Last fragment in message - it's complete */
                        cl->status = 0;
                        list_del(&rb->list);
                        complete_rb = rb;

                        --cl->out_flow_ctrl_creds;
                        /*
                         * the whole msg arrived, send a new FC, and add a new
                         * rb buffer for the next coming msg
                         */
                        spin_lock(&cl->free_list_spinlock);

                        if (!list_empty(&cl->free_rb_list.list)) {
                                new_rb = list_entry(cl->free_rb_list.list.next,
                                                    struct ishtp_cl_rb, list);
                                list_del_init(&new_rb->list);
                                spin_unlock(&cl->free_list_spinlock);
                                new_rb->cl = cl;
                                new_rb->buf_idx = 0;
                                INIT_LIST_HEAD(&new_rb->list);
                                list_add_tail(&new_rb->list,
                                              &dev->read_list.list);

                                ishtp_hbm_cl_flow_control_req(dev, cl);
                        } else {
                                spin_unlock(&cl->free_list_spinlock);
                        }
                }
                /* One more fragment in message (even if this was last) */
                ++cl->recv_msg_num_frags;

                /*
                 * We can safely break here (and in BH too),
                 * a single input message can go only to a single request!
                 */
                break;
        }

        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
        /* If it's nobody's message, just read and discard it */
        if (!buffer) {
                uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

                dev_err(dev->devc, "Dropped Rx msg - no request\n");
                dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
                goto eoi;
        }

        if (complete_rb) {
                cl = complete_rb->cl;
                cl->ts_rx = ktime_get();
                ++cl->recv_msg_cnt_ipc;
                ishtp_cl_read_complete(complete_rb);
        }
eoi:
        return;
}

/**
 * recv_ishtp_cl_msg_dma() - Receive client message via DMA
 * @dev: ISHTP device instance
 * @msg: pointer to the message in the host DMA RX buffer
 * @hbm: DMA_XFER HBM describing the message
 *
 * Receive and dispatch an ISHTP client message that arrived via DMA. This
 * function executes in ISR or work queue context.
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
                           struct dma_xfer_hbm *hbm)
{
        struct ishtp_cl *cl;
        struct ishtp_cl_rb *rb;
        struct ishtp_cl_rb *new_rb;
        unsigned char *buffer = NULL;
        struct ishtp_cl_rb *complete_rb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->read_list_spinlock, flags);

        list_for_each_entry(rb, &dev->read_list.list, list) {
                cl = rb->cl;
                if (!cl || !(cl->host_client_id == hbm->host_client_id &&
                                cl->fw_client_id == hbm->fw_client_id) ||
                                !(cl->state == ISHTP_CL_CONNECTED))
                        continue;

                /*
                 * If no Rx buffer is allocated, disband the rb
                 */
                if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock,
                                               flags);
                        dev_err(&cl->device->dev,
                                "response buffer is not allocated.\n");
                        list_del(&rb->list);
                        ishtp_io_rb_free(rb);
                        cl->status = -ENOMEM;
                        goto eoi;
                }

                /*
                 * If the message overflows the buffer (exceeds max. client
                 * msg size), drop the message and return the buffer to the
                 * free pool. Do we need to disconnect such a client? (We
                 * don't send back FC, so communication will be stuck anyway)
                 */
                if (rb->buffer.size < hbm->msg_length) {
                        spin_unlock_irqrestore(&dev->read_list_spinlock,
                                               flags);
                        dev_err(&cl->device->dev,
                                "message overflow. size %d len %d idx %ld\n",
                                rb->buffer.size, hbm->msg_length, rb->buf_idx);
                        list_del(&rb->list);
                        ishtp_cl_io_rb_recycle(rb);
                        cl->status = -EIO;
                        goto eoi;
                }

                buffer = rb->buffer.data;

                /*
                 * if the current fw doesn't support cache snooping, the
                 * driver has to flush the cache manually.
                 */
                if (dev->ops->dma_no_cache_snooping &&
                    dev->ops->dma_no_cache_snooping(dev))
                        clflush_cache_range(msg, hbm->msg_length);

                memcpy(buffer, msg, hbm->msg_length);
                rb->buf_idx = hbm->msg_length;

                /* Last fragment in message - it's complete */
                cl->status = 0;
                list_del(&rb->list);
                complete_rb = rb;

                --cl->out_flow_ctrl_creds;
                /*
                 * the whole msg arrived, send a new FC, and add a new
                 * rb buffer for the next coming msg
                 */
                spin_lock(&cl->free_list_spinlock);

                if (!list_empty(&cl->free_rb_list.list)) {
                        new_rb = list_entry(cl->free_rb_list.list.next,
                                            struct ishtp_cl_rb, list);
                        list_del_init(&new_rb->list);
                        spin_unlock(&cl->free_list_spinlock);
                        new_rb->cl = cl;
                        new_rb->buf_idx = 0;
                        INIT_LIST_HEAD(&new_rb->list);
                        list_add_tail(&new_rb->list,
                                      &dev->read_list.list);

                        ishtp_hbm_cl_flow_control_req(dev, cl);
                } else {
                        spin_unlock(&cl->free_list_spinlock);
                }

                /* One more fragment in message (this is always last) */
                ++cl->recv_msg_num_frags;

                /*
                 * We can safely break here (and in BH too),
                 * a single input message can go only to a single request!
                 */
                break;
        }

        spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
        /* If it's nobody's message, just drop it */
        if (!buffer) {
                dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
                goto eoi;
        }

        if (complete_rb) {
                cl = complete_rb->cl;
                cl->ts_rx = ktime_get();
                ++cl->recv_msg_cnt_dma;
                ishtp_cl_read_complete(complete_rb);
        }
eoi:
        return;
}

/**
 * ishtp_get_client_data() - Get client private data
 * @cl: client device instance
 *
 * Return: the client-private pointer set by ishtp_set_client_data()
 */
void *ishtp_get_client_data(struct ishtp_cl *cl)
{
        return cl->client_data;
}
EXPORT_SYMBOL(ishtp_get_client_data);

/**
 * ishtp_set_client_data() - Set client private data
 * @cl: client device instance
 * @data: client-private pointer to store
 */
void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
{
        cl->client_data = data;
}
EXPORT_SYMBOL(ishtp_set_client_data);

/**
 * ishtp_get_ishtp_device() - Get ISHTP device instance of a client
 * @cl: client device instance
 *
 * Return: the ISHTP device the client is attached to
 */
struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
{
        return cl->dev;
}
EXPORT_SYMBOL(ishtp_get_ishtp_device);

/**
 * ishtp_set_tx_ring_size() - Set TX ring size
 * @cl: client device instance
 * @size: number of TX ring buffers to allocate on connect
 */
void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
{
        cl->tx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_tx_ring_size);

/**
 * ishtp_set_rx_ring_size() - Set RX ring size
 * @cl: client device instance
 * @size: number of RX ring buffers to allocate on connect
 */
void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
{
        cl->rx_ring_size = size;
}
EXPORT_SYMBOL(ishtp_set_rx_ring_size);

/**
 * ishtp_set_connection_state() - Set client connection state
 * @cl: client device instance
 * @state: new connection state (ISHTP_CL_*)
 */
void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
{
        cl->state = state;
}
EXPORT_SYMBOL(ishtp_set_connection_state);

/**
 * ishtp_cl_set_fw_client_id() - Set firmware client id
 * @cl: client device instance
 * @fw_client_id: firmware client id to talk to
 */
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
        cl->fw_client_id = fw_client_id;
}
EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
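
/*
 * Example: stashing a driver context with ishtp_set_client_data() and
 * recovering it in an RX event handler. A sketch only, kept out of the build
 * with #if 0: struct my_driver_data and my_rx_event() are hypothetical, and
 * the event callback wiring lives in the ishtp bus layer, not in this file.
 */
#if 0
struct my_driver_data {
        struct ishtp_cl *cl;
        /* ... driver state ... */
};

static void my_rx_event(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);
        struct my_driver_data *priv = ishtp_get_client_data(cl);
        struct ishtp_cl_rb *rb;

        /* Drain completed read buffers; recycling re-arms flow control */
        while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                /* process rb->buffer.data (rb->buf_idx valid bytes) ... */
                ishtp_cl_io_rb_recycle(rb);
        }
}
#endif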