0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/export.h>
0009 #include <linux/slab.h>
0010 #include <linux/sched.h>
0011 #include <linux/wait.h>
0012 #include <linux/spinlock.h>
0013 #include "ishtp-dev.h"
0014 #include "hbm.h"
0015 #include "client.h"
0016
0017
0018
0019
0020
0021
0022
0023 static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
0024 {
0025 struct ishtp_fw_client *clients;
0026 int b;
0027
0028
0029 for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
0030 dev->fw_clients_num++;
0031
0032 if (dev->fw_clients_num <= 0)
0033 return;
0034
0035
0036 clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
0037 GFP_KERNEL);
0038 if (!clients) {
0039 dev->dev_state = ISHTP_DEV_RESETTING;
0040 ish_hw_reset(dev);
0041 return;
0042 }
0043 dev->fw_clients = clients;
0044 }
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055 static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
0056 void *buf, size_t len)
0057 {
0058 struct ishtp_hbm_cl_cmd *cmd = buf;
0059
0060 memset(cmd, 0, len);
0061
0062 cmd->hbm_cmd = hbm_cmd;
0063 cmd->host_addr = cl->host_client_id;
0064 cmd->fw_addr = cl->fw_client_id;
0065 }
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076 static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
0077 {
0078 struct ishtp_hbm_cl_cmd *cmd = buf;
0079
0080 return cl->host_client_id == cmd->host_addr &&
0081 cl->fw_client_id == cmd->fw_addr;
0082 }
0083
0084
0085
0086
0087
0088
0089
0090
0091
/**
 * ishtp_hbm_start_wait() - Wait for the HBM start handshake to complete
 * @dev: ISHTP device instance
 *
 * Sleeps interruptibly (up to ISHTP_INTEROP_TIMEOUT seconds) until the
 * HBM state machine advances past ISHTP_HBM_START, i.e. the FW has
 * answered the start request sent by ishtp_hbm_start_req().
 *
 * Return: 0 on success, -ETIMEDOUT if the FW did not answer in time
 * (or the wait was interrupted before the state advanced).
 */
int ishtp_hbm_start_wait(struct ishtp_device *dev)
{
	int ret;

	/* Handshake already completed: nothing to wait for */
	if (dev->hbm_state > ISHTP_HBM_START)
		return 0;

	dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);
	/* ret > 0: condition met; 0: timed out; < 0: interrupted by signal */
	ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
			dev->hbm_state >= ISHTP_HBM_STARTED,
			(ISHTP_INTEROP_TIMEOUT * HZ));

	dev_dbg(dev->devc,
		"Woke up from waiting for ishtp start. hbm_state=%08X\n",
		dev->hbm_state);

	/* Both timeout and signal count as failure if state never advanced */
	if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
		dev->hbm_state = ISHTP_HBM_IDLE;
		dev_err(dev->devc,
			"waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
			ret, dev->hbm_state);
		return -ETIMEDOUT;
	}
	return 0;
}
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127 int ishtp_hbm_start_req(struct ishtp_device *dev)
0128 {
0129 struct ishtp_msg_hdr hdr;
0130 struct hbm_host_version_request start_req = { 0 };
0131
0132 ishtp_hbm_hdr(&hdr, sizeof(start_req));
0133
0134
0135 start_req.hbm_cmd = HOST_START_REQ_CMD;
0136 start_req.host_version.major_version = HBM_MAJOR_VERSION;
0137 start_req.host_version.minor_version = HBM_MINOR_VERSION;
0138
0139
0140
0141
0142
0143
0144 dev->hbm_state = ISHTP_HBM_START;
0145 if (ishtp_write_message(dev, &hdr, &start_req)) {
0146 dev_err(dev->devc, "version message send failed\n");
0147 dev->dev_state = ISHTP_DEV_RESETTING;
0148 dev->hbm_state = ISHTP_HBM_IDLE;
0149 ish_hw_reset(dev);
0150 return -ENODEV;
0151 }
0152
0153 return 0;
0154 }
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164 void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
0165 {
0166 struct ishtp_msg_hdr hdr;
0167 struct hbm_host_enum_request enum_req = { 0 };
0168
0169
0170 ishtp_hbm_hdr(&hdr, sizeof(enum_req));
0171 enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
0172
0173 if (ishtp_write_message(dev, &hdr, &enum_req)) {
0174 dev->dev_state = ISHTP_DEV_RESETTING;
0175 dev_err(dev->devc, "enumeration request send failed\n");
0176 ish_hw_reset(dev);
0177 }
0178 dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
0179 }
0180
0181
0182
0183
0184
0185
0186
0187
0188
/**
 * ishtp_hbm_prop_req() - Request properties of the next enumerated client
 * @dev: ISHTP device instance
 *
 * Finds the next set bit in the FW client bitmap at/after
 * dev->fw_client_index and sends HOST_CLIENT_PROPERTIES_REQ_CMD for it.
 * When no more clients remain, marks the HBM state machine WORKING, the
 * device ENABLED, and registers each discovered client on the bus.
 *
 * Return: 0 on success (including the "all done" case), -EIO if the
 * properties request could not be sent (a HW reset is triggered).
 */
static int ishtp_hbm_prop_req(struct ishtp_device *dev)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_props_request prop_req = { 0 };
	unsigned long next_client_index;
	uint8_t client_num;

	/* Number of clients whose properties have been collected so far */
	client_num = dev->fw_client_presentation_num;

	next_client_index = find_next_bit(dev->fw_clients_map,
		ISHTP_CLIENTS_MAX, dev->fw_client_index);

	/* We got all client properties */
	if (next_client_index == ISHTP_CLIENTS_MAX) {
		dev->hbm_state = ISHTP_HBM_WORKING;
		dev->dev_state = ISHTP_DEV_ENABLED;

		/*
		 * Walk presentation numbers 1..client_num; each iteration
		 * registers one new client device on the ISHTP bus.
		 */
		for (dev->fw_client_presentation_num = 1;
			dev->fw_client_presentation_num < client_num + 1;
				++dev->fw_client_presentation_num)
			/* Add new client device */
			ishtp_bus_new_client(dev);
		return 0;
	}

	/* Record the client id before asking FW for its properties */
	dev->fw_clients[client_num].client_id = next_client_index;

	ishtp_hbm_hdr(&hdr, sizeof(prop_req));

	prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
	prop_req.address = next_client_index;

	if (ishtp_write_message(dev, &hdr, &prop_req)) {
		dev->dev_state = ISHTP_DEV_RESETTING;
		dev_err(dev->devc, "properties request send failed\n");
		ish_hw_reset(dev);
		return -EIO;
	}

	/* Resume the bitmap scan from this client next time */
	dev->fw_client_index = next_client_index;

	return 0;
}
0232
0233
0234
0235
0236
0237
0238
0239 static void ishtp_hbm_stop_req(struct ishtp_device *dev)
0240 {
0241 struct ishtp_msg_hdr hdr;
0242 struct hbm_host_stop_request stop_req = { 0 } ;
0243
0244 ishtp_hbm_hdr(&hdr, sizeof(stop_req));
0245
0246 stop_req.hbm_cmd = HOST_STOP_REQ_CMD;
0247 stop_req.reason = DRIVER_STOP_REQUEST;
0248
0249 ishtp_write_message(dev, &hdr, &stop_req);
0250 }
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
/**
 * ishtp_hbm_cl_flow_control_req() - Grant the FW one flow-control credit
 * @dev: ISHTP device instance
 * @cl: client to grant the credit on behalf of
 *
 * Sends an ISHTP_FLOW_CONTROL_CMD message for @cl unless a credit is
 * already outstanding. Bookkeeping (credit count, statistics and FC
 * latency timestamps) is updated under cl->fc_spinlock so that the
 * "already granted" check and the grant itself are atomic.
 *
 * Return: 0 on success or when a credit was already outstanding;
 * otherwise the error from ishtp_write_message().
 */
int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct hbm_flow_control flow_ctrl;
	const size_t len = sizeof(flow_ctrl);
	int rv;
	unsigned long flags;

	spin_lock_irqsave(&cl->fc_spinlock, flags);

	ishtp_hbm_hdr(&hdr, len);
	ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, &flow_ctrl, len);

	/*
	 * A credit is already outstanding: never grant more than one at a
	 * time. The check is done under fc_spinlock, after building the
	 * message, so a concurrent grant cannot slip in between.
	 */
	if (cl->out_flow_ctrl_creds) {
		spin_unlock_irqrestore(&cl->fc_spinlock, flags);
		return 0;
	}

	/* A fresh credit means a fresh message: reset fragment counter */
	cl->recv_msg_num_frags = 0;

	rv = ishtp_write_message(dev, &hdr, &flow_ctrl);
	if (!rv) {
		++cl->out_flow_ctrl_creds;
		++cl->out_flow_ctrl_cnt;
		/* Track worst-case delay between last Rx and the FC grant */
		cl->ts_out_fc = ktime_get();
		if (cl->ts_rx) {
			ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);
			if (ktime_after(ts_diff, cl->ts_max_fc_delay))
				cl->ts_max_fc_delay = ts_diff;
		}
	} else {
		++cl->err_send_fc;
	}

	spin_unlock_irqrestore(&cl->fc_spinlock, flags);
	return rv;
}
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313 int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
0314 {
0315 struct ishtp_msg_hdr hdr;
0316 struct hbm_client_connect_request disconn_req;
0317 const size_t len = sizeof(disconn_req);
0318
0319 ishtp_hbm_hdr(&hdr, len);
0320 ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, &disconn_req, len);
0321
0322 return ishtp_write_message(dev, &hdr, &disconn_req);
0323 }
0324
0325
0326
0327
0328
0329
0330
0331
0332 static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
0333 struct hbm_client_connect_response *rs)
0334 {
0335 struct ishtp_cl *cl = NULL;
0336 unsigned long flags;
0337
0338 spin_lock_irqsave(&dev->cl_list_lock, flags);
0339 list_for_each_entry(cl, &dev->cl_list, link) {
0340 if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
0341 cl->state = ISHTP_CL_DISCONNECTED;
0342 wake_up_interruptible(&cl->wait_ctrl_res);
0343 break;
0344 }
0345 }
0346 spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0347 }
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358 int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
0359 {
0360 struct ishtp_msg_hdr hdr;
0361 struct hbm_client_connect_request conn_req;
0362 const size_t len = sizeof(conn_req);
0363
0364 ishtp_hbm_hdr(&hdr, len);
0365 ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, &conn_req, len);
0366
0367 return ishtp_write_message(dev, &hdr, &conn_req);
0368 }
0369
0370
0371
0372
0373
0374
0375
0376
0377 static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
0378 struct hbm_client_connect_response *rs)
0379 {
0380 struct ishtp_cl *cl = NULL;
0381 unsigned long flags;
0382
0383 spin_lock_irqsave(&dev->cl_list_lock, flags);
0384 list_for_each_entry(cl, &dev->cl_list, link) {
0385 if (ishtp_hbm_cl_addr_equal(cl, rs)) {
0386 if (!rs->status) {
0387 cl->state = ISHTP_CL_CONNECTED;
0388 cl->status = 0;
0389 } else {
0390 cl->state = ISHTP_CL_DISCONNECTED;
0391 cl->status = -ENODEV;
0392 }
0393 wake_up_interruptible(&cl->wait_ctrl_res);
0394 break;
0395 }
0396 }
0397 spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0398 }
0399
0400
0401
0402
0403
0404
0405
0406
/**
 * ishtp_hbm_fw_disconnect_req() - Handle a FW-initiated disconnect
 * @dev: ISHTP device instance
 * @disconnect_req: disconnect request received from the FW
 *
 * Finds the client addressed by the request, marks it disconnected and
 * answers the FW with CLIENT_DISCONNECT_RES_CMD.
 */
static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
	struct hbm_client_connect_request *disconnect_req)
{
	struct ishtp_cl *cl;
	const size_t len = sizeof(struct hbm_client_connect_response);
	unsigned long flags;
	struct ishtp_msg_hdr hdr;
	/*
	 * NOTE(review): the response is built in a 4-byte scratch buffer
	 * while len is sizeof(struct hbm_client_connect_response) —
	 * presumably that struct is exactly 4 bytes; confirm against its
	 * definition before changing either side.
	 */
	unsigned char data[4];

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(cl, &dev->cl_list, link) {
		if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
			cl->state = ISHTP_CL_DISCONNECTED;

			/* send disconnect response to the FW */
			ishtp_hbm_hdr(&hdr, len);
			ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
				len);
			ishtp_write_message(dev, &hdr, data);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
0431
0432
0433
0434
0435
0436
0437
0438
/**
 * ishtp_hbm_dma_xfer_ack() - Process DMA transfer ACK(s) from the FW
 * @dev: ISHTP device instance
 * @dma_xfer: first ack record; several records may be packed into one
 *            HBM message (total derived from the saved IPC header length)
 *
 * For each ack record: validates that the acked range lies inside the
 * host DMA Tx buffer, releases that region, and — if the owning client's
 * last DMA'd message address falls inside the acked range — marks the
 * client acked and resumes its Tx queue when credits allow.
 */
static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
	struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
	unsigned int msg_offs;
	struct ishtp_cl *cl;

	/* Walk every ack record packed into this HBM message */
	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
			msg_offs += sizeof(struct dma_xfer_hbm)) {
		/* Reject addresses/sizes outside the Tx DMA buffer */
		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
		if (offs > dev->ishtp_host_dma_tx_buf_size) {
			dev_err(dev->devc, "Bad DMA Tx ack message address\n");
			return;
		}
		if (dma_xfer->msg_length >
				dev->ishtp_host_dma_tx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Tx ack message size\n");
			return;
		}

		/* Release the acked region of the Tx DMA buffer */
		msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
		ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);

		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->fw_client_id == dma_xfer->fw_client_id &&
					cl->host_client_id == dma_xfer->host_client_id)
				/*
				 * A single ack may cover several DMA
				 * transfers; match if the client's last
				 * DMA'd address is inside the acked range.
				 */
				if (cl->last_dma_addr >=
						(unsigned char *)msg &&
						cl->last_dma_addr <
						(unsigned char *)msg +
						dma_xfer->msg_length) {
					cl->last_dma_acked = 1;

					if (!list_empty(&cl->tx_list.list) &&
						cl->ishtp_flow_ctrl_creds) {
						/*
						 * start sending the first msg
						 */
						ishtp_cl_send_msg(dev, cl);
					}
				}
		}
		++dma_xfer;
	}
}
0494
0495
0496
0497
0498
0499
0500
0501
/**
 * ishtp_hbm_dma_xfer() - Process DMA transfer message(s) from the FW
 * @dev: ISHTP device instance
 * @dma_xfer: first transfer record; several records may be packed into
 *            one HBM message (total derived from the saved IPC header)
 *
 * For each record: validates that the message lies inside the host DMA
 * Rx buffer, delivers it to the client layer, and rewrites the record's
 * hbm field to DMA_XFER_ACK in place. The whole (now mutated) record
 * array is then sent back to the FW as the acknowledgement.
 */
static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
	struct dma_xfer_hbm *dma_xfer)
{
	void *msg;
	uint64_t offs;
	struct ishtp_msg_hdr hdr;
	struct ishtp_msg_hdr *ishtp_hdr =
		(struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
	/* Keep the start of the record array for the ack send below */
	struct dma_xfer_hbm *prm = dma_xfer;
	unsigned int msg_offs;

	for (msg_offs = 0; msg_offs < ishtp_hdr->length;
			msg_offs += sizeof(struct dma_xfer_hbm)) {
		/* Reject addresses/sizes outside the Rx DMA buffer */
		offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
		if (offs > dev->ishtp_host_dma_rx_buf_size) {
			dev_err(dev->devc, "Bad DMA Rx message address\n");
			return;
		}
		if (dma_xfer->msg_length >
				dev->ishtp_host_dma_rx_buf_size - offs) {
			dev_err(dev->devc, "Bad DMA Rx message size\n");
			return;
		}
		msg = dev->ishtp_host_dma_rx_buf + offs;
		recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
		/* Turn this record into its own ack, in place */
		dma_xfer->hbm = DMA_XFER_ACK;
		++dma_xfer;
	}

	/* Send DMA_XFER_ACK [...] back to the sender */
	ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
	ishtp_write_message(dev, &hdr, (unsigned char *)prm);
}
0536
0537
0538
0539
0540
0541
0542
0543
0544
/**
 * ishtp_hbm_dispatch() - HBM message dispatcher
 * @dev: ISHTP device instance
 * @hdr: received bus message
 *
 * Decodes the hbm_cmd of a received bus message and drives the HBM
 * state machine: version handshake, client enumeration, per-client
 * properties, connect/disconnect handling, stop handling and DMA
 * negotiation / transfer / ack processing. Most error paths force a
 * hardware reset.
 */
void ishtp_hbm_dispatch(struct ishtp_device *dev,
	struct ishtp_bus_message *hdr)
{
	struct ishtp_bus_message *ishtp_msg;
	struct ishtp_fw_client *fw_client;
	struct hbm_host_version_response *version_res;
	struct hbm_client_connect_response *connect_res;
	struct hbm_client_connect_response *disconnect_res;
	struct hbm_client_connect_request *disconnect_req;
	struct hbm_props_response *props_res;
	struct hbm_host_enum_response *enum_res;
	struct ishtp_msg_hdr ishtp_hdr;
	struct dma_alloc_notify dma_alloc_notify;
	struct dma_xfer_hbm *dma_xfer;

	ishtp_msg = hdr;

	switch (ishtp_msg->hbm_cmd) {
	case HOST_START_RES_CMD:
		version_res = (struct hbm_host_version_response *)ishtp_msg;
		if (!version_res->host_version_supported) {
			/* FW rejected our HBM version: record its max and stop */
			dev->version = version_res->fw_max_version;

			dev->hbm_state = ISHTP_HBM_STOPPED;
			ishtp_hbm_stop_req(dev);
			return;
		}

		dev->version.major_version = HBM_MAJOR_VERSION;
		dev->version.minor_version = HBM_MINOR_VERSION;
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
				dev->hbm_state == ISHTP_HBM_START) {
			/* Handshake OK: proceed to client enumeration */
			dev->hbm_state = ISHTP_HBM_STARTED;
			ishtp_hbm_enum_clients_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: wrong host start response\n");
			/* BUG: should we do something??? */
			ish_hw_reset(dev);
			return;
		}

		/* Wake anyone blocked in ishtp_hbm_start_wait() */
		wake_up_interruptible(&dev->wait_hbm_recvd_msg);
		break;

	case CLIENT_CONNECT_RES_CMD:
		connect_res = (struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_connect_res(dev, connect_res);
		break;

	case CLIENT_DISCONNECT_RES_CMD:
		disconnect_res =
			(struct hbm_client_connect_response *)ishtp_msg;
		ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
		break;

	case HOST_CLIENT_PROPERTIES_RES_CMD:
		props_res = (struct hbm_props_response *)ishtp_msg;
		fw_client = &dev->fw_clients[dev->fw_client_presentation_num];

		if (props_res->status || !dev->fw_clients) {
			dev_err(dev->devc,
				"reset: properties response hbm wrong status\n");
			ish_hw_reset(dev);
			return;
		}

		/* Response must match the client we asked about */
		if (fw_client->client_id != props_res->address) {
			dev_err(dev->devc,
				"reset: host properties response address mismatch [%02X %02X]\n",
				fw_client->client_id, props_res->address);
			ish_hw_reset(dev);
			return;
		}

		if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
			dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
			dev_err(dev->devc,
				"reset: unexpected properties response\n");
			ish_hw_reset(dev);
			return;
		}

		fw_client->props = props_res->client_properties;
		dev->fw_client_index++;
		dev->fw_client_presentation_num++;

		/* Ask for properties of the next client (or finish) */
		ishtp_hbm_prop_req(dev);

		/* Only continue to DMA setup once enumeration finished */
		if (dev->dev_state != ISHTP_DEV_ENABLED)
			break;

		if (!ishtp_use_dma_transfer())
			break;

		/* Offer the FW a host-allocated Rx DMA buffer */
		dev_dbg(dev->devc, "Requesting to use DMA\n");
		ishtp_cl_alloc_dma_buf(dev);
		if (dev->ishtp_host_dma_rx_buf) {
			const size_t len = sizeof(dma_alloc_notify);

			memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
			dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
			dma_alloc_notify.buf_size =
				dev->ishtp_host_dma_rx_buf_size;
			dma_alloc_notify.buf_address =
				dev->ishtp_host_dma_rx_buf_phys;
			ishtp_hbm_hdr(&ishtp_hdr, len);
			ishtp_write_message(dev, &ishtp_hdr,
				(unsigned char *)&dma_alloc_notify);
		}

		break;

	case HOST_ENUM_RES_CMD:
		enum_res = (struct hbm_host_enum_response *) ishtp_msg;
		/* 32 bytes = ISHTP_CLIENTS_MAX bits of valid client addrs */
		memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
		if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
			dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
			dev->fw_client_presentation_num = 0;
			dev->fw_client_index = 0;

			ishtp_hbm_fw_cl_allocate(dev);
			dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;

			/* Start the per-client properties query loop */
			ishtp_hbm_prop_req(dev);
		} else {
			dev_err(dev->devc,
				"reset: unexpected enumeration response hbm\n");
			ish_hw_reset(dev);
			return;
		}
		break;

	case HOST_STOP_RES_CMD:
		if (dev->hbm_state != ISHTP_HBM_STOPPED)
			dev_err(dev->devc, "unexpected stop response\n");

		dev->dev_state = ISHTP_DEV_DISABLED;
		dev_info(dev->devc, "reset: FW stop response\n");
		ish_hw_reset(dev);
		break;

	case CLIENT_DISCONNECT_REQ_CMD:
		/* The FW is asking us to disconnect one of our clients */
		disconnect_req =
			(struct hbm_client_connect_request *)ishtp_msg;
		ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
		break;

	case FW_STOP_REQ_CMD:
		dev->hbm_state = ISHTP_HBM_STOPPED;
		break;

	case DMA_BUFFER_ALLOC_RESPONSE:
		dev->ishtp_host_dma_enabled = 1;
		break;

	case DMA_XFER:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled) {
			dev_err(dev->devc,
				"DMA XFER requested but DMA is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer(dev, dma_xfer);
		break;

	case DMA_XFER_ACK:
		dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
		if (!dev->ishtp_host_dma_enabled ||
			!dev->ishtp_host_dma_tx_buf) {
			dev_err(dev->devc,
				"DMA XFER acked but DMA Tx is not enabled\n");
			break;
		}
		ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
		break;

	default:
		dev_err(dev->devc, "unknown HBM: %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);

		break;
	}
}
0732
0733
0734
0735
0736
0737
0738
0739
/**
 * bh_hbm_work_fn() - Bottom half work function to process an HBM message
 * @work: work struct embedded in the ISHTP device
 *
 * Pops one fixed-size slot from the read-message FIFO (filled by
 * recv_hbm() in interrupt context) and dispatches it. The FIFO lock is
 * dropped before dispatching so the dispatcher may sleep or send.
 */
void bh_hbm_work_fn(struct work_struct *work)
{
	unsigned long flags;
	struct ishtp_device *dev;
	unsigned char hbm[IPC_PAYLOAD_SIZE];

	dev = container_of(work, struct ishtp_device, bh_hbm_work);
	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
	/* head != tail means the FIFO holds at least one queued message */
	if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
		/* Copy out under the lock; dispatch after releasing it */
		memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
			IPC_PAYLOAD_SIZE);
		dev->rd_msg_fifo_head =
			(dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
	} else {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	}
}
0760
0761
0762
0763
0764
0765
0766
0767
0768
/**
 * recv_hbm() - Receive an HBM message (called from ISR context)
 * @dev: ISHTP device instance
 * @ishtp_hdr: IPC message header of the incoming message
 *
 * Reads the message payload and routes it by urgency:
 *  - flow-control grants are handled inline (credit bookkeeping and
 *    Tx-queue kick for the addressed client);
 *  - connect/disconnect responses, FW disconnect requests and DMA_XFER
 *    are dispatched synchronously so waiters/senders are not delayed;
 *  - everything else is queued into the read-message FIFO and handled
 *    later by bh_hbm_work_fn() in process context.
 */
void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
{
	uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
	struct ishtp_bus_message *ishtp_msg =
		(struct ishtp_bus_message *)rd_msg_buf;
	unsigned long flags;

	dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);

	/* Flow-control grant: bookkeeping only, handle right here */
	if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
		struct hbm_flow_control *flow_control =
			(struct hbm_flow_control *)ishtp_msg;
		struct ishtp_cl *cl = NULL;
		unsigned long flags, tx_flags;

		spin_lock_irqsave(&dev->cl_list_lock, flags);
		list_for_each_entry(cl, &dev->cl_list, link) {
			if (cl->host_client_id == flow_control->host_addr &&
					cl->fw_client_id ==
					flow_control->fw_addr) {
				/*
				 * A single credit at a time is expected;
				 * more than one grant from the FW is a
				 * protocol anomaly worth logging.
				 */
				if (cl->ishtp_flow_ctrl_creds)
					dev_err(dev->devc,
					 "recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
					 (unsigned int)cl->fw_client_id,
					 (unsigned int)cl->host_client_id,
					 cl->ishtp_flow_ctrl_creds);
				else {
					++cl->ishtp_flow_ctrl_creds;
					++cl->ishtp_flow_ctrl_cnt;
					cl->last_ipc_acked = 1;
					spin_lock_irqsave(
							&cl->tx_list_spinlock,
							tx_flags);
					if (!list_empty(&cl->tx_list.list)) {
						/*
						 * Queued Tx exists: drop the
						 * list lock first, then send.
						 */
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
						ishtp_cl_send_msg(dev, cl);
					} else {
						spin_unlock_irqrestore(
							&cl->tx_list_spinlock,
							tx_flags);
					}
				}
				break;
			}
		}
		spin_unlock_irqrestore(&dev->cl_list_lock, flags);
		goto	eoi;
	}

	/*
	 * Some messages that may safely be read in ISR context are
	 * dispatched synchronously to avoid delaying waiters:
	 * connect/disconnect responses, FW disconnect requests, DMA_XFER.
	 */
	if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
			ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
			ishtp_msg->hbm_cmd == DMA_XFER) {
		ishtp_hbm_dispatch(dev, ishtp_msg);
		goto	eoi;
	}

	/*
	 * Anything else goes through the FIFO to be handled in the
	 * bottom half; drop the message (with an error) on overflow.
	 */
	spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
	if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
			(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
			dev->rd_msg_fifo_head) {
		spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
		dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
			(unsigned int)ishtp_msg->hbm_cmd);
		goto	eoi;
	}
	memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
		ishtp_hdr->length);
	dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
		(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
	spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
	schedule_work(&dev->bh_hbm_work);
eoi:
	return;
}
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875 void recv_fixed_cl_msg(struct ishtp_device *dev,
0876 struct ishtp_msg_hdr *ishtp_hdr)
0877 {
0878 uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
0879
0880 dev->print_log(dev,
0881 "%s() got fixed client msg from client #%d\n",
0882 __func__, ishtp_hdr->fw_addr);
0883 dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
0884 if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
0885 struct ish_system_states_header *msg_hdr =
0886 (struct ish_system_states_header *)rd_msg_buf;
0887 if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
0888 ishtp_send_resume(dev);
0889
0890 else
0891 dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
0892 msg_hdr->cmd);
0893 }
0894 }
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904 static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
0905 uint8_t cl_addr)
0906 {
0907 hdr->host_addr = 0;
0908 hdr->fw_addr = cl_addr;
0909 hdr->length = length;
0910 hdr->msg_complete = 1;
0911 hdr->reserved = 0;
0912 }
0913
0914
0915
/* Last system-state bits reported to the FW (file-local cache) */
static uint32_t current_state;
/* State bits this host advertises in SYSTEM_STATE_STATUS messages */
static uint32_t supported_states = SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT;
0918
0919
0920
0921
0922
0923
0924
0925 void ishtp_send_suspend(struct ishtp_device *dev)
0926 {
0927 struct ishtp_msg_hdr ishtp_hdr;
0928 struct ish_system_states_status state_status_msg;
0929 const size_t len = sizeof(struct ish_system_states_status);
0930
0931 fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
0932
0933 memset(&state_status_msg, 0, len);
0934 state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
0935 state_status_msg.supported_states = supported_states;
0936 current_state |= (SUSPEND_STATE_BIT | CONNECTED_STANDBY_STATE_BIT);
0937 dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
0938 state_status_msg.states_status = current_state;
0939
0940 ishtp_write_message(dev, &ishtp_hdr,
0941 (unsigned char *)&state_status_msg);
0942 }
0943 EXPORT_SYMBOL(ishtp_send_suspend);
0944
0945
0946
0947
0948
0949
0950
0951 void ishtp_send_resume(struct ishtp_device *dev)
0952 {
0953 struct ishtp_msg_hdr ishtp_hdr;
0954 struct ish_system_states_status state_status_msg;
0955 const size_t len = sizeof(struct ish_system_states_status);
0956
0957 fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
0958
0959 memset(&state_status_msg, 0, len);
0960 state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
0961 state_status_msg.supported_states = supported_states;
0962 current_state &= ~(CONNECTED_STANDBY_STATE_BIT | SUSPEND_STATE_BIT);
0963 dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
0964 state_status_msg.states_status = current_state;
0965
0966 ishtp_write_message(dev, &ishtp_hdr,
0967 (unsigned char *)&state_status_msg);
0968 }
0969 EXPORT_SYMBOL(ishtp_send_resume);
0970
0971
0972
0973
0974
0975
0976
0977 void ishtp_query_subscribers(struct ishtp_device *dev)
0978 {
0979 struct ishtp_msg_hdr ishtp_hdr;
0980 struct ish_system_states_query_subscribers query_subscribers_msg;
0981 const size_t len = sizeof(struct ish_system_states_query_subscribers);
0982
0983 fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
0984
0985 memset(&query_subscribers_msg, 0, len);
0986 query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
0987
0988 ishtp_write_message(dev, &ishtp_hdr,
0989 (unsigned char *)&query_subscribers_msg);
0990 }