/* (source listing extracted from the OSCL-LXR code browser; navigation chrome removed) */
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * ISHTP client logic
0004  *
0005  * Copyright (c) 2003-2016, Intel Corporation.
0006  */
0007 
0008 #include <linux/slab.h>
0009 #include <linux/sched.h>
0010 #include <linux/wait.h>
0011 #include <linux/delay.h>
0012 #include <linux/dma-mapping.h>
0013 #include <asm/cacheflush.h>
0014 #include "hbm.h"
0015 #include "client.h"
0016 
0017 int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
0018 {
0019     unsigned long tx_free_flags;
0020     int size;
0021 
0022     spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
0023     size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
0024     spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
0025 
0026     return size;
0027 }
0028 EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
0029 
0030 int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
0031 {
0032     return cl->tx_ring_free_size;
0033 }
0034 EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
0035 
0036 /**
0037  * ishtp_read_list_flush() - Flush read queue
0038  * @cl: ishtp client instance
0039  *
0040  * Used to remove all entries from read queue for a client
0041  */
0042 static void ishtp_read_list_flush(struct ishtp_cl *cl)
0043 {
0044     struct ishtp_cl_rb *rb;
0045     struct ishtp_cl_rb *next;
0046     unsigned long   flags;
0047 
0048     spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
0049     list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
0050         if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
0051             list_del(&rb->list);
0052             ishtp_io_rb_free(rb);
0053         }
0054     spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
0055 }
0056 
0057 /**
0058  * ishtp_cl_flush_queues() - Flush all queues for a client
0059  * @cl: ishtp client instance
0060  *
0061  * Used to remove all queues for a client. This is called when a client device
0062  * needs reset due to error, S3 resume or during module removal
0063  *
0064  * Return: 0 on success else -EINVAL if device is NULL
0065  */
0066 int ishtp_cl_flush_queues(struct ishtp_cl *cl)
0067 {
0068     if (WARN_ON(!cl || !cl->dev))
0069         return -EINVAL;
0070 
0071     ishtp_read_list_flush(cl);
0072 
0073     return 0;
0074 }
0075 EXPORT_SYMBOL(ishtp_cl_flush_queues);
0076 
0077 /**
0078  * ishtp_cl_init() - Initialize all fields of a client device
0079  * @cl: ishtp client instance
0080  * @dev: ishtp device
0081  *
0082  * Initializes a client device fields: Init spinlocks, init queues etc.
0083  * This function is called during new client creation
0084  */
0085 static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
0086 {
0087     memset(cl, 0, sizeof(struct ishtp_cl));
0088     init_waitqueue_head(&cl->wait_ctrl_res);
0089     spin_lock_init(&cl->free_list_spinlock);
0090     spin_lock_init(&cl->in_process_spinlock);
0091     spin_lock_init(&cl->tx_list_spinlock);
0092     spin_lock_init(&cl->tx_free_list_spinlock);
0093     spin_lock_init(&cl->fc_spinlock);
0094     INIT_LIST_HEAD(&cl->link);
0095     cl->dev = dev;
0096 
0097     INIT_LIST_HEAD(&cl->free_rb_list.list);
0098     INIT_LIST_HEAD(&cl->tx_list.list);
0099     INIT_LIST_HEAD(&cl->tx_free_list.list);
0100     INIT_LIST_HEAD(&cl->in_process_list.list);
0101 
0102     cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
0103     cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
0104     cl->tx_ring_free_size = cl->tx_ring_size;
0105 
0106     /* dma */
0107     cl->last_tx_path = CL_TX_PATH_IPC;
0108     cl->last_dma_acked = 1;
0109     cl->last_dma_addr = NULL;
0110     cl->last_ipc_acked = 1;
0111 }
0112 
0113 /**
0114  * ishtp_cl_allocate() - allocates client structure and sets it up.
0115  * @cl_device: ishtp client device
0116  *
0117  * Allocate memory for new client device and call to initialize each field.
0118  *
0119  * Return: The allocated client instance or NULL on failure
0120  */
0121 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
0122 {
0123     struct ishtp_cl *cl;
0124 
0125     cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
0126     if (!cl)
0127         return NULL;
0128 
0129     ishtp_cl_init(cl, cl_device->ishtp_dev);
0130     return cl;
0131 }
0132 EXPORT_SYMBOL(ishtp_cl_allocate);
0133 
0134 /**
0135  * ishtp_cl_free() - Frees a client device
0136  * @cl: client device instance
0137  *
0138  * Frees a client device
0139  */
0140 void    ishtp_cl_free(struct ishtp_cl *cl)
0141 {
0142     struct ishtp_device *dev;
0143     unsigned long flags;
0144 
0145     if (!cl)
0146         return;
0147 
0148     dev = cl->dev;
0149     if (!dev)
0150         return;
0151 
0152     spin_lock_irqsave(&dev->cl_list_lock, flags);
0153     ishtp_cl_free_rx_ring(cl);
0154     ishtp_cl_free_tx_ring(cl);
0155     kfree(cl);
0156     spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0157 }
0158 EXPORT_SYMBOL(ishtp_cl_free);
0159 
0160 /**
0161  * ishtp_cl_link() - Reserve a host id and link the client instance
0162  * @cl: client device instance
0163  *
0164  * This allocates a single bit in the hostmap. This function will make sure
0165  * that not many client sessions are opened at the same time. Once allocated
0166  * the client device instance is added to the ishtp device in the current
0167  * client list
0168  *
0169  * Return: 0 or error code on failure
0170  */
0171 int ishtp_cl_link(struct ishtp_cl *cl)
0172 {
0173     struct ishtp_device *dev;
0174     unsigned long flags, flags_cl;
0175     int id, ret = 0;
0176 
0177     if (WARN_ON(!cl || !cl->dev))
0178         return -EINVAL;
0179 
0180     dev = cl->dev;
0181 
0182     spin_lock_irqsave(&dev->device_lock, flags);
0183 
0184     if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
0185         ret = -EMFILE;
0186         goto unlock_dev;
0187     }
0188 
0189     id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
0190 
0191     if (id >= ISHTP_CLIENTS_MAX) {
0192         spin_unlock_irqrestore(&dev->device_lock, flags);
0193         dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
0194         return -ENOENT;
0195     }
0196 
0197     dev->open_handle_count++;
0198     cl->host_client_id = id;
0199     spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
0200     if (dev->dev_state != ISHTP_DEV_ENABLED) {
0201         ret = -ENODEV;
0202         goto unlock_cl;
0203     }
0204     list_add_tail(&cl->link, &dev->cl_list);
0205     set_bit(id, dev->host_clients_map);
0206     cl->state = ISHTP_CL_INITIALIZING;
0207 
0208 unlock_cl:
0209     spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
0210 unlock_dev:
0211     spin_unlock_irqrestore(&dev->device_lock, flags);
0212     return ret;
0213 }
0214 EXPORT_SYMBOL(ishtp_cl_link);
0215 
0216 /**
0217  * ishtp_cl_unlink() - remove fw_cl from the client device list
0218  * @cl: client device instance
0219  *
0220  * Remove a previously linked device to a ishtp device
0221  */
0222 void ishtp_cl_unlink(struct ishtp_cl *cl)
0223 {
0224     struct ishtp_device *dev;
0225     struct ishtp_cl *pos;
0226     unsigned long   flags;
0227 
0228     /* don't shout on error exit path */
0229     if (!cl || !cl->dev)
0230         return;
0231 
0232     dev = cl->dev;
0233 
0234     spin_lock_irqsave(&dev->device_lock, flags);
0235     if (dev->open_handle_count > 0) {
0236         clear_bit(cl->host_client_id, dev->host_clients_map);
0237         dev->open_handle_count--;
0238     }
0239     spin_unlock_irqrestore(&dev->device_lock, flags);
0240 
0241     /*
0242      * This checks that 'cl' is actually linked into device's structure,
0243      * before attempting 'list_del'
0244      */
0245     spin_lock_irqsave(&dev->cl_list_lock, flags);
0246     list_for_each_entry(pos, &dev->cl_list, link)
0247         if (cl->host_client_id == pos->host_client_id) {
0248             list_del_init(&pos->link);
0249             break;
0250         }
0251     spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0252 }
0253 EXPORT_SYMBOL(ishtp_cl_unlink);
0254 
0255 /**
0256  * ishtp_cl_disconnect() - Send disconnect request to firmware
0257  * @cl: client device instance
0258  *
0259  * Send a disconnect request for a client to firmware.
0260  *
0261  * Return: 0 if successful disconnect response from the firmware or error
0262  * code on failure
0263  */
0264 int ishtp_cl_disconnect(struct ishtp_cl *cl)
0265 {
0266     struct ishtp_device *dev;
0267 
0268     if (WARN_ON(!cl || !cl->dev))
0269         return -ENODEV;
0270 
0271     dev = cl->dev;
0272 
0273     dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
0274 
0275     if (cl->state != ISHTP_CL_DISCONNECTING) {
0276         dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
0277         return 0;
0278     }
0279 
0280     if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
0281         dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
0282         dev_err(&cl->device->dev, "failed to disconnect.\n");
0283         return -ENODEV;
0284     }
0285 
0286     wait_event_interruptible_timeout(cl->wait_ctrl_res,
0287             (dev->dev_state != ISHTP_DEV_ENABLED ||
0288             cl->state == ISHTP_CL_DISCONNECTED),
0289             ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
0290 
0291     /*
0292      * If FW reset arrived, this will happen. Don't check cl->,
0293      * as 'cl' may be freed already
0294      */
0295     if (dev->dev_state != ISHTP_DEV_ENABLED) {
0296         dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
0297                    __func__);
0298         return -ENODEV;
0299     }
0300 
0301     if (cl->state == ISHTP_CL_DISCONNECTED) {
0302         dev->print_log(dev, "%s() successful\n", __func__);
0303         return 0;
0304     }
0305 
0306     return -ENODEV;
0307 }
0308 EXPORT_SYMBOL(ishtp_cl_disconnect);
0309 
0310 /**
0311  * ishtp_cl_is_other_connecting() - Check other client is connecting
0312  * @cl: client device instance
0313  *
0314  * Checks if other client with the same fw client id is connecting
0315  *
0316  * Return: true if other client is connected else false
0317  */
0318 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
0319 {
0320     struct ishtp_device *dev;
0321     struct ishtp_cl *pos;
0322     unsigned long   flags;
0323 
0324     if (WARN_ON(!cl || !cl->dev))
0325         return false;
0326 
0327     dev = cl->dev;
0328     spin_lock_irqsave(&dev->cl_list_lock, flags);
0329     list_for_each_entry(pos, &dev->cl_list, link) {
0330         if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
0331                 cl->fw_client_id == pos->fw_client_id) {
0332             spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0333             return true;
0334         }
0335     }
0336     spin_unlock_irqrestore(&dev->cl_list_lock, flags);
0337 
0338     return false;
0339 }
0340 
/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful, also
 * binds the client device, allocates RX and TX ring buffers and emits an
 * initial flow-control request.
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
    struct ishtp_device *dev;
    int rets;

    if (WARN_ON(!cl || !cl->dev))
        return -ENODEV;

    dev = cl->dev;

    dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

    /* A concurrent connect to the same fw client must finish first */
    if (ishtp_cl_is_other_connecting(cl)) {
        dev->print_log(dev, "%s() Busy\n", __func__);
        return  -EBUSY;
    }

    if (ishtp_hbm_cl_connect_req(dev, cl)) {
        dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
        return -ENODEV;
    }

    /* Wait for a connect/disconnect response (or fw reset, or timeout) */
    rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
                (dev->dev_state == ISHTP_DEV_ENABLED &&
                (cl->state == ISHTP_CL_CONNECTED ||
                 cl->state == ISHTP_CL_DISCONNECTED)),
                ishtp_secs_to_jiffies(
                    ISHTP_CL_CONNECT_TIMEOUT));
    /*
     * If FW reset arrived, this will happen. Don't check cl->,
     * as 'cl' may be freed already
     */
    if (dev->dev_state != ISHTP_DEV_ENABLED) {
        dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
                   __func__);
        return -EFAULT;
    }

    if (cl->state != ISHTP_CL_CONNECTED) {
        dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
                   __func__);
        return -EFAULT;
    }

    /* cl->status carries the result reported by the HBM response */
    rets = cl->status;
    if (rets) {
        dev->print_log(dev, "%s() Invalid status\n", __func__);
        return rets;
    }

    rets = ishtp_cl_device_bind(cl);
    if (rets) {
        dev->print_log(dev, "%s() Bind error\n", __func__);
        ishtp_cl_disconnect(cl);
        return rets;
    }

    rets = ishtp_cl_alloc_rx_ring(cl);
    if (rets) {
        dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
        /* if failed allocation, disconnect */
        ishtp_cl_disconnect(cl);
        return rets;
    }

    rets = ishtp_cl_alloc_tx_ring(cl);
    if (rets) {
        dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
        /* if failed allocation, free the RX ring made above and disconnect */
        ishtp_cl_free_rx_ring(cl);
        ishtp_cl_disconnect(cl);
        return rets;
    }

    /* Upon successful connection and allocation, emit flow-control */
    rets = ishtp_cl_read_start(cl);

    dev->print_log(dev, "%s() successful\n", __func__);

    return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);
0433 
/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Take a buffer from the pool of free read buffers, queue it on the
 * device read list, and send a flow control request to firmware so it
 * may send the next message. On flow-control failure the buffer is
 * returned to the free pool.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
    struct ishtp_device *dev;
    struct ishtp_cl_rb *rb;
    int rets;
    int i;
    unsigned long   flags;
    unsigned long   dev_flags;

    if (WARN_ON(!cl || !cl->dev))
        return -ENODEV;

    dev = cl->dev;

    if (cl->state != ISHTP_CL_CONNECTED)
        return -ENODEV;

    if (dev->dev_state != ISHTP_DEV_ENABLED)
        return -ENODEV;

    /* Verify the fw client this cl is bound to still exists */
    i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
    if (i < 0) {
        dev_err(&cl->device->dev, "no such fw client %d\n",
            cl->fw_client_id);
        return -ENODEV;
    }

    /* The current rb is the head of the free rb list */
    spin_lock_irqsave(&cl->free_list_spinlock, flags);
    if (list_empty(&cl->free_rb_list.list)) {
        dev_warn(&cl->device->dev,
             "[ishtp-ish] Rx buffers pool is empty\n");
        rets = -ENOMEM;
        rb = NULL;
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
        goto out;
    }
    rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
    list_del_init(&rb->list);
    spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

    rb->cl = cl;
    rb->buf_idx = 0;

    INIT_LIST_HEAD(&rb->list);
    rets = 0;

    /*
     * This must be BEFORE sending flow control -
     * response in ISR may come too fast...
     */
    spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
    list_add_tail(&rb->list, &dev->read_list.list);
    spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
    if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
        rets = -ENODEV;
        goto out;
    }
out:
    /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
    if (rets && rb) {
        spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
        list_del(&rb->list);
        spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

        spin_lock_irqsave(&cl->free_list_spinlock, flags);
        list_add_tail(&rb->list, &cl->free_rb_list.list);
        spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
    }
    return rets;
}
0515 
/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * takes a buffer from the tx free ring, copies the message data into it,
 * queues it on the tx list and — if no transmission was already pending
 * and flow-control credits are available — kicks ishtp_cl_send_msg().
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
    struct ishtp_device *dev;
    int id;
    struct ishtp_cl_tx_ring *cl_msg;
    int have_msg_to_send = 0;
    unsigned long   tx_flags, tx_free_flags;

    if (WARN_ON(!cl || !cl->dev))
        return -ENODEV;

    dev = cl->dev;

    if (cl->state != ISHTP_CL_CONNECTED) {
        ++cl->err_send_msg;
        return -EPIPE;
    }

    if (dev->dev_state != ISHTP_DEV_ENABLED) {
        ++cl->err_send_msg;
        return -ENODEV;
    }

    /* Check if we have fw client device */
    id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
    if (id < 0) {
        ++cl->err_send_msg;
        return -ENOENT;
    }

    /* Reject messages larger than the fw client's maximum */
    if (length > dev->fw_clients[id].props.max_msg_length) {
        ++cl->err_send_msg;
        return -EMSGSIZE;
    }

    /* No free bufs */
    spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
    if (list_empty(&cl->tx_free_list.list)) {
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
            tx_free_flags);
        ++cl->err_send_msg;
        return  -ENOMEM;
    }

    cl_msg = list_first_entry(&cl->tx_free_list.list,
        struct ishtp_cl_tx_ring, list);
    if (!cl_msg->send_buf.data) {
        spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
            tx_free_flags);
        return  -EIO;
        /* Should not happen, as free list is pre-allocated */
    }
    /*
     * This is safe, as 'length' is already checked for not exceeding
     * max ISHTP message size per client
     */
    list_del_init(&cl_msg->list);
    --cl->tx_ring_free_size;

    spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
    memcpy(cl_msg->send_buf.data, buf, length);
    cl_msg->send_buf.size = length;
    /* Queue the message; remember whether TX was already in progress */
    spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
    have_msg_to_send = !list_empty(&cl->tx_list.list);
    list_add_tail(&cl_msg->list, &cl->tx_list.list);
    spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

    /* Only kick TX when the queue was idle and we hold fc credits */
    if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
        ishtp_cl_send_msg(dev, cl);

    return  0;
}
EXPORT_SYMBOL(ishtp_cl_send);
0601 
0602 /**
0603  * ishtp_cl_read_complete() - read complete
0604  * @rb: Pointer to client request block
0605  *
0606  * If the message is completely received call ishtp_cl_bus_rx_event()
0607  * to process message
0608  */
0609 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
0610 {
0611     unsigned long   flags;
0612     int schedule_work_flag = 0;
0613     struct ishtp_cl *cl = rb->cl;
0614 
0615     spin_lock_irqsave(&cl->in_process_spinlock, flags);
0616     /*
0617      * if in-process list is empty, then need to schedule
0618      * the processing thread
0619      */
0620     schedule_work_flag = list_empty(&cl->in_process_list.list);
0621     list_add_tail(&rb->list, &cl->in_process_list.list);
0622     spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
0623 
0624     if (schedule_work_flag)
0625         ishtp_cl_bus_rx_event(cl->device);
0626 }
0627 
/**
 * ipc_tx_send() - IPC tx send function
 * @prm: Pointer to client device instance (passed as void * callback arg)
 *
 * Send message over IPC. Message will be split into fragments
 * if message size is bigger than IPC FIFO size, and all
 * fragments will be sent one by one.
 */
static void ipc_tx_send(void *prm)
{
    struct ishtp_cl *cl = prm;
    struct ishtp_cl_tx_ring *cl_msg;
    size_t  rem;
    struct ishtp_device *dev = (cl ? cl->dev : NULL);
    struct ishtp_msg_hdr    ishtp_hdr;
    unsigned long   tx_flags, tx_free_flags;
    unsigned char   *pmsg;

    if (!dev)
        return;

    /*
     * Other conditions if some critical error has
     * occurred before this callback is called
     */
    if (dev->dev_state != ISHTP_DEV_ENABLED)
        return;

    if (cl->state != ISHTP_CL_CONNECTED)
        return;

    /* tx_list_spinlock stays held across the whole fragmentation loop */
    spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
    if (list_empty(&cl->tx_list.list)) {
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
        return;
    }

    /* Starting a new message requires exactly one flow-control credit */
    if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
        return;
    }

    if (!cl->sending) {
        /* Consume the credit and mark an IPC transmission in flight */
        --cl->ishtp_flow_ctrl_creds;
        cl->last_ipc_acked = 0;
        cl->last_tx_path = CL_TX_PATH_IPC;
        cl->sending = 1;
    }

    cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
                list);
    /* Remaining bytes; tx_offs tracks progress across fragments */
    rem = cl_msg->send_buf.size - cl->tx_offs;

    while (rem > 0) {
        ishtp_hdr.host_addr = cl->host_client_id;
        ishtp_hdr.fw_addr = cl->fw_client_id;
        ishtp_hdr.reserved = 0;
        pmsg = cl_msg->send_buf.data + cl->tx_offs;

        if (rem <= dev->mtu) {
            /* Last fragment or only one packet */
            ishtp_hdr.length = rem;
            ishtp_hdr.msg_complete = 1;
            /* Submit to IPC queue with no callback */
            ishtp_write_message(dev, &ishtp_hdr, pmsg);
            cl->tx_offs = 0;
            cl->sending = 0;

            break;
        } else {
            /* Send ipc fragment */
            ishtp_hdr.length = dev->mtu;
            ishtp_hdr.msg_complete = 0;
            /* All fragments submitted to IPC queue with no callback */
            ishtp_write_message(dev, &ishtp_hdr, pmsg);
            cl->tx_offs += dev->mtu;
            rem = cl_msg->send_buf.size - cl->tx_offs;
        }
    }

    /* Message fully submitted: move its buffer back to the free ring */
    list_del_init(&cl_msg->list);
    spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

    spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
    list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
    ++cl->tx_ring_free_size;
    spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
        tx_free_flags);
}
0717 
0718 /**
0719  * ishtp_cl_send_msg_ipc() -Send message using IPC
0720  * @dev: ISHTP device instance
0721  * @cl: Pointer to client device instance
0722  *
0723  * Send message over IPC not using DMA
0724  */
0725 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
0726                   struct ishtp_cl *cl)
0727 {
0728     /* If last DMA message wasn't acked yet, leave this one in Tx queue */
0729     if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
0730         return;
0731 
0732     cl->tx_offs = 0;
0733     ipc_tx_send(cl);
0734     ++cl->send_msg_cnt_ipc;
0735 }
0736 
/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send message using DMA: copy the message into the host DMA TX buffer
 * and announce it to firmware with a DMA_XFER HBM message.
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
    struct ishtp_cl *cl)
{
    struct ishtp_msg_hdr    hdr;
    struct dma_xfer_hbm dma_xfer;
    unsigned char   *msg_addr;
    int off;
    struct ishtp_cl_tx_ring *cl_msg;
    unsigned long tx_flags, tx_free_flags;

    /* If last IPC message wasn't acked yet, leave this one in Tx queue */
    if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
        return;

    spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
    if (list_empty(&cl->tx_list.list)) {
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
        return;
    }

    cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
        list);

    /* Reserve space in the shared DMA TX buffer */
    msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
    if (!msg_addr) {
        spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
        /* No DMA room: fall back to IPC if the path allows it */
        if (dev->transfer_path == CL_TX_PATH_DEFAULT)
            ishtp_cl_send_msg_ipc(dev, cl);
        return;
    }

    list_del_init(&cl_msg->list);   /* Must be before write */
    spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

    /* Consume a flow-control credit and mark a DMA send in flight */
    --cl->ishtp_flow_ctrl_creds;
    cl->last_dma_acked = 0;
    cl->last_dma_addr = msg_addr;
    cl->last_tx_path = CL_TX_PATH_DMA;

    /* write msg to dma buf */
    memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

    /*
     * If the current fw doesn't support cache snooping, the driver has
     * to flush the cache manually.
     */
    if (dev->ops->dma_no_cache_snooping &&
        dev->ops->dma_no_cache_snooping(dev))
        clflush_cache_range(msg_addr, cl_msg->send_buf.size);

    /* send dma_xfer hbm msg */
    off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
    ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
    dma_xfer.hbm = DMA_XFER;
    dma_xfer.fw_client_id = cl->fw_client_id;
    dma_xfer.host_client_id = cl->host_client_id;
    dma_xfer.reserved = 0;
    dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
    dma_xfer.msg_length = cl_msg->send_buf.size;
    dma_xfer.reserved2 = 0;
    ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
    /* Return the ring buffer to the free list */
    spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
    list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
    ++cl->tx_ring_free_size;
    spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
    ++cl->send_msg_cnt_dma;
}
0811 
0812 /**
0813  * ishtp_cl_send_msg() -Send message using DMA or IPC
0814  * @dev: ISHTP device instance
0815  * @cl: Pointer to client device instance
0816  *
0817  * Send message using DMA or IPC based on transfer_path
0818  */
0819 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
0820 {
0821     if (dev->transfer_path == CL_TX_PATH_DMA)
0822         ishtp_cl_send_msg_dma(dev, cl);
0823     else
0824         ishtp_cl_send_msg_ipc(dev, cl);
0825 }
0826 
/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * or work queue context. The matching read buffer is located on the device
 * read list; on the final fragment the buffer is completed and a new one is
 * armed (with a fresh flow-control request) if the free pool allows.
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
               struct ishtp_msg_hdr *ishtp_hdr)
{
    struct ishtp_cl *cl;
    struct ishtp_cl_rb *rb;
    struct ishtp_cl_rb *new_rb;
    unsigned char *buffer = NULL;
    struct ishtp_cl_rb *complete_rb = NULL;
    unsigned long   flags;
    int rb_count;

    if (ishtp_hdr->reserved) {
        dev_err(dev->devc, "corrupted message header.\n");
        goto    eoi;
    }

    if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
        dev_err(dev->devc,
            "ISHTP message length in hdr exceeds IPC MTU\n");
        goto    eoi;
    }

    spin_lock_irqsave(&dev->read_list_spinlock, flags);
    rb_count = -1;
    list_for_each_entry(rb, &dev->read_list.list, list) {
        ++rb_count;
        cl = rb->cl;
        /* Match on host/fw address pair and connected state only */
        if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
                cl->fw_client_id == ishtp_hdr->fw_addr) ||
                !(cl->state == ISHTP_CL_CONNECTED))
            continue;

        /* If no Rx buffer is allocated, disband the rb */
        if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
            spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
            dev_err(&cl->device->dev,
                "Rx buffer is not allocated.\n");
            list_del(&rb->list);
            ishtp_io_rb_free(rb);
            cl->status = -ENOMEM;
            goto    eoi;
        }

        /*
         * If message buffer overflown (exceeds max. client msg
         * size, drop message and return to free buffer.
         * Do we need to disconnect such a client? (We don't send
         * back FC, so communication will be stuck anyway)
         */
        if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
            spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
            dev_err(&cl->device->dev,
                "message overflow. size %d len %d idx %ld\n",
                rb->buffer.size, ishtp_hdr->length,
                rb->buf_idx);
            list_del(&rb->list);
            ishtp_cl_io_rb_recycle(rb);
            cl->status = -EIO;
            goto    eoi;
        }

        /* Append this fragment at the current write offset */
        buffer = rb->buffer.data + rb->buf_idx;
        dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

        rb->buf_idx += ishtp_hdr->length;
        if (ishtp_hdr->msg_complete) {
            /* Last fragment in message - it's complete */
            cl->status = 0;
            list_del(&rb->list);
            complete_rb = rb;

            --cl->out_flow_ctrl_creds;
            /*
             * the whole msg arrived, send a new FC, and add a new
             * rb buffer for the next coming msg
             */
            spin_lock(&cl->free_list_spinlock);

            if (!list_empty(&cl->free_rb_list.list)) {
                new_rb = list_entry(cl->free_rb_list.list.next,
                    struct ishtp_cl_rb, list);
                list_del_init(&new_rb->list);
                spin_unlock(&cl->free_list_spinlock);
                new_rb->cl = cl;
                new_rb->buf_idx = 0;
                INIT_LIST_HEAD(&new_rb->list);
                list_add_tail(&new_rb->list,
                    &dev->read_list.list);

                ishtp_hbm_cl_flow_control_req(dev, cl);
            } else {
                spin_unlock(&cl->free_list_spinlock);
            }
        }
        /* One more fragment in message (even if this was last) */
        ++cl->recv_msg_num_frags;

        /*
         * We can safely break here (and in BH too),
         * a single input message can go only to a single request!
         */
        break;
    }

    spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
    /* If it's nobody's message, just read and discard it */
    if (!buffer) {
        uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

        dev_err(dev->devc, "Dropped Rx msg - no request\n");
        dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
        goto    eoi;
    }

    if (complete_rb) {
        /* Timestamp and hand the finished buffer to the client layer */
        cl = complete_rb->cl;
        cl->ts_rx = ktime_get();
        ++cl->recv_msg_cnt_ipc;
        ishtp_cl_read_complete(complete_rb);
    }
eoi:
    return;
}
0958 
0959 /**
0960  * recv_ishtp_cl_msg_dma() -Receive client message
0961  * @dev: ISHTP device instance
0962  * @msg: message pointer
0963  * @hbm: hbm buffer
0964  *
0965  * Receive and dispatch ISHTP client messages using DMA. This function executes
0966  * in ISR or work queue context
0967  */
0968 void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
0969                struct dma_xfer_hbm *hbm)
0970 {
0971     struct ishtp_cl *cl;
0972     struct ishtp_cl_rb *rb;
0973     struct ishtp_cl_rb *new_rb;
0974     unsigned char *buffer = NULL;
0975     struct ishtp_cl_rb *complete_rb = NULL;
0976     unsigned long   flags;
0977 
0978     spin_lock_irqsave(&dev->read_list_spinlock, flags);
0979 
0980     list_for_each_entry(rb, &dev->read_list.list, list) {
0981         cl = rb->cl;
0982         if (!cl || !(cl->host_client_id == hbm->host_client_id &&
0983                 cl->fw_client_id == hbm->fw_client_id) ||
0984                 !(cl->state == ISHTP_CL_CONNECTED))
0985             continue;
0986 
0987         /*
0988          * If no Rx buffer is allocated, disband the rb
0989          */
0990         if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
0991             spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
0992             dev_err(&cl->device->dev,
0993                 "response buffer is not allocated.\n");
0994             list_del(&rb->list);
0995             ishtp_io_rb_free(rb);
0996             cl->status = -ENOMEM;
0997             goto    eoi;
0998         }
0999 
1000         /*
1001          * If message buffer overflown (exceeds max. client msg
1002          * size, drop message and return to free buffer.
1003          * Do we need to disconnect such a client? (We don't send
1004          * back FC, so communication will be stuck anyway)
1005          */
1006         if (rb->buffer.size < hbm->msg_length) {
1007             spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1008             dev_err(&cl->device->dev,
1009                 "message overflow. size %d len %d idx %ld\n",
1010                 rb->buffer.size, hbm->msg_length, rb->buf_idx);
1011             list_del(&rb->list);
1012             ishtp_cl_io_rb_recycle(rb);
1013             cl->status = -EIO;
1014             goto    eoi;
1015         }
1016 
1017         buffer = rb->buffer.data;
1018 
1019         /*
1020          * if current fw don't support cache snooping, driver have to
1021          * flush the cache manually.
1022          */
1023         if (dev->ops->dma_no_cache_snooping &&
1024             dev->ops->dma_no_cache_snooping(dev))
1025             clflush_cache_range(msg, hbm->msg_length);
1026 
1027         memcpy(buffer, msg, hbm->msg_length);
1028         rb->buf_idx = hbm->msg_length;
1029 
1030         /* Last fragment in message - it's complete */
1031         cl->status = 0;
1032         list_del(&rb->list);
1033         complete_rb = rb;
1034 
1035         --cl->out_flow_ctrl_creds;
1036         /*
1037          * the whole msg arrived, send a new FC, and add a new
1038          * rb buffer for the next coming msg
1039          */
1040         spin_lock(&cl->free_list_spinlock);
1041 
1042         if (!list_empty(&cl->free_rb_list.list)) {
1043             new_rb = list_entry(cl->free_rb_list.list.next,
1044                 struct ishtp_cl_rb, list);
1045             list_del_init(&new_rb->list);
1046             spin_unlock(&cl->free_list_spinlock);
1047             new_rb->cl = cl;
1048             new_rb->buf_idx = 0;
1049             INIT_LIST_HEAD(&new_rb->list);
1050             list_add_tail(&new_rb->list,
1051                 &dev->read_list.list);
1052 
1053             ishtp_hbm_cl_flow_control_req(dev, cl);
1054         } else {
1055             spin_unlock(&cl->free_list_spinlock);
1056         }
1057 
1058         /* One more fragment in message (this is always last) */
1059         ++cl->recv_msg_num_frags;
1060 
1061         /*
1062          * We can safely break here (and in BH too),
1063          * a single input message can go only to a single request!
1064          */
1065         break;
1066     }
1067 
1068     spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1069     /* If it's nobody's message, just read and discard it */
1070     if (!buffer) {
1071         dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
1072         goto    eoi;
1073     }
1074 
1075     if (complete_rb) {
1076         cl = complete_rb->cl;
1077         cl->ts_rx = ktime_get();
1078         ++cl->recv_msg_cnt_dma;
1079         ishtp_cl_read_complete(complete_rb);
1080     }
1081 eoi:
1082     return;
1083 }
1084 
1085 void *ishtp_get_client_data(struct ishtp_cl *cl)
1086 {
1087     return cl->client_data;
1088 }
1089 EXPORT_SYMBOL(ishtp_get_client_data);
1090 
1091 void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
1092 {
1093     cl->client_data = data;
1094 }
1095 EXPORT_SYMBOL(ishtp_set_client_data);
1096 
1097 struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
1098 {
1099     return cl->dev;
1100 }
1101 EXPORT_SYMBOL(ishtp_get_ishtp_device);
1102 
1103 void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
1104 {
1105     cl->tx_ring_size = size;
1106 }
1107 EXPORT_SYMBOL(ishtp_set_tx_ring_size);
1108 
1109 void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
1110 {
1111     cl->rx_ring_size = size;
1112 }
1113 EXPORT_SYMBOL(ishtp_set_rx_ring_size);
1114 
1115 void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
1116 {
1117     cl->state = state;
1118 }
1119 EXPORT_SYMBOL(ishtp_set_connection_state);
1120 
1121 void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
1122 {
1123     cl->fw_client_id = fw_client_id;
1124 }
1125 EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);