// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments System Control Interface Protocol Driver
 *
 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
 *  Nishanth Menon
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/reboot.h>

#include "ti_sci.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);
/* Protection for the entire list */
static DEFINE_MUTEX(ti_sci_list_mutex);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message
 * @rx_len: Receive message length
 * @xfer_buf:   Preallocated buffer to store receive message
 *      Since we work with request-ACK protocol, we can
 *      reuse the same buffer for the rx path as we
 *      use for the tx path.
 * @done:   completion event
 */
struct ti_sci_xfer {
    struct ti_msgmgr_message tx_message;
    u8 rx_len;
    u8 *xfer_buf;
    struct completion done;
};

/**
 * struct ti_sci_xfers_info - Structure to manage transfer information
 * @sem_xfer_count: Counting semaphore for managing the maximum number of
 *          simultaneous messages.
 * @xfer_block:     Preallocated message array
 * @xfer_alloc_table:   Bitmap table for allocated messages.
 *          Index of this bitmap table is also used for message
 *          sequence identifier.
 * @xfer_lock:      Protection for message allocation
 */
struct ti_sci_xfers_info {
    struct semaphore sem_xfer_count;
    struct ti_sci_xfer *xfer_block;
    unsigned long *xfer_alloc_table;
    /* protect transfer allocation */
    spinlock_t xfer_lock;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:    Host identifier representing the compute entity
 * @max_rx_timeout_ms:  Timeout for communication with SoC (in milliseconds)
 * @max_msgs: Maximum number of messages that can be pending
 *        simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
    u8 default_host_id;
    int max_rx_timeout_ms;
    int max_msgs;
    int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:    Device pointer
 * @desc:   SoC description for this instance
 * @nb: Reboot notifier block
 * @d:      Debugfs file entry
 * @debug_region: Memory region where the debug messages are available
 * @debug_region_size: Debug region size
 * @debug_buffer: Buffer allocated to copy debug messages.
 * @handle: Instance of TI SCI handle to send to clients.
 * @cl:     Mailbox Client
 * @chan_tx:    Transmit mailbox channel
 * @chan_rx:    Receive mailbox channel
 * @minfo:  Message info
 * @node:   list head
 * @host_id:    Host ID
 * @users:  Number of users of this instance
 * @is_suspending: Flag set to indicate that we are in the suspend path.
 */
struct ti_sci_info {
    struct device *dev;
    struct notifier_block nb;
    const struct ti_sci_desc *desc;
    struct dentry *d;
    void __iomem *debug_region;
    char *debug_buffer;
    size_t debug_region_size;
    struct ti_sci_handle handle;
    struct mbox_client cl;
    struct mbox_chan *chan_tx;
    struct mbox_chan *chan_rx;
    struct ti_sci_xfers_info minfo;
    struct list_head node;
    u8 host_id;
    /* protected by ti_sci_list_mutex */
    int users;
    bool is_suspending;
};

#define cl_to_ti_sci_info(c)    container_of(c, struct ti_sci_info, cl)
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)

#ifdef CONFIG_DEBUG_FS

/**
 * ti_sci_debug_show() - Helper to dump the debug log
 * @s:  sequence file pointer
 * @unused: unused.
 *
 * Return: 0
 */
static int ti_sci_debug_show(struct seq_file *s, void *unused)
{
    struct ti_sci_info *info = s->private;

    memcpy_fromio(info->debug_buffer, info->debug_region,
              info->debug_region_size);
    /*
     * We don't trust firmware to NUL-terminate the last byte (hence
     * we have allocated 1 extra zeroed byte). Since we cannot guarantee
     * any specific data format for debug messages, we just present the
     * data in the buffer as is - we expect the messages to be
     * self-explanatory.
     */
    seq_puts(s, info->debug_buffer);
    return 0;
}

/* Provide the log file operations interface */
DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);

/**
 * ti_sci_debugfs_create() - Create log debug file
 * @pdev:   platform device pointer
 * @info:   Pointer to SCI entity information
 *
 * Return: 0 if all went fine, else corresponding error.
 */
static int ti_sci_debugfs_create(struct platform_device *pdev,
                 struct ti_sci_info *info)
{
    struct device *dev = &pdev->dev;
    struct resource *res;
    char debug_name[50] = "ti_sci_debug@";

    /* Debug region is optional */
    res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                       "debug_messages");
    info->debug_region = devm_ioremap_resource(dev, res);
    if (IS_ERR(info->debug_region))
        return 0;
    info->debug_region_size = resource_size(res);

    info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
                      sizeof(char), GFP_KERNEL);
    if (!info->debug_buffer)
        return -ENOMEM;
    /* Set up NUL termination */
    info->debug_buffer[info->debug_region_size] = 0;

    info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
                          sizeof(debug_name) -
                          sizeof("ti_sci_debug@")),
                      0444, NULL, info, &ti_sci_debug_fops);
    if (IS_ERR(info->d))
        return PTR_ERR(info->d);

    dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
        info->debug_region, info->debug_region_size, res);
    return 0;
}
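
/*
 * Usage note (editor's addition, not upstream text): since the file is
 * created with a NULL parent and mode 0444, the log above can simply be
 * read from userspace, e.g.:
 *
 *  cat /sys/kernel/debug/ti_sci_debug@<dev_name>
 *
 * where <dev_name> is whatever dev_name() yields for this platform
 * device (typically its DT unit address); the exact name depends on the
 * platform.
 */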

/**
 * ti_sci_debugfs_destroy() - clean up log debug file
 * @pdev:   platform device pointer
 * @info:   Pointer to SCI entity information
 */
static void ti_sci_debugfs_destroy(struct platform_device *pdev,
                   struct ti_sci_info *info)
{
    if (IS_ERR(info->debug_region))
        return;

    debugfs_remove(info->d);
}
#else /* CONFIG_DEBUG_FS */
static inline int ti_sci_debugfs_create(struct platform_device *dev,
                    struct ti_sci_info *info)
{
    return 0;
}

static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
                      struct ti_sci_info *info)
{
}
#endif /* CONFIG_DEBUG_FS */

/**
 * ti_sci_dump_header_dbg() - Helper to dump a message header.
 * @dev:    Device pointer corresponding to the SCI entity
 * @hdr:    pointer to header.
 */
static inline void ti_sci_dump_header_dbg(struct device *dev,
                      struct ti_sci_msg_hdr *hdr)
{
    dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
        hdr->type, hdr->host, hdr->seq, hdr->flags);
}

/**
 * ti_sci_rx_callback() - mailbox client callback for receive messages
 * @cl: client pointer
 * @m:  mailbox message
 *
 * Processes one received message, maps it to the appropriate transfer
 * information and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should be
 * as fast as possible.
 */
static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
{
    struct ti_sci_info *info = cl_to_ti_sci_info(cl);
    struct device *dev = info->dev;
    struct ti_sci_xfers_info *minfo = &info->minfo;
    struct ti_msgmgr_message *mbox_msg = m;
    struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
    struct ti_sci_xfer *xfer;
    u8 xfer_id;

    xfer_id = hdr->seq;

    /*
     * Are we even expecting this?
     * NOTE: barriers were implicit in locks used for modifying the bitmap
     */
    if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
        dev_err(dev, "Message for %d is not expected!\n", xfer_id);
        return;
    }

    xfer = &minfo->xfer_block[xfer_id];

    /* Is the message of valid length? */
    if (mbox_msg->len > info->desc->max_msg_size) {
        dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
            mbox_msg->len, info->desc->max_msg_size);
        ti_sci_dump_header_dbg(dev, hdr);
        return;
    }
    if (mbox_msg->len < xfer->rx_len) {
        dev_err(dev, "Recv xfer %zu < expected %d length\n",
            mbox_msg->len, xfer->rx_len);
        ti_sci_dump_header_dbg(dev, hdr);
        return;
    }

    ti_sci_dump_header_dbg(dev, hdr);
    /* Copy the message into the rx buffer */
    memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
    complete(&xfer->done);
}

/**
 * ti_sci_get_one_xfer() - Allocate one message
 * @info:   Pointer to SCI entity information
 * @msg_type:   Message type
 * @msg_flags:  Flag to set for the message
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function used by the various command functions that are exposed
 * to clients of this driver to allocate a message transfer.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
 * of internal data structures.
 *
 * Return: Valid xfer pointer on success, else corresponding error pointer.
 */
static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
                           u16 msg_type, u32 msg_flags,
                           size_t tx_message_size,
                           size_t rx_message_size)
{
    struct ti_sci_xfers_info *minfo = &info->minfo;
    struct ti_sci_xfer *xfer;
    struct ti_sci_msg_hdr *hdr;
    unsigned long flags;
    unsigned long bit_pos;
    u8 xfer_id;
    int ret;
    int timeout;

    /* Ensure we have sane transfer sizes */
    if (rx_message_size > info->desc->max_msg_size ||
        tx_message_size > info->desc->max_msg_size ||
        rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
        return ERR_PTR(-ERANGE);

    /*
     * Ensure we have only a controlled number of pending messages.
     * Ideally, we might just have to wait for a single message; be
     * conservative and wait five times that.
     */
    timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
    ret = down_timeout(&minfo->sem_xfer_count, timeout);
    if (ret < 0)
        return ERR_PTR(ret);

    /* Keep the locked section as small as possible */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
                      info->desc->max_msgs);
    set_bit(bit_pos, minfo->xfer_alloc_table);
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

    /*
     * We already ensured in probe that the max number of messages can
     * fit in hdr.seq - NOTE: this improves access latencies
     * to predictable O(1) access, BUT, it opens us to risk if
     * remote misbehaves with corrupted message sequence responses.
     * If that happens, we are going to be messed up anyways..
     */
    xfer_id = (u8)bit_pos;

    xfer = &minfo->xfer_block[xfer_id];

    hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
    xfer->tx_message.len = tx_message_size;
    xfer->tx_message.chan_rx = info->chan_rx;
    xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
    xfer->rx_len = (u8)rx_message_size;

    reinit_completion(&xfer->done);

    hdr->seq = xfer_id;
    hdr->type = msg_type;
    hdr->host = info->host_id;
    hdr->flags = msg_flags;

    return xfer;
}

/**
 * ti_sci_put_one_xfer() - Release a message
 * @minfo:  transfer info pointer
 * @xfer:   message that was reserved by ti_sci_get_one_xfer
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
                struct ti_sci_xfer *xfer)
{
    unsigned long flags;
    struct ti_sci_msg_hdr *hdr;
    u8 xfer_id;

    hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
    xfer_id = hdr->seq;

    /*
     * Keep the locked section as small as possible.
     * NOTE: we might get away with an smp_mb() and no lock here,
     * but just be conservative and symmetric.
     */
    spin_lock_irqsave(&minfo->xfer_lock, flags);
    clear_bit(xfer_id, minfo->xfer_alloc_table);
    spin_unlock_irqrestore(&minfo->xfer_lock, flags);

    /* Increment the count for the next user to get through */
    up(&minfo->sem_xfer_count);
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:   Pointer to SCI entity information
 * @xfer:   Transfer to initiate and wait for response
 *
 * Return: 0 if all goes well, -ETIMEDOUT in case of no response,
 *     or the corresponding error on a transmit failure.
 */
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
                 struct ti_sci_xfer *xfer)
{
    int ret;
    int timeout;
    struct device *dev = info->dev;
    bool done_state = true;

    ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
    if (ret < 0)
        return ret;

    ret = 0;

    if (!info->is_suspending) {
        /* And we wait for the response. */
        timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
        if (!wait_for_completion_timeout(&xfer->done, timeout))
            ret = -ETIMEDOUT;
    } else {
        /*
         * If we are suspending, we cannot use wait_for_completion_timeout
         * during noirq phase, so we must manually poll the completion.
         */
        ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
                           true, 1,
                           info->desc->max_rx_timeout_ms * 1000,
                           false, &xfer->done);
    }

    if (ret == -ETIMEDOUT || !done_state) {
        dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
            (void *)_RET_IP_);
    }

    /*
     * NOTE: we might prefer not to need the mailbox ticker to manage the
     * transfer queueing since the protocol layer queues things by itself.
     * Unfortunately, we have to kick the mailbox framework after we have
     * received our message.
     */
    mbox_client_txdone(info->chan_tx, ret);

    return ret;
}
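
/*
 * Every command helper below follows the same request/response pattern
 * built on the three primitives above. A minimal sketch of that flow is
 * given here for orientation; ti_sci_cmd_example() and EXAMPLE_MSG_TYPE
 * are hypothetical names that do not exist in this driver.
 */
#if 0   /* illustrative only, not compiled */
static int ti_sci_cmd_example(struct ti_sci_info *info)
{
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    int ret;

    /* 1. Reserve a transfer slot; the bitmap index becomes hdr->seq */
    xfer = ti_sci_get_one_xfer(info, EXAMPLE_MSG_TYPE,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(struct ti_sci_msg_hdr),
                   sizeof(*resp));
    if (IS_ERR(xfer))
        return PTR_ERR(xfer);

    /* 2. Fill request fields in xfer->xfer_buf here, if any */

    /* 3. Send and wait; the rx callback copies the reply into xfer_buf */
    ret = ti_sci_do_xfer(info, xfer);
    if (ret)
        goto fail;

    /* 4. Check the generic ACK flag in the response header */
    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    /* 5. Always release the slot so the semaphore count recovers */
    ti_sci_put_one_xfer(&info->minfo, xfer);
    return ret;
}
#endif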

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @info:   Pointer to SCI entity information
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
{
    struct device *dev = info->dev;
    struct ti_sci_handle *handle = &info->handle;
    struct ti_sci_version_info *ver = &handle->version;
    struct ti_sci_msg_resp_version *rev_info;
    struct ti_sci_xfer *xfer;
    int ret;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(struct ti_sci_msg_hdr),
                   sizeof(*rev_info));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }

    rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    ver->abi_major = rev_info->abi_major;
    ver->abi_minor = rev_info->abi_minor;
    ver->firmware_revision = rev_info->firmware_revision;
    strncpy(ver->firmware_description, rev_info->firmware_description,
        sizeof(ver->firmware_description));

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);
    return ret;
}
/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:  pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static inline bool ti_sci_is_response_ack(void *r)
{
    struct ti_sci_msg_hdr *hdr = r;

    return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle: pointer to TI SCI handle
 * @id:     Device identifier
 * @flags:  flags to setup for the device
 * @state:  State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
                   u32 id, u32 flags, u8 state)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_set_device_state *req;
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
                   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
    req->id = id;
    req->state = state;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle: Pointer to TI SCI handle
 * @id:     Device Identifier
 * @clcnt:  Pointer to Context Loss Count
 * @resets: pointer to resets
 * @p_state:    pointer to p_state
 * @c_state:    pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
                   u32 id, u32 *clcnt, u32 *resets,
                   u8 *p_state, u8 *c_state)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_get_device_state *req;
    struct ti_sci_msg_resp_get_device_state *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    if (!clcnt && !resets && !p_state && !c_state)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
    req->id = id;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
    if (!ti_sci_is_response_ack(resp)) {
        ret = -ENODEV;
        goto fail;
    }

    if (clcnt)
        *clcnt = resp->context_loss_count;
    if (resets)
        *resets = resp->resets;
    if (p_state)
        *p_state = resp->programmed_state;
    if (c_state)
        *c_state = resp->current_state;
fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request a device managed by TISCI
 *               that can be shared with other hosts.
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Request the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
    return ti_sci_set_device_state(handle, id, 0,
                       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_get_device_exclusive() - command to request a device managed by
 *                     TISCI that is exclusively owned by the
 *                     requesting host.
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Request the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
                       u32 id)
{
    return ti_sci_set_device_state(handle, id,
                       MSG_FLAG_DEVICE_EXCLUSIVE,
                       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Request the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
    return ti_sci_set_device_state(handle, id, 0,
                       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
 *                  TISCI that is exclusively owned by the
 *                  requesting host.
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Request the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
                        u32 id)
{
    return ti_sci_set_device_state(handle, id,
                       MSG_FLAG_DEVICE_EXCLUSIVE,
                       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Release the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
    return ti_sci_set_device_state(handle, id,
                       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
    u8 unused;

    /* check the device state which will also tell us if the ID is valid */
    return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle: Pointer to TISCI handle
 * @id:     Device Identifier
 * @count:  Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
                    u32 *count)
{
    return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle: Pointer to TISCI handle
 * @id:     Device Identifier
 * @r_state:    true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
                  bool *r_state)
{
    int ret;
    u8 state;

    if (!r_state)
        return -EINVAL;

    ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
    if (ret)
        return ret;

    *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

    return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle: Pointer to TISCI handle
 * @id:     Device Identifier
 * @r_state:    true if requested to be stopped
 * @curr_state: true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
                  bool *r_state, bool *curr_state)
{
    int ret;
    u8 p_state, c_state;

    if (!r_state && !curr_state)
        return -EINVAL;

    ret = ti_sci_get_device_state(handle, id, NULL, NULL,
                      &p_state, &c_state);
    if (ret)
        return ret;

    if (r_state)
        *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
    if (curr_state)
        *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

    return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle: Pointer to TISCI handle
 * @id:     Device Identifier
 * @r_state:    true if requested to be ON
 * @curr_state: true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
                bool *r_state, bool *curr_state)
{
    int ret;
    u8 p_state, c_state;

    if (!r_state && !curr_state)
        return -EINVAL;

    ret = ti_sci_get_device_state(handle, id, NULL, NULL,
                      &p_state, &c_state);
    if (ret)
        return ret;

    if (r_state)
        *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
    if (curr_state)
        *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

    return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle: Pointer to TISCI handle
 * @id:     Device Identifier
 * @curr_state: true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
                   bool *curr_state)
{
    int ret;
    u8 state;

    if (!curr_state)
        return -EINVAL;

    ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
    if (ret)
        return ret;

    *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

    return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *                  by TISCI
 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:     Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
                    u32 id, u32 reset_state)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_set_device_resets *req;
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
    req->id = id;
    req->resets = reset_state;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *                  by TISCI
 * @handle:     Pointer to TISCI handle
 * @id:         Device Identifier
 * @reset_state:    Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
                    u32 id, u32 *reset_state)
{
    return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
                       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @flags:  Header flags as needed
 * @state:  State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
                  u32 dev_id, u32 clk_id,
                  u32 flags, u8 state)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_set_clock_state *req;
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
                   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }
    req->request_state = state;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}
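
/*
 * Clock identifiers use an escape encoding throughout the helpers in this
 * file: IDs below 255 travel in the legacy 8-bit clk_id message field,
 * while larger IDs set clk_id to 255 and carry the real value in the
 * 32-bit clk_id_32 field. A hypothetical helper capturing the pattern
 * (this driver open-codes the test instead) might look like:
 *
 *  static void ti_sci_msg_set_clk_id(struct ti_sci_msg_req_set_clock_state *req,
 *                    u32 clk_id)
 *  {
 *      if (clk_id < 255) {
 *          req->clk_id = clk_id;
 *      } else {
 *          req->clk_id = 255;       // escape marker
 *          req->clk_id_32 = clk_id; // full 32-bit identifier
 *      }
 *  }
 */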

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @programmed_state:   State requested for clock to move to
 * @current_state:  State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
                      u32 dev_id, u32 clk_id,
                      u8 *programmed_state, u8 *current_state)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_get_clock_state *req;
    struct ti_sci_msg_resp_get_clock_state *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    if (!programmed_state && !current_state)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;

    if (!ti_sci_is_response_ack(resp)) {
        ret = -ENODEV;
        goto fail;
    }

    if (programmed_state)
        *programmed_state = resp->programmed_state;
    if (current_state)
        *current_state = resp->current_state;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
                u32 clk_id, bool needs_ssc,
                bool can_change_freq, bool enable_input_term)
{
    u32 flags = 0;

    flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
    flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
    flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

    return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
                      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
                 u32 dev_id, u32 clk_id)
{
    return ti_sci_set_clock_state(handle, dev_id, clk_id,
                      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
                      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
                u32 dev_id, u32 clk_id)
{
    return ti_sci_set_clock_state(handle, dev_id, clk_id,
                      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
                      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
                  u32 dev_id, u32 clk_id, bool *req_state)
{
    u8 state = 0;
    int ret;

    if (!req_state)
        return -EINVAL;

    ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
    if (ret)
        return ret;

    *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
    return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
                u32 clk_id, bool *req_state, bool *curr_state)
{
    u8 c_state = 0, r_state = 0;
    int ret;

    if (!req_state && !curr_state)
        return -EINVAL;

    ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
                     &r_state, &c_state);
    if (ret)
        return ret;

    if (req_state)
        *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
    if (curr_state)
        *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
    return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
                 u32 clk_id, bool *req_state, bool *curr_state)
{
    u8 c_state = 0, r_state = 0;
    int ret;

    if (!req_state && !curr_state)
        return -EINVAL;

    ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
                     &r_state, &c_state);
    if (ret)
        return ret;

    if (req_state)
        *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
    if (curr_state)
        *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
    return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @parent_id:  Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
                     u32 dev_id, u32 clk_id, u32 parent_id)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_set_clock_parent *req;
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }
    if (parent_id < 255) {
        req->parent_id = parent_id;
    } else {
        req->parent_id = 255;
        req->parent_id_32 = parent_id;
    }

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @parent_id:  Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
                     u32 dev_id, u32 clk_id, u32 *parent_id)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_get_clock_parent *req;
    struct ti_sci_msg_resp_get_clock_parent *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle || !parent_id)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;

    if (!ti_sci_is_response_ack(resp)) {
        ret = -ENODEV;
    } else {
        if (resp->parent_id < 255)
            *parent_id = resp->parent_id;
        else
            *parent_id = resp->parent_id_32;
    }

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @num_parents: Returns the number of parents of the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
                      u32 dev_id, u32 clk_id,
                      u32 *num_parents)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_get_clock_num_parents *req;
    struct ti_sci_msg_resp_get_clock_num_parents *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle || !num_parents)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;

    if (!ti_sci_is_response_ack(resp)) {
        ret = -ENODEV;
    } else {
        if (resp->num_parents < 255)
            *num_parents = resp->num_parents;
        else
            *num_parents = resp->num_parents_32;
    }

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @min_freq:   The minimum allowable frequency in Hz. This is the minimum
 *      allowable programmed frequency and does not account for clock
 *      tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency as close
 *      to this target frequency as possible will be chosen.
 * @max_freq:   The maximum allowable frequency in Hz. This is the maximum
 *      allowable programmed frequency and does not account for clock
 *      tolerances and jitter.
 * @match_freq: Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
                     u32 dev_id, u32 clk_id, u64 min_freq,
                     u64 target_freq, u64 max_freq,
                     u64 *match_freq)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_query_clock_freq *req;
    struct ti_sci_msg_resp_query_clock_freq *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle || !match_freq)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }
    req->min_freq_hz = min_freq;
    req->target_freq_hz = target_freq;
    req->max_freq_hz = max_freq;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;

    if (!ti_sci_is_response_ack(resp))
        ret = -ENODEV;
    else
        *match_freq = resp->freq_hz;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle: pointer to TI SCI handle
 * @dev_id: Device identifier this request is for
 * @clk_id: Clock identifier for the device for this request.
 *      Each device has its own set of clock inputs. This indexes
 *      which clock input to modify.
 * @min_freq:   The minimum allowable frequency in Hz. This is the minimum
 *      allowable programmed frequency and does not account for clock
 *      tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency as close
 *      to this target frequency as possible will be chosen.
 * @max_freq:   The maximum allowable frequency in Hz. This is the maximum
 *      allowable programmed frequency and does not account for clock
 *      tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
                   u32 dev_id, u32 clk_id, u64 min_freq,
                   u64 target_freq, u64 max_freq)
{
    struct ti_sci_info *info;
    struct ti_sci_msg_req_set_clock_freq *req;
    struct ti_sci_msg_hdr *resp;
    struct ti_sci_xfer *xfer;
    struct device *dev;
    int ret = 0;

    if (IS_ERR(handle))
        return PTR_ERR(handle);
    if (!handle)
        return -EINVAL;

    info = handle_to_ti_sci_info(handle);
    dev = info->dev;

    xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
                   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
                   sizeof(*req), sizeof(*resp));
    if (IS_ERR(xfer)) {
        ret = PTR_ERR(xfer);
        dev_err(dev, "Message alloc failed(%d)\n", ret);
        return ret;
    }
    req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
    req->dev_id = dev_id;
    if (clk_id < 255) {
        req->clk_id = clk_id;
    } else {
        req->clk_id = 255;
        req->clk_id_32 = clk_id;
    }
    req->min_freq_hz = min_freq;
    req->target_freq_hz = target_freq;
    req->max_freq_hz = max_freq;

    ret = ti_sci_do_xfer(info, xfer);
    if (ret) {
        dev_err(dev, "Mbox send fail %d\n", ret);
        goto fail;
    }

    resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;

    ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
    ti_sci_put_one_xfer(&info->minfo, xfer);

    return ret;
}
1604 
1605 /**
1606  * ti_sci_cmd_clk_get_freq() - Get current frequency
1607  * @handle: pointer to TI SCI handle
1608  * @dev_id: Device identifier this request is for
1609  * @clk_id: Clock identifier for the device for this request.
1610  *      Each device has its own set of clock inputs. This indexes
1611  *      which clock input to query.
1612  * @freq:   Current frequency in Hz
1613  *
1614  * Return: 0 if all went well, else returns appropriate error value.
1615  */
1616 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1617                    u32 dev_id, u32 clk_id, u64 *freq)
1618 {
1619     struct ti_sci_info *info;
1620     struct ti_sci_msg_req_get_clock_freq *req;
1621     struct ti_sci_msg_resp_get_clock_freq *resp;
1622     struct ti_sci_xfer *xfer;
1623     struct device *dev;
1624     int ret = 0;
1625 
1626     if (IS_ERR(handle))
1627         return PTR_ERR(handle);
1628     if (!handle || !freq)
1629         return -EINVAL;
1630 
1631     info = handle_to_ti_sci_info(handle);
1632     dev = info->dev;
1633 
1634     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1635                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1636                    sizeof(*req), sizeof(*resp));
1637     if (IS_ERR(xfer)) {
1638         ret = PTR_ERR(xfer);
1639         dev_err(dev, "Message alloc failed(%d)\n", ret);
1640         return ret;
1641     }
1642     req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
1643     req->dev_id = dev_id;
1644     if (clk_id < 255) {
1645         req->clk_id = clk_id;
1646     } else {
1647         req->clk_id = 255;
1648         req->clk_id_32 = clk_id;
1649     }
1650 
1651     ret = ti_sci_do_xfer(info, xfer);
1652     if (ret) {
1653         dev_err(dev, "Mbox send fail %d\n", ret);
1654         goto fail;
1655     }
1656 
1657     resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
1658 
1659     if (!ti_sci_is_response_ack(resp))
1660         ret = -ENODEV;
1661     else
1662         *freq = resp->freq_hz;
1663 
1664 fail:
1665     ti_sci_put_one_xfer(&info->minfo, xfer);
1666 
1667     return ret;
1668 }
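/*
 * Editor's note: a minimal usage sketch (illustrative only, not part of
 * this driver). A client holding a TI SCI handle can probe for the best
 * achievable rate and then program it through the clock ops installed by
 * ti_sci_setup_ops() below; the dev_id/clk_id values and the frequency
 * window here are hypothetical:
 *
 *	u64 match_freq;
 *	int ret;
 *
 *	ret = handle->ops.clk_ops.get_best_match_freq(handle, 82, 0,
 *						      19200000, 24000000,
 *						      25000000, &match_freq);
 *	if (!ret)
 *		ret = handle->ops.clk_ops.set_freq(handle, 82, 0, 19200000,
 *						   match_freq, 25000000);
 */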
1669 
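/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle: pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */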
1670 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1671 {
1672     struct ti_sci_info *info;
1673     struct ti_sci_msg_req_reboot *req;
1674     struct ti_sci_msg_hdr *resp;
1675     struct ti_sci_xfer *xfer;
1676     struct device *dev;
1677     int ret = 0;
1678 
1679     if (IS_ERR(handle))
1680         return PTR_ERR(handle);
1681     if (!handle)
1682         return -EINVAL;
1683 
1684     info = handle_to_ti_sci_info(handle);
1685     dev = info->dev;
1686 
1687     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1688                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1689                    sizeof(*req), sizeof(*resp));
1690     if (IS_ERR(xfer)) {
1691         ret = PTR_ERR(xfer);
1692         dev_err(dev, "Message alloc failed(%d)\n", ret);
1693         return ret;
1694     }
1695     req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
1696 
1697     ret = ti_sci_do_xfer(info, xfer);
1698     if (ret) {
1699         dev_err(dev, "Mbox send fail %d\n", ret);
1700         goto fail;
1701     }
1702 
1703     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1704 
1705     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1709 
1710 fail:
1711     ti_sci_put_one_xfer(&info->minfo, xfer);
1712 
1713     return ret;
1714 }
1715 
1716 /**
1717  * ti_sci_get_resource_range - Helper to get a range of resources assigned
1718  *                 to a host. Resource is uniquely identified by
1719  *                 type and subtype.
1720  * @handle:     Pointer to TISCI handle.
1721  * @dev_id:     TISCI device ID.
1722  * @subtype:        Resource assignment subtype that is being requested
1723  *          from the given device.
1724  * @s_host:     Host processor ID to which the resources are allocated
1725  * @desc:       Pointer to ti_sci_resource_desc to be updated with the
1726  *          resource range start index and number of resources
1727  *
1728  * Return: 0 if all went fine, else return appropriate error.
1729  */
1730 static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1731                      u32 dev_id, u8 subtype, u8 s_host,
1732                      struct ti_sci_resource_desc *desc)
1733 {
1734     struct ti_sci_msg_resp_get_resource_range *resp;
1735     struct ti_sci_msg_req_get_resource_range *req;
1736     struct ti_sci_xfer *xfer;
1737     struct ti_sci_info *info;
1738     struct device *dev;
1739     int ret = 0;
1740 
1741     if (IS_ERR(handle))
1742         return PTR_ERR(handle);
1743     if (!handle || !desc)
1744         return -EINVAL;
1745 
1746     info = handle_to_ti_sci_info(handle);
1747     dev = info->dev;
1748 
1749     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1750                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1751                    sizeof(*req), sizeof(*resp));
1752     if (IS_ERR(xfer)) {
1753         ret = PTR_ERR(xfer);
1754         dev_err(dev, "Message alloc failed(%d)\n", ret);
1755         return ret;
1756     }
1757 
1758     req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1759     req->secondary_host = s_host;
1760     req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1761     req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1762 
1763     ret = ti_sci_do_xfer(info, xfer);
1764     if (ret) {
1765         dev_err(dev, "Mbox send fail %d\n", ret);
1766         goto fail;
1767     }
1768 
1769     resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1770 
1771     if (!ti_sci_is_response_ack(resp)) {
1772         ret = -ENODEV;
1773     } else if (!resp->range_num && !resp->range_num_sec) {
1774         /* Neither of the two resource ranges is valid */
1775         ret = -ENODEV;
1776     } else {
1777         desc->start = resp->range_start;
1778         desc->num = resp->range_num;
1779         desc->start_sec = resp->range_start_sec;
1780         desc->num_sec = resp->range_num_sec;
1781     }
1782 
1783 fail:
1784     ti_sci_put_one_xfer(&info->minfo, xfer);
1785 
1786     return ret;
1787 }
1788 
1789 /**
1790  * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
1791  *                 host that is the same as the TI SCI interface host.
1792  * @handle:     Pointer to TISCI handle.
1793  * @dev_id:     TISCI device ID.
1794  * @subtype:        Resource assignment subtype that is being requested
1795  *          from the given device.
1796  * @desc:       Pointer to ti_sci_resource_desc to be updated with the
1797  *          resource range start index and number of resources
1798  *
1799  * Return: 0 if all went fine, else return appropriate error.
1800  */
1801 static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1802                      u32 dev_id, u8 subtype,
1803                      struct ti_sci_resource_desc *desc)
1804 {
1805     return ti_sci_get_resource_range(handle, dev_id, subtype,
1806                      TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1807                      desc);
1808 }
1809 
1810 /**
1811  * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1812  *                        assigned to a specified host.
1813  * @handle:     Pointer to TISCI handle.
1814  * @dev_id:     TISCI device ID.
1815  * @subtype:        Resource assignment subtype that is being requested
1816  *          from the given device.
1817  * @s_host:     Host processor ID to which the resources are allocated
1818  * @desc:       Pointer to ti_sci_resource_desc to be updated with the
1819  *          resource range start index and number of resources
1820  *
1821  * Return: 0 if all went fine, else return appropriate error.
1822  */
1823 static
1824 int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1825                          u32 dev_id, u8 subtype, u8 s_host,
1826                          struct ti_sci_resource_desc *desc)
1827 {
1828     return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
1829 }
1830 
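/*
 * Editor's note: an illustrative sketch of querying a resource range
 * through the rm_core_ops installed below; the dev_id and subtype values
 * are hypothetical placeholders:
 *
 *	struct ti_sci_resource_desc desc;
 *	int ret;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, 26, 10, &desc);
 *	if (!ret)
 *		pr_info("range: start %u, num %u\n", desc.start, desc.num);
 */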
1831 /**
1832  * ti_sci_manage_irq() - Helper API to configure/release the irq route between
1833  *           the requested source and destination
1834  * @handle:     Pointer to TISCI handle.
1835  * @valid_params:   Bit fields defining the validity of certain params
1836  * @src_id:     Device ID of the IRQ source
1837  * @src_index:      IRQ source index within the source device
1838  * @dst_id:     Device ID of the IRQ destination
1839  * @dst_host_irq:   IRQ number of the destination device
1840  * @ia_id:      Device ID of the IA, if the IRQ flows through this IA
1841  * @vint:       Virtual interrupt to be used within the IA
1842  * @global_event:   Global event number to be used for the requesting event
1843  * @vint_status_bit:    Virtual interrupt status bit to be used for the event
1844  * @s_host:     Secondary host ID for which the irq/event is being
1845  *          requested.
1846  * @type:       Request type irq set or release.
1847  *
1848  * Return: 0 if all went fine, else return appropriate error.
1849  */
1850 static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1851                  u32 valid_params, u16 src_id, u16 src_index,
1852                  u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1853                  u16 global_event, u8 vint_status_bit, u8 s_host,
1854                  u16 type)
1855 {
1856     struct ti_sci_msg_req_manage_irq *req;
1857     struct ti_sci_msg_hdr *resp;
1858     struct ti_sci_xfer *xfer;
1859     struct ti_sci_info *info;
1860     struct device *dev;
1861     int ret = 0;
1862 
1863     if (IS_ERR(handle))
1864         return PTR_ERR(handle);
1865     if (!handle)
1866         return -EINVAL;
1867 
1868     info = handle_to_ti_sci_info(handle);
1869     dev = info->dev;
1870 
1871     xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1872                    sizeof(*req), sizeof(*resp));
1873     if (IS_ERR(xfer)) {
1874         ret = PTR_ERR(xfer);
1875         dev_err(dev, "Message alloc failed(%d)\n", ret);
1876         return ret;
1877     }
1878     req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1879     req->valid_params = valid_params;
1880     req->src_id = src_id;
1881     req->src_index = src_index;
1882     req->dst_id = dst_id;
1883     req->dst_host_irq = dst_host_irq;
1884     req->ia_id = ia_id;
1885     req->vint = vint;
1886     req->global_event = global_event;
1887     req->vint_status_bit = vint_status_bit;
1888     req->secondary_host = s_host;
1889 
1890     ret = ti_sci_do_xfer(info, xfer);
1891     if (ret) {
1892         dev_err(dev, "Mbox send fail %d\n", ret);
1893         goto fail;
1894     }
1895 
1896     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1897 
1898     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1899 
1900 fail:
1901     ti_sci_put_one_xfer(&info->minfo, xfer);
1902 
1903     return ret;
1904 }
1905 
1906 /**
1907  * ti_sci_set_irq() - Helper API to configure the irq route between the
1908  *            requested source and destination
1909  * @handle:     Pointer to TISCI handle.
1910  * @valid_params:   Bit fields defining the validity of certain params
1911  * @src_id:     Device ID of the IRQ source
1912  * @src_index:      IRQ source index within the source device
1913  * @dst_id:     Device ID of the IRQ destination
1914  * @dst_host_irq:   IRQ number of the destination device
1915  * @ia_id:      Device ID of the IA, if the IRQ flows through this IA
1916  * @vint:       Virtual interrupt to be used within the IA
1917  * @global_event:   Global event number to be used for the requesting event
1918  * @vint_status_bit:    Virtual interrupt status bit to be used for the event
1919  * @s_host:     Secondary host ID for which the irq/event is being
1920  *          requested.
1921  *
1922  * Return: 0 if all went fine, else return appropriate error.
1923  */
1924 static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1925               u16 src_id, u16 src_index, u16 dst_id,
1926               u16 dst_host_irq, u16 ia_id, u16 vint,
1927               u16 global_event, u8 vint_status_bit, u8 s_host)
1928 {
1929     pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1930          __func__, valid_params, src_id, src_index,
1931          dst_id, dst_host_irq, ia_id, vint, global_event,
1932          vint_status_bit);
1933 
1934     return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1935                  dst_id, dst_host_irq, ia_id, vint,
1936                  global_event, vint_status_bit, s_host,
1937                  TI_SCI_MSG_SET_IRQ);
1938 }
1939 
1940 /**
1941  * ti_sci_free_irq() - Helper API to free the irq route between the
1942  *             requested source and destination
1943  * @handle:     Pointer to TISCI handle.
1944  * @valid_params:   Bit fields defining the validity of certain params
1945  * @src_id:     Device ID of the IRQ source
1946  * @src_index:      IRQ source index within the source device
1947  * @dst_id:     Device ID of the IRQ destination
1948  * @dst_host_irq:   IRQ number of the destination device
1949  * @ia_id:      Device ID of the IA, if the IRQ flows through this IA
1950  * @vint:       Virtual interrupt to be used within the IA
1951  * @global_event:   Global event number to be used for the requesting event
1952  * @vint_status_bit:    Virtual interrupt status bit to be used for the event
1953  * @s_host:     Secondary host ID for which the irq/event is being
1954  *          requested.
1955  *
1956  * Return: 0 if all went fine, else return appropriate error.
1957  */
1958 static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1959                u16 src_id, u16 src_index, u16 dst_id,
1960                u16 dst_host_irq, u16 ia_id, u16 vint,
1961                u16 global_event, u8 vint_status_bit, u8 s_host)
1962 {
1963     pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
1964          __func__, valid_params, src_id, src_index,
1965          dst_id, dst_host_irq, ia_id, vint, global_event,
1966          vint_status_bit);
1967 
1968     return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1969                  dst_id, dst_host_irq, ia_id, vint,
1970                  global_event, vint_status_bit, s_host,
1971                  TI_SCI_MSG_FREE_IRQ);
1972 }
1973 
1974 /**
1975  * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1976  *            source and destination.
1977  * @handle:     Pointer to TISCI handle.
1978  * @src_id:     Device ID of the IRQ source
1979  * @src_index:      IRQ source index within the source device
1980  * @dst_id:     Device ID of the IRQ destination
1981  * @dst_host_irq:   IRQ number of the destination device
1984  *
1985  * Return: 0 if all went fine, else return appropriate error.
1986  */
1987 static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1988                   u16 src_index, u16 dst_id, u16 dst_host_irq)
1989 {
1990     u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1991 
1992     return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1993                   dst_host_irq, 0, 0, 0, 0, 0);
1994 }
1995 
1996 /**
1997  * ti_sci_cmd_set_event_map() - Configure an event-based irq route between the
1998  *              requested source and Interrupt Aggregator.
1999  * @handle:     Pointer to TISCI handle.
2000  * @src_id:     Device ID of the IRQ source
2001  * @src_index:      IRQ source index within the source device
2002  * @ia_id:      Device ID of the IA, if the IRQ flows through this IA
2003  * @vint:       Virtual interrupt to be used within the IA
2004  * @global_event:   Global event number to be used for the requesting event
2005  * @vint_status_bit:    Virtual interrupt status bit to be used for the event
2006  *
2007  * Return: 0 if all went fine, else return appropriate error.
2008  */
2009 static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
2010                     u16 src_id, u16 src_index, u16 ia_id,
2011                     u16 vint, u16 global_event,
2012                     u8 vint_status_bit)
2013 {
2014     u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
2015                MSG_FLAG_GLB_EVNT_VALID |
2016                MSG_FLAG_VINT_STS_BIT_VALID;
2017 
2018     return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
2019                   ia_id, vint, global_event, vint_status_bit, 0);
2020 }
2021 
2022 /**
2023  * ti_sci_cmd_free_irq() - Free a host irq route between the requested
2024  *             source and destination.
2025  * @handle:     Pointer to TISCI handle.
2026  * @src_id:     Device ID of the IRQ source
2027  * @src_index:      IRQ source index within the source device
2028  * @dst_id:     Device ID of the IRQ destination
2029  * @dst_host_irq:   IRQ number of the destination device
2032  *
2033  * Return: 0 if all went fine, else return appropriate error.
2034  */
2035 static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2036                    u16 src_index, u16 dst_id, u16 dst_host_irq)
2037 {
2038     u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2039 
2040     return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2041                    dst_host_irq, 0, 0, 0, 0, 0);
2042 }
2043 
2044 /**
2045  * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2046  *               and Interrupt Aggregator.
2047  * @handle:     Pointer to TISCI handle.
2048  * @src_id:     Device ID of the IRQ source
2049  * @src_index:      IRQ source index within the source device
2050  * @ia_id:      Device ID of the IA, if the IRQ flows through this IA
2051  * @vint:       Virtual interrupt to be used within the IA
2052  * @global_event:   Global event number to be used for the requesting event
2053  * @vint_status_bit:    Virtual interrupt status bit to be used for the event
2054  *
2055  * Return: 0 if all went fine, else return appropriate error.
2056  */
2057 static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2058                      u16 src_id, u16 src_index, u16 ia_id,
2059                      u16 vint, u16 global_event,
2060                      u8 vint_status_bit)
2061 {
2062     u32 valid_params = MSG_FLAG_IA_ID_VALID |
2063                MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2064                MSG_FLAG_VINT_STS_BIT_VALID;
2065 
2066     return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2067                    ia_id, vint, global_event, vint_status_bit, 0);
2068 }
2069 
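/*
 * Editor's note: an illustrative sketch of the two routing flavours
 * exposed through rm_irq_ops; all IDs below are hypothetical. A direct
 * route ties a source interrupt to a destination host IRQ, while an
 * event map routes a source event through an Interrupt Aggregator:
 *
 *	ret = handle->ops.rm_irq_ops.set_irq(handle, 28, 4, 56, 100);
 *
 *	ret = handle->ops.rm_irq_ops.set_event_map(handle, 28, 4, 10, 6,
 *						   1024, 0);
 *
 * The matching free_irq()/free_event_map() calls take the same
 * arguments to tear the route down again.
 */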
2070 /**
2071  * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
2072  * @handle: Pointer to TI SCI handle.
2073  * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
2074  *
2075  * Return: 0 if all went well, else returns appropriate error value.
2076  *
2077  * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
2078  * more info.
2079  */
2080 static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
2081                   const struct ti_sci_msg_rm_ring_cfg *params)
2082 {
2083     struct ti_sci_msg_rm_ring_cfg_req *req;
2084     struct ti_sci_msg_hdr *resp;
2085     struct ti_sci_xfer *xfer;
2086     struct ti_sci_info *info;
2087     struct device *dev;
2088     int ret = 0;
2089 
2090     if (IS_ERR_OR_NULL(handle))
2091         return -EINVAL;
2092 
2093     info = handle_to_ti_sci_info(handle);
2094     dev = info->dev;
2095 
2096     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2097                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2098                    sizeof(*req), sizeof(*resp));
2099     if (IS_ERR(xfer)) {
2100         ret = PTR_ERR(xfer);
2101         dev_err(dev, "RM_RA:Message alloc failed(%d)\n", ret);
2102         return ret;
2103     }
2104     req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2105     req->valid_params = params->valid_params;
2106     req->nav_id = params->nav_id;
2107     req->index = params->index;
2108     req->addr_lo = params->addr_lo;
2109     req->addr_hi = params->addr_hi;
2110     req->count = params->count;
2111     req->mode = params->mode;
2112     req->size = params->size;
2113     req->order_id = params->order_id;
2114     req->virtid = params->virtid;
2115     req->asel = params->asel;
2116 
2117     ret = ti_sci_do_xfer(info, xfer);
2118     if (ret) {
2119         dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2120         goto fail;
2121     }
2122 
2123     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2124     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2125 
2126 fail:
2127     ti_sci_put_one_xfer(&info->minfo, xfer);
2128     dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
2129     return ret;
2130 }
2131 
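/*
 * Editor's note: an illustrative ring configuration sketch via
 * rm_ring_ops; every value below is a placeholder. A real caller fills
 * only the fields it flags in valid_params (bit definitions elided here):
 *
 *	struct ti_sci_msg_rm_ring_cfg params = { 0 };
 *
 *	params.valid_params = ring_valid_bits;	(caller-chosen VALID bits)
 *	params.nav_id = 26;
 *	params.index = 100;
 *	params.addr_lo = lower_32_bits(ring_dma);
 *	params.addr_hi = upper_32_bits(ring_dma);
 *	params.count = 128;
 *	ret = handle->ops.rm_ring_ops.set_cfg(handle, &params);
 */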
2132 /**
2133  * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2134  * @handle: Pointer to TI SCI handle.
2135  * @nav_id: Device ID of Navigator Subsystem which should be used for
2136  *      pairing
2137  * @src_thread: Source PSI-L thread ID
2138  * @dst_thread: Destination PSI-L thread ID
2139  *
2140  * Return: 0 if all went well, else returns appropriate error value.
2141  */
2142 static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2143                    u32 nav_id, u32 src_thread, u32 dst_thread)
2144 {
2145     struct ti_sci_msg_psil_pair *req;
2146     struct ti_sci_msg_hdr *resp;
2147     struct ti_sci_xfer *xfer;
2148     struct ti_sci_info *info;
2149     struct device *dev;
2150     int ret = 0;
2151 
2152     if (IS_ERR(handle))
2153         return PTR_ERR(handle);
2154     if (!handle)
2155         return -EINVAL;
2156 
2157     info = handle_to_ti_sci_info(handle);
2158     dev = info->dev;
2159 
2160     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2161                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2162                    sizeof(*req), sizeof(*resp));
2163     if (IS_ERR(xfer)) {
2164         ret = PTR_ERR(xfer);
2165         dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2166         return ret;
2167     }
2168     req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2169     req->nav_id = nav_id;
2170     req->src_thread = src_thread;
2171     req->dst_thread = dst_thread;
2172 
2173     ret = ti_sci_do_xfer(info, xfer);
2174     if (ret) {
2175         dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2176         goto fail;
2177     }
2178 
2179     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2180     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2181 
2182 fail:
2183     ti_sci_put_one_xfer(&info->minfo, xfer);
2184 
2185     return ret;
2186 }
2187 
2188 /**
2189  * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2190  * @handle: Pointer to TI SCI handle.
2191  * @nav_id: Device ID of Navigator Subsystem which should be used for
2192  *      unpairing
2193  * @src_thread: Source PSI-L thread ID
2194  * @dst_thread: Destination PSI-L thread ID
2195  *
2196  * Return: 0 if all went well, else returns appropriate error value.
2197  */
2198 static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2199                      u32 nav_id, u32 src_thread, u32 dst_thread)
2200 {
2201     struct ti_sci_msg_psil_unpair *req;
2202     struct ti_sci_msg_hdr *resp;
2203     struct ti_sci_xfer *xfer;
2204     struct ti_sci_info *info;
2205     struct device *dev;
2206     int ret = 0;
2207 
2208     if (IS_ERR(handle))
2209         return PTR_ERR(handle);
2210     if (!handle)
2211         return -EINVAL;
2212 
2213     info = handle_to_ti_sci_info(handle);
2214     dev = info->dev;
2215 
2216     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2217                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2218                    sizeof(*req), sizeof(*resp));
2219     if (IS_ERR(xfer)) {
2220         ret = PTR_ERR(xfer);
2221         dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2222         return ret;
2223     }
2224     req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2225     req->nav_id = nav_id;
2226     req->src_thread = src_thread;
2227     req->dst_thread = dst_thread;
2228 
2229     ret = ti_sci_do_xfer(info, xfer);
2230     if (ret) {
2231         dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2232         goto fail;
2233     }
2234 
2235     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2236     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2237 
2238 fail:
2239     ti_sci_put_one_xfer(&info->minfo, xfer);
2240 
2241     return ret;
2242 }
2243 
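/*
 * Editor's note: an illustrative pair/unpair cycle via rm_psil_ops; the
 * nav_id and thread IDs are hypothetical:
 *
 *	ret = handle->ops.rm_psil_ops.pair(handle, 26, 0x4000, 0xc000);
 *	if (ret)
 *		return ret;
 *	(set up and run the DMA channel, then tear down)
 *	ret = handle->ops.rm_psil_ops.unpair(handle, 26, 0x4000, 0xc000);
 */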
2244 /**
2245  * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2246  * @handle: Pointer to TI SCI handle.
2247  * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2248  *      structure
2249  *
2250  * Return: 0 if all went well, else returns appropriate error value.
2251  *
2252  * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2253  * more info.
2254  */
2255 static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2256             const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2257 {
2258     struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2259     struct ti_sci_msg_hdr *resp;
2260     struct ti_sci_xfer *xfer;
2261     struct ti_sci_info *info;
2262     struct device *dev;
2263     int ret = 0;
2264 
2265     if (IS_ERR_OR_NULL(handle))
2266         return -EINVAL;
2267 
2268     info = handle_to_ti_sci_info(handle);
2269     dev = info->dev;
2270 
2271     xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2272                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2273                    sizeof(*req), sizeof(*resp));
2274     if (IS_ERR(xfer)) {
2275         ret = PTR_ERR(xfer);
2276         dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2277         return ret;
2278     }
2279     req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2280     req->valid_params = params->valid_params;
2281     req->nav_id = params->nav_id;
2282     req->index = params->index;
2283     req->tx_pause_on_err = params->tx_pause_on_err;
2284     req->tx_filt_einfo = params->tx_filt_einfo;
2285     req->tx_filt_pswords = params->tx_filt_pswords;
2286     req->tx_atype = params->tx_atype;
2287     req->tx_chan_type = params->tx_chan_type;
2288     req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2289     req->tx_fetch_size = params->tx_fetch_size;
2290     req->tx_credit_count = params->tx_credit_count;
2291     req->txcq_qnum = params->txcq_qnum;
2292     req->tx_priority = params->tx_priority;
2293     req->tx_qos = params->tx_qos;
2294     req->tx_orderid = params->tx_orderid;
2295     req->fdepth = params->fdepth;
2296     req->tx_sched_priority = params->tx_sched_priority;
2297     req->tx_burst_size = params->tx_burst_size;
2298     req->tx_tdtype = params->tx_tdtype;
2299     req->extended_ch_type = params->extended_ch_type;
2300 
2301     ret = ti_sci_do_xfer(info, xfer);
2302     if (ret) {
2303         dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2304         goto fail;
2305     }
2306 
2307     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2308     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2309 
2310 fail:
2311     ti_sci_put_one_xfer(&info->minfo, xfer);
2312     dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2313     return ret;
2314 }
2315 
2316 /**
2317  * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2318  * @handle: Pointer to TI SCI handle.
2319  * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2320  *      structure
2321  *
2322  * Return: 0 if all went well, else returns appropriate error value.
2323  *
2324  * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2325  * more info.
2326  */
2327 static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2328             const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2329 {
2330     struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2331     struct ti_sci_msg_hdr *resp;
2332     struct ti_sci_xfer *xfer;
2333     struct ti_sci_info *info;
2334     struct device *dev;
2335     int ret = 0;
2336 
2337     if (IS_ERR_OR_NULL(handle))
2338         return -EINVAL;
2339 
2340     info = handle_to_ti_sci_info(handle);
2341     dev = info->dev;
2342 
2343     xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2344                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2345                    sizeof(*req), sizeof(*resp));
2346     if (IS_ERR(xfer)) {
2347         ret = PTR_ERR(xfer);
2348         dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2349         return ret;
2350     }
2351     req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2352     req->valid_params = params->valid_params;
2353     req->nav_id = params->nav_id;
2354     req->index = params->index;
2355     req->rx_fetch_size = params->rx_fetch_size;
2356     req->rxcq_qnum = params->rxcq_qnum;
2357     req->rx_priority = params->rx_priority;
2358     req->rx_qos = params->rx_qos;
2359     req->rx_orderid = params->rx_orderid;
2360     req->rx_sched_priority = params->rx_sched_priority;
2361     req->flowid_start = params->flowid_start;
2362     req->flowid_cnt = params->flowid_cnt;
2363     req->rx_pause_on_err = params->rx_pause_on_err;
2364     req->rx_atype = params->rx_atype;
2365     req->rx_chan_type = params->rx_chan_type;
2366     req->rx_ignore_short = params->rx_ignore_short;
2367     req->rx_ignore_long = params->rx_ignore_long;
2368     req->rx_burst_size = params->rx_burst_size;
2369 
2370     ret = ti_sci_do_xfer(info, xfer);
2371     if (ret) {
2372         dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2373         goto fail;
2374     }
2375 
2376     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2377     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2378 
2379 fail:
2380     ti_sci_put_one_xfer(&info->minfo, xfer);
2381     dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2382     return ret;
2383 }
2384 
2385 /**
2386  * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2387  * @handle: Pointer to TI SCI handle.
2388  * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2389  *      structure
2390  *
2391  * Return: 0 if all went well, else returns appropriate error value.
2392  *
2393  * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2394  * more info.
2395  */
2396 static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2397             const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2398 {
2399     struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2400     struct ti_sci_msg_hdr *resp;
2401     struct ti_sci_xfer *xfer;
2402     struct ti_sci_info *info;
2403     struct device *dev;
2404     int ret = 0;
2405 
2406     if (IS_ERR_OR_NULL(handle))
2407         return -EINVAL;
2408 
2409     info = handle_to_ti_sci_info(handle);
2410     dev = info->dev;
2411 
2412     xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2413                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2414                    sizeof(*req), sizeof(*resp));
2415     if (IS_ERR(xfer)) {
2416         ret = PTR_ERR(xfer);
2417         dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2418         return ret;
2419     }
2420     req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2421     req->valid_params = params->valid_params;
2422     req->nav_id = params->nav_id;
2423     req->flow_index = params->flow_index;
2424     req->rx_einfo_present = params->rx_einfo_present;
2425     req->rx_psinfo_present = params->rx_psinfo_present;
2426     req->rx_error_handling = params->rx_error_handling;
2427     req->rx_desc_type = params->rx_desc_type;
2428     req->rx_sop_offset = params->rx_sop_offset;
2429     req->rx_dest_qnum = params->rx_dest_qnum;
2430     req->rx_src_tag_hi = params->rx_src_tag_hi;
2431     req->rx_src_tag_lo = params->rx_src_tag_lo;
2432     req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2433     req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2434     req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2435     req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2436     req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2437     req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2438     req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2439     req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2440     req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2441     req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2442     req->rx_ps_location = params->rx_ps_location;
2443 
2444     ret = ti_sci_do_xfer(info, xfer);
2445     if (ret) {
2446         dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2447         goto fail;
2448     }
2449 
2450     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2451     ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2452 
2453 fail:
2454     ti_sci_put_one_xfer(&info->minfo, xfer);
2455     dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2456     return ret;
2457 }
2458 
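/*
 * Editor's note: an illustrative TX channel configuration via
 * rm_udmap_ops; values and valid bits are placeholders. RX channels and
 * RX flows follow the same fill-and-send pattern through rx_ch_cfg and
 * rx_flow_cfg:
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg params = { 0 };
 *
 *	params.valid_params = tx_valid_bits;	(caller-chosen VALID bits)
 *	params.nav_id = 26;
 *	params.index = 2;
 *	params.tx_chan_type = chan_type;
 *	params.txcq_qnum = 100;
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &params);
 */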
2459 /**
2460  * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2461  * @handle: Pointer to TI SCI handle
2462  * @proc_id:    Processor ID this request is for
2463  *
2464  * Return: 0 if all went well, else returns appropriate error value.
2465  */
2466 static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2467                    u8 proc_id)
2468 {
2469     struct ti_sci_msg_req_proc_request *req;
2470     struct ti_sci_msg_hdr *resp;
2471     struct ti_sci_info *info;
2472     struct ti_sci_xfer *xfer;
2473     struct device *dev;
2474     int ret = 0;
2475 
2476     if (!handle)
2477         return -EINVAL;
2478     if (IS_ERR(handle))
2479         return PTR_ERR(handle);
2480 
2481     info = handle_to_ti_sci_info(handle);
2482     dev = info->dev;
2483 
2484     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2485                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2486                    sizeof(*req), sizeof(*resp));
2487     if (IS_ERR(xfer)) {
2488         ret = PTR_ERR(xfer);
2489         dev_err(dev, "Message alloc failed(%d)\n", ret);
2490         return ret;
2491     }
2492     req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2493     req->processor_id = proc_id;
2494 
2495     ret = ti_sci_do_xfer(info, xfer);
2496     if (ret) {
2497         dev_err(dev, "Mbox send fail %d\n", ret);
2498         goto fail;
2499     }
2500 
2501     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2502 
2503     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2504 
2505 fail:
2506     ti_sci_put_one_xfer(&info->minfo, xfer);
2507 
2508     return ret;
2509 }
2510 
2511 /**
2512  * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2513  * @handle: Pointer to TI SCI handle
2514  * @proc_id:    Processor ID this request is for
2515  *
2516  * Return: 0 if all went well, else returns appropriate error value.
2517  */
2518 static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2519                    u8 proc_id)
2520 {
2521     struct ti_sci_msg_req_proc_release *req;
2522     struct ti_sci_msg_hdr *resp;
2523     struct ti_sci_info *info;
2524     struct ti_sci_xfer *xfer;
2525     struct device *dev;
2526     int ret = 0;
2527 
2528     if (!handle)
2529         return -EINVAL;
2530     if (IS_ERR(handle))
2531         return PTR_ERR(handle);
2532 
2533     info = handle_to_ti_sci_info(handle);
2534     dev = info->dev;
2535 
2536     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2537                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2538                    sizeof(*req), sizeof(*resp));
2539     if (IS_ERR(xfer)) {
2540         ret = PTR_ERR(xfer);
2541         dev_err(dev, "Message alloc failed(%d)\n", ret);
2542         return ret;
2543     }
2544     req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2545     req->processor_id = proc_id;
2546 
2547     ret = ti_sci_do_xfer(info, xfer);
2548     if (ret) {
2549         dev_err(dev, "Mbox send fail %d\n", ret);
2550         goto fail;
2551     }
2552 
2553     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2554 
2555     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2556 
2557 fail:
2558     ti_sci_put_one_xfer(&info->minfo, xfer);
2559 
2560     return ret;
2561 }
2562 
2563 /**
2564  * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
2565  *              processor to a host in the processor's access
2566  *              control list.
2567  * @handle: Pointer to TI SCI handle
2568  * @proc_id:    Processor ID this request is for
2569  * @host_id:    Host ID to get the control of the processor
2570  *
2571  * Return: 0 if all went well, else returns appropriate error value.
2572  */
2573 static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2574                     u8 proc_id, u8 host_id)
2575 {
2576     struct ti_sci_msg_req_proc_handover *req;
2577     struct ti_sci_msg_hdr *resp;
2578     struct ti_sci_info *info;
2579     struct ti_sci_xfer *xfer;
2580     struct device *dev;
2581     int ret = 0;
2582 
2583     if (!handle)
2584         return -EINVAL;
2585     if (IS_ERR(handle))
2586         return PTR_ERR(handle);
2587 
2588     info = handle_to_ti_sci_info(handle);
2589     dev = info->dev;
2590 
2591     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2592                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2593                    sizeof(*req), sizeof(*resp));
2594     if (IS_ERR(xfer)) {
2595         ret = PTR_ERR(xfer);
2596         dev_err(dev, "Message alloc failed(%d)\n", ret);
2597         return ret;
2598     }
2599     req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2600     req->processor_id = proc_id;
2601     req->host_id = host_id;
2602 
2603     ret = ti_sci_do_xfer(info, xfer);
2604     if (ret) {
2605         dev_err(dev, "Mbox send fail %d\n", ret);
2606         goto fail;
2607     }
2608 
2609     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2610 
2611     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2612 
2613 fail:
2614     ti_sci_put_one_xfer(&info->minfo, xfer);
2615 
2616     return ret;
2617 }
2618 
2619 /**
2620  * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2621  *                  configuration flags
2622  * @handle:     Pointer to TI SCI handle
2623  * @proc_id:        Processor ID this request is for
      * @bootvector:     Processor boot vector (start address)
2624  * @config_flags_set:   Configuration flags to be set
2625  * @config_flags_clear: Configuration flags to be cleared.
2626  *
2627  * Return: 0 if all went well, else returns appropriate error value.
2628  */
2629 static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2630                       u8 proc_id, u64 bootvector,
2631                       u32 config_flags_set,
2632                       u32 config_flags_clear)
2633 {
2634     struct ti_sci_msg_req_set_config *req;
2635     struct ti_sci_msg_hdr *resp;
2636     struct ti_sci_info *info;
2637     struct ti_sci_xfer *xfer;
2638     struct device *dev;
2639     int ret = 0;
2640 
2641     if (!handle)
2642         return -EINVAL;
2643     if (IS_ERR(handle))
2644         return PTR_ERR(handle);
2645 
2646     info = handle_to_ti_sci_info(handle);
2647     dev = info->dev;
2648 
2649     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2650                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2651                    sizeof(*req), sizeof(*resp));
2652     if (IS_ERR(xfer)) {
2653         ret = PTR_ERR(xfer);
2654         dev_err(dev, "Message alloc failed(%d)\n", ret);
2655         return ret;
2656     }
2657     req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2658     req->processor_id = proc_id;
2659     req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2660     req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2661                 TI_SCI_ADDR_HIGH_SHIFT;
2662     req->config_flags_set = config_flags_set;
2663     req->config_flags_clear = config_flags_clear;
2664 
2665     ret = ti_sci_do_xfer(info, xfer);
2666     if (ret) {
2667         dev_err(dev, "Mbox send fail %d\n", ret);
2668         goto fail;
2669     }
2670 
2671     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2672 
2673     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2674 
2675 fail:
2676     ti_sci_put_one_xfer(&info->minfo, xfer);
2677 
2678     return ret;
2679 }
2680 
2681 /**
2682  * ti_sci_cmd_proc_set_control() - Command to set the processor boot
2683  *                   control flags
2684  * @handle:         Pointer to TI SCI handle
2685  * @proc_id:            Processor ID this request is for
2686  * @control_flags_set:      Control flags to be set
2687  * @control_flags_clear:    Control flags to be cleared
2688  *
2689  * Return: 0 if all went well, else returns appropriate error value.
2690  */
2691 static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2692                        u8 proc_id, u32 control_flags_set,
2693                        u32 control_flags_clear)
2694 {
2695     struct ti_sci_msg_req_set_ctrl *req;
2696     struct ti_sci_msg_hdr *resp;
2697     struct ti_sci_info *info;
2698     struct ti_sci_xfer *xfer;
2699     struct device *dev;
2700     int ret = 0;
2701 
2702     if (!handle)
2703         return -EINVAL;
2704     if (IS_ERR(handle))
2705         return PTR_ERR(handle);
2706 
2707     info = handle_to_ti_sci_info(handle);
2708     dev = info->dev;
2709 
2710     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2711                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2712                    sizeof(*req), sizeof(*resp));
2713     if (IS_ERR(xfer)) {
2714         ret = PTR_ERR(xfer);
2715         dev_err(dev, "Message alloc failed(%d)\n", ret);
2716         return ret;
2717     }
2718     req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2719     req->processor_id = proc_id;
2720     req->control_flags_set = control_flags_set;
2721     req->control_flags_clear = control_flags_clear;
2722 
2723     ret = ti_sci_do_xfer(info, xfer);
2724     if (ret) {
2725         dev_err(dev, "Mbox send fail %d\n", ret);
2726         goto fail;
2727     }
2728 
2729     resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2730 
2731     ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2732 
2733 fail:
2734     ti_sci_put_one_xfer(&info->minfo, xfer);
2735 
2736     return ret;
2737 }
2738 
2739 /**
2740  * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
2741  * @handle: Pointer to TI SCI handle
2742  * @proc_id:    Processor ID this request is for
      * @bv:     Pointer to store the processor boot vector
      * @cfg_flags:  Pointer to store the processor configuration flags
      * @ctrl_flags: Pointer to store the processor control flags
      * @sts_flags:  Pointer to store the processor status flags
2743  *
2744  * Return: 0 if all went well, else returns appropriate error value.
2745  */
2746 static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2747                       u8 proc_id, u64 *bv, u32 *cfg_flags,
2748                       u32 *ctrl_flags, u32 *sts_flags)
2749 {
2750     struct ti_sci_msg_resp_get_status *resp;
2751     struct ti_sci_msg_req_get_status *req;
2752     struct ti_sci_info *info;
2753     struct ti_sci_xfer *xfer;
2754     struct device *dev;
2755     int ret = 0;
2756 
2757     if (!handle)
2758         return -EINVAL;
2759     if (IS_ERR(handle))
2760         return PTR_ERR(handle);
2761 
2762     info = handle_to_ti_sci_info(handle);
2763     dev = info->dev;
2764 
2765     xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2766                    TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2767                    sizeof(*req), sizeof(*resp));
2768     if (IS_ERR(xfer)) {
2769         ret = PTR_ERR(xfer);
2770         dev_err(dev, "Message alloc failed(%d)\n", ret);
2771         return ret;
2772     }
2773     req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2774     req->processor_id = proc_id;
2775 
2776     ret = ti_sci_do_xfer(info, xfer);
2777     if (ret) {
2778         dev_err(dev, "Mbox send fail %d\n", ret);
2779         goto fail;
2780     }
2781 
2782     resp = (struct ti_sci_msg_resp_get_status *)xfer->xfer_buf;
2783 
2784     if (!ti_sci_is_response_ack(resp)) {
2785         ret = -ENODEV;
2786     } else {
2787         *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2788               (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2789                TI_SCI_ADDR_HIGH_MASK);
2790         *cfg_flags = resp->config_flags;
2791         *ctrl_flags = resp->control_flags;
2792         *sts_flags = resp->status_flags;
2793     }
2794 
2795 fail:
2796     ti_sci_put_one_xfer(&info->minfo, xfer);
2797 
2798     return ret;
2799 }
2800 
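/*
 * Editor's note: an illustrative remote-processor boot flow using the
 * proc_ops installed below; the proc_id, host_id and boot vector are
 * hypothetical. Control is requested, the boot vector programmed via
 * set_config(), and the processor then handed over to its runtime host:
 *
 *	ret = handle->ops.proc_ops.request(handle, 1);
 *	if (!ret)
 *		ret = handle->ops.proc_ops.set_config(handle, 1,
 *						      0x9d000000, 0, 0);
 *	if (!ret)
 *		ret = handle->ops.proc_ops.handover(handle, 1, 250);
 */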
2801 /*
2802  * ti_sci_setup_ops() - Setup the operations structures
2803  * @info:   pointer to TISCI instance
2804  */
2805 static void ti_sci_setup_ops(struct ti_sci_info *info)
2806 {
2807     struct ti_sci_ops *ops = &info->handle.ops;
2808     struct ti_sci_core_ops *core_ops = &ops->core_ops;
2809     struct ti_sci_dev_ops *dops = &ops->dev_ops;
2810     struct ti_sci_clk_ops *cops = &ops->clk_ops;
2811     struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2812     struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2813     struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2814     struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2815     struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2816     struct ti_sci_proc_ops *pops = &ops->proc_ops;
2817 
2818     core_ops->reboot_device = ti_sci_cmd_core_reboot;
2819 
2820     dops->get_device = ti_sci_cmd_get_device;
2821     dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
2822     dops->idle_device = ti_sci_cmd_idle_device;
2823     dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
2824     dops->put_device = ti_sci_cmd_put_device;
2825 
2826     dops->is_valid = ti_sci_cmd_dev_is_valid;
2827     dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2828     dops->is_idle = ti_sci_cmd_dev_is_idle;
2829     dops->is_stop = ti_sci_cmd_dev_is_stop;
2830     dops->is_on = ti_sci_cmd_dev_is_on;
2831     dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2832     dops->set_device_resets = ti_sci_cmd_set_device_resets;
2833     dops->get_device_resets = ti_sci_cmd_get_device_resets;
2834 
2835     cops->get_clock = ti_sci_cmd_get_clock;
2836     cops->idle_clock = ti_sci_cmd_idle_clock;
2837     cops->put_clock = ti_sci_cmd_put_clock;
2838     cops->is_auto = ti_sci_cmd_clk_is_auto;
2839     cops->is_on = ti_sci_cmd_clk_is_on;
2840     cops->is_off = ti_sci_cmd_clk_is_off;
2841 
2842     cops->set_parent = ti_sci_cmd_clk_set_parent;
2843     cops->get_parent = ti_sci_cmd_clk_get_parent;
2844     cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2845 
2846     cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2847     cops->set_freq = ti_sci_cmd_clk_set_freq;
2848     cops->get_freq = ti_sci_cmd_clk_get_freq;
2849 
2850     rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2851     rm_core_ops->get_range_from_shost =
2852                 ti_sci_cmd_get_resource_range_from_shost;
2853 
2854     iops->set_irq = ti_sci_cmd_set_irq;
2855     iops->set_event_map = ti_sci_cmd_set_event_map;
2856     iops->free_irq = ti_sci_cmd_free_irq;
2857     iops->free_event_map = ti_sci_cmd_free_event_map;
2858 
2859     rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
2860 
2861     psilops->pair = ti_sci_cmd_rm_psil_pair;
2862     psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2863 
2864     udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2865     udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2866     udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2867 
2868     pops->request = ti_sci_cmd_proc_request;
2869     pops->release = ti_sci_cmd_proc_release;
2870     pops->handover = ti_sci_cmd_proc_handover;
2871     pops->set_config = ti_sci_cmd_proc_set_config;
2872     pops->set_control = ti_sci_cmd_proc_set_control;
2873     pops->get_status = ti_sci_cmd_proc_get_status;
2874 }
2875 
2876 /**
2877  * ti_sci_get_handle() - Get the TI SCI handle for a device
2878  * @dev:    Pointer to device for which we want SCI handle
2879  *
2880  * NOTE: The function does not track individual clients of the framework
2881  * and is expected to be maintained by caller of TI SCI protocol library.
2882  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2883  * Return: pointer to handle if successful, else:
2884  * -EPROBE_DEFER if the instance is not ready
2885  * -ENODEV if the required node handle is missing
2886  * -EINVAL if invalid conditions are encountered.
2887  */
2888 const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
2889 {
2890     struct device_node *ti_sci_np;
2891     struct list_head *p;
2892     struct ti_sci_handle *handle = NULL;
2893     struct ti_sci_info *info;
2894 
2895     if (!dev) {
2896         pr_err("I need a device pointer\n");
2897         return ERR_PTR(-EINVAL);
2898     }
2899     ti_sci_np = of_get_parent(dev->of_node);
2900     if (!ti_sci_np) {
2901         dev_err(dev, "No OF information\n");
2902         return ERR_PTR(-EINVAL);
2903     }
2904 
2905     mutex_lock(&ti_sci_list_mutex);
2906     list_for_each(p, &ti_sci_list) {
2907         info = list_entry(p, struct ti_sci_info, node);
2908         if (ti_sci_np == info->dev->of_node) {
2909             handle = &info->handle;
2910             info->users++;
2911             break;
2912         }
2913     }
2914     mutex_unlock(&ti_sci_list_mutex);
2915     of_node_put(ti_sci_np);
2916 
2917     if (!handle)
2918         return ERR_PTR(-EPROBE_DEFER);
2919 
2920     return handle;
2921 }
2922 EXPORT_SYMBOL_GPL(ti_sci_get_handle);
2923 
2924 /**
2925  * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
2926  * @handle: Handle acquired by ti_sci_get_handle
2927  *
2928  * NOTE: The function does not track individual clients of the framework
2929  * and is expected to be maintained by caller of TI SCI protocol library.
2930  * ti_sci_put_handle must be balanced with successful ti_sci_get_handle
2931  *
2932  * Return: 0 if successfully released,
2933  * if an error pointer was passed, the error value is returned,
2934  * if NULL was passed, -EINVAL is returned.
2935  */
2936 int ti_sci_put_handle(const struct ti_sci_handle *handle)
2937 {
2938     struct ti_sci_info *info;
2939 
2940     if (IS_ERR(handle))
2941         return PTR_ERR(handle);
2942     if (!handle)
2943         return -EINVAL;
2944 
2945     info = handle_to_ti_sci_info(handle);
2946     mutex_lock(&ti_sci_list_mutex);
2947     if (!WARN_ON(!info->users))
2948         info->users--;
2949     mutex_unlock(&ti_sci_list_mutex);
2950 
2951     return 0;
2952 }
2953 EXPORT_SYMBOL_GPL(ti_sci_put_handle);
2954 
2955 static void devm_ti_sci_release(struct device *dev, void *res)
2956 {
2957     const struct ti_sci_handle **ptr = res;
2958     const struct ti_sci_handle *handle = *ptr;
2959     int ret;
2960 
2961     ret = ti_sci_put_handle(handle);
2962     if (ret)
2963         dev_err(dev, "failed to put handle %d\n", ret);
2964 }
2965 
2966 /**
2967  * devm_ti_sci_get_handle() - Managed get handle
2968  * @dev:    device for which we want the SCI handle.
2969  *
2970  * NOTE: This releases the handle once the device resources are
2971  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
2972  * The function does not track individual clients of the framework
2973  * and is expected to be maintained by caller of TI SCI protocol library.
2974  *
2975  * Return: pointer to handle if successful, else corresponding error pointer.
2976  */
2977 const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
2978 {
2979     const struct ti_sci_handle **ptr;
2980     const struct ti_sci_handle *handle;
2981 
2982     ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
2983     if (!ptr)
2984         return ERR_PTR(-ENOMEM);
2985     handle = ti_sci_get_handle(dev);
2986 
2987     if (!IS_ERR(handle)) {
2988         *ptr = handle;
2989         devres_add(dev, ptr);
2990     } else {
2991         devres_free(ptr);
2992     }
2993 
2994     return handle;
2995 }
2996 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
2997 
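/*
 * Editor's note: an illustrative client probe using the managed getter
 * above; client_probe() is a hypothetical platform driver callback:
 *
 *	static int client_probe(struct platform_device *pdev)
 *	{
 *		const struct ti_sci_handle *handle;
 *
 *		handle = devm_ti_sci_get_handle(&pdev->dev);
 *		if (IS_ERR(handle))
 *			return PTR_ERR(handle);
 *		(use handle->ops from here on; released automatically)
 *		return 0;
 *	}
 */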
2998 /**
2999  * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
3000  * @np:     device node
3001  * @property:   property name containing phandle on TISCI node
3002  *
3003  * NOTE: The function does not track individual clients of the framework
3004  * and is expected to be maintained by caller of TI SCI protocol library.
3005  * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
3006  * Return: pointer to handle if successful, else:
3007  * -EPROBE_DEFER if the instance is not ready
3008  * -ENODEV if the required node handle is missing
3009  * -EINVAL if invalid conditions are encountered.
3010  */
3011 const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
3012                           const char *property)
3013 {
3014     struct ti_sci_handle *handle = NULL;
3015     struct device_node *ti_sci_np;
3016     struct ti_sci_info *info;
3017     struct list_head *p;
3018 
3019     if (!np) {
3020         pr_err("I need a device node pointer\n");
3021         return ERR_PTR(-EINVAL);
3022     }
3023 
3024     ti_sci_np = of_parse_phandle(np, property, 0);
3025     if (!ti_sci_np)
3026         return ERR_PTR(-ENODEV);
3027 
3028     mutex_lock(&ti_sci_list_mutex);
3029     list_for_each(p, &ti_sci_list) {
3030         info = list_entry(p, struct ti_sci_info, node);
3031         if (ti_sci_np == info->dev->of_node) {
3032             handle = &info->handle;
3033             info->users++;
3034             break;
3035         }
3036     }
3037     mutex_unlock(&ti_sci_list_mutex);
3038     of_node_put(ti_sci_np);
3039 
3040     if (!handle)
3041         return ERR_PTR(-EPROBE_DEFER);
3042 
3043     return handle;
3044 }
3045 EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3046 
3047 /**
3048  * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
3049  * @dev:    Device pointer requesting TISCI handle
3050  * @property:   property name containing phandle on TISCI node
3051  *
3052  * NOTE: This releases the handle once the device resources are
3053  * no longer needed. MUST NOT BE released with ti_sci_put_handle.
3054  * The function does not track individual clients of the framework
3055  * and is expected to be maintained by caller of TI SCI protocol library.
3056  *
3057  * Return: pointer to handle if successful, else corresponding error pointer.
3058  */
3059 const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
3060                                const char *property)
3061 {
3062     const struct ti_sci_handle *handle;
3063     const struct ti_sci_handle **ptr;
3064 
3065     ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
3066     if (!ptr)
3067         return ERR_PTR(-ENOMEM);
3068     handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
3069 
3070     if (!IS_ERR(handle)) {
3071         *ptr = handle;
3072         devres_add(dev, ptr);
3073     } else {
3074         devres_free(ptr);
3075     }
3076 
3077     return handle;
3078 }
3079 EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3080 
3081 /**
3082  * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
3083  * @res:    Pointer to the TISCI resource
3084  *
3085  * Return: a free resource number if one is available, else TI_SCI_RESOURCE_NULL.
3086  */
3087 u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
3088 {
3089     unsigned long flags;
3090     u16 set, free_bit;
3091 
3092     raw_spin_lock_irqsave(&res->lock, flags);
3093     for (set = 0; set < res->sets; set++) {
3094         struct ti_sci_resource_desc *desc = &res->desc[set];
3095         int res_count = desc->num + desc->num_sec;
3096 
3097         free_bit = find_first_zero_bit(desc->res_map, res_count);
3098         if (free_bit != res_count) {
3099             set_bit(free_bit, desc->res_map);
3100             raw_spin_unlock_irqrestore(&res->lock, flags);
3101 
3102             if (desc->num && free_bit < desc->num)
3103                 return desc->start + free_bit;
3104             else
3105                 return desc->start_sec + free_bit;
3106         }
3107     }
3108     raw_spin_unlock_irqrestore(&res->lock, flags);
3109 
3110     return TI_SCI_RESOURCE_NULL;
3111 }
3112 EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3113 
3114 /**
3115  * ti_sci_release_resource() - Release a resource from TISCI resource.
3116  * @res:    Pointer to the TISCI resource
3117  * @id:     Resource id to be released.
3118  */
3119 void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3120 {
3121     unsigned long flags;
3122     u16 set;
3123 
3124     raw_spin_lock_irqsave(&res->lock, flags);
3125     for (set = 0; set < res->sets; set++) {
3126         struct ti_sci_resource_desc *desc = &res->desc[set];
3127 
3128         if (desc->num && desc->start <= id &&
3129             (desc->start + desc->num) > id)
3130             clear_bit(id - desc->start, desc->res_map);
3131         else if (desc->num_sec && desc->start_sec <= id &&
3132              (desc->start_sec + desc->num_sec) > id)
3133             clear_bit(id - desc->start_sec, desc->res_map);
3134     }
3135     raw_spin_unlock_irqrestore(&res->lock, flags);
3136 }
3137 EXPORT_SYMBOL_GPL(ti_sci_release_resource);
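/*
 * Editor's sketch (not part of this file): pairing the two helpers above.
 * A real "res" would come from devm_ti_sci_get_of_resource() further down.
 */
static int example_use_resource(struct ti_sci_resource *res)
{
    u16 id = ti_sci_get_free_resource(res);

    if (id == TI_SCI_RESOURCE_NULL)
        return -ENOSPC; /* every set in the range is exhausted */

    /* ... program the hardware with resource index 'id' ... */

    ti_sci_release_resource(res, id);
    return 0;
}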
3138 
3139 /**
3140  * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3141  * @res:    Pointer to the TISCI resource
3142  *
3143  * Return: Total number of available resources.
3144  */
3145 u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3146 {
3147     u32 set, count = 0;
3148 
3149     for (set = 0; set < res->sets; set++)
3150         count += res->desc[set].num + res->desc[set].num_sec;
3151 
3152     return count;
3153 }
3154 EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3155 
3156 /**
3157  * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
3158  * @handle: TISCI handle
3159  * @dev:    Device pointer to which the resource is assigned
3160  * @dev_id: TISCI device id to which the resource is assigned
3161  * @sub_types:  Array of sub_types assigned corresponding to device
3162  * @sets:   Number of sub_types
3163  *
3164  * Return: Pointer to ti_sci_resource if all went well else appropriate
3165  *     error pointer.
3166  */
3167 static struct ti_sci_resource *
3168 devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3169                   struct device *dev, u32 dev_id, u32 *sub_types,
3170                   u32 sets)
3171 {
3172     struct ti_sci_resource *res;
3173     bool valid_set = false;
3174     int i, ret, res_count;
3175 
3176     res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3177     if (!res)
3178         return ERR_PTR(-ENOMEM);
3179 
3180     res->sets = sets;
3181     res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3182                  GFP_KERNEL);
3183     if (!res->desc)
3184         return ERR_PTR(-ENOMEM);
3185 
3186     for (i = 0; i < res->sets; i++) {
3187         ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3188                             sub_types[i],
3189                             &res->desc[i]);
3190         if (ret) {
3191             dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3192                 dev_id, sub_types[i]);
3193             memset(&res->desc[i], 0, sizeof(res->desc[i]));
3194             continue;
3195         }
3196 
3197         dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
3198             dev_id, sub_types[i], res->desc[i].start,
3199             res->desc[i].num, res->desc[i].start_sec,
3200             res->desc[i].num_sec);
3201 
3202         valid_set = true;
3203         res_count = res->desc[i].num + res->desc[i].num_sec;
3204         res->desc[i].res_map =
3205             devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
3206                      sizeof(*res->desc[i].res_map), GFP_KERNEL);
3207         if (!res->desc[i].res_map)
3208             return ERR_PTR(-ENOMEM);
3209     }
3210     raw_spin_lock_init(&res->lock);
3211 
3212     if (valid_set)
3213         return res;
3214 
3215     return ERR_PTR(-EINVAL);
3216 }
3217 
3218 /**
3219  * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3220  * @handle: TISCI handle
3221  * @dev:    Device pointer to which the resource is assigned
3222  * @dev_id: TISCI device id to which the resource is assigned
3223  * @of_prop:    property name by which the resources are represented
3224  *
3225  * Return: Pointer to ti_sci_resource if all went well else appropriate
3226  *     error pointer.
3227  */
3228 struct ti_sci_resource *
3229 devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3230                 struct device *dev, u32 dev_id, char *of_prop)
3231 {
3232     struct ti_sci_resource *res;
3233     u32 *sub_types;
3234     int sets;
3235 
3236     sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3237                            sizeof(u32));
3238     if (sets < 0) {
3239         dev_err(dev, "%s resource type ids not available\n", of_prop);
3240         return ERR_PTR(sets);
3241     }
3242 
3243     sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3244     if (!sub_types)
3245         return ERR_PTR(-ENOMEM);
3246 
3247     of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3248     res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3249                         sets);
3250 
3251     kfree(sub_types);
3252     return res;
3253 }
3254 EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
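/*
 * Editor's sketch (not part of this file): a typical call. The property
 * name "ti,sci-rm-range-vint" follows the naming style of TISCI resource
 * bindings but is an assumption here; dev_id is device specific.
 */
static struct ti_sci_resource *
example_get_ranges(const struct ti_sci_handle *sci, struct device *dev,
           u32 dev_id)
{
    return devm_ti_sci_get_of_resource(sci, dev, dev_id,
                       "ti,sci-rm-range-vint");
}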
3255 
3256 /**
3257  * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3258  * @handle: TISCI handle
3259  * @dev:    Device pointer to which the resource is assigned
3260  * @dev_id: TISCI device id to which the resource is assigned
3261  * @sub_type:   TISCI resource subtype representing the resource.
3262  *
3263  * Return: Pointer to ti_sci_resource if all went well else appropriate
3264  *     error pointer.
3265  */
3266 struct ti_sci_resource *
3267 devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3268              u32 dev_id, u32 sub_type)
3269 {
3270     return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3271 }
3272 EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3273 
3274 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
3275                 void *cmd)
3276 {
3277     struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
3278     const struct ti_sci_handle *handle = &info->handle;
3279 
3280     ti_sci_cmd_core_reboot(handle);
3281 
3282     /* Whether the call fails or succeeds, we should not still be running */
3283     return NOTIFY_BAD;
3284 }
3285 
3286 static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
3287 {
3288     info->is_suspending = is_suspending;
3289 }
3290 
3291 static int ti_sci_suspend(struct device *dev)
3292 {
3293     struct ti_sci_info *info = dev_get_drvdata(dev);
3294     /*
3295      * We must switch operation to polled mode now as drivers and the genpd
3296      * layer may make late TI SCI calls to change clock and device states
3297      * from the noirq phase of suspend.
3298      */
3299     ti_sci_set_is_suspending(info, true);
3300 
3301     return 0;
3302 }
3303 
3304 static int ti_sci_resume(struct device *dev)
3305 {
3306     struct ti_sci_info *info = dev_get_drvdata(dev);
3307 
3308     ti_sci_set_is_suspending(info, false);
3309 
3310     return 0;
3311 }
3312 
3313 static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
3314 
3315 /* Description for K2G */
3316 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3317     .default_host_id = 2,
3318     /* Conservative duration */
3319     .max_rx_timeout_ms = 1000,
3320     /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3321     .max_msgs = 20,
3322     .max_msg_size = 64,
3323 };
3324 
3325 /* Description for AM654 */
3326 static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3327     .default_host_id = 12,
3328     /* Conservative duration */
3329     .max_rx_timeout_ms = 10000,
3330     /* Limited by MBOX_TX_QUEUE_LEN. AM654 can handle up to 128 messages! */
3331     .max_msgs = 20,
3332     .max_msg_size = 60,
3333 };
3334 
3335 static const struct of_device_id ti_sci_of_match[] = {
3336     {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
3337     {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
3338     { /* Sentinel */ },
3339 };
3340 MODULE_DEVICE_TABLE(of, ti_sci_of_match);
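/*
 * Editor's sketch (not part of this file): a device-tree node this table
 * would match, built only from properties probe() below actually reads
 * ("ti,host-id", "ti,system-reboot-controller") and the "rx"/"tx" mailbox
 * channel names it requests. Phandles and cell values are placeholders.
 *
 *	dmsc: system-controller {
 *		compatible = "ti,am654-sci";
 *		ti,host-id = <12>;
 *		ti,system-reboot-controller;
 *		mbox-names = "rx", "tx";
 *		mboxes = <&secure_proxy 11>, <&secure_proxy 13>;
 *	};
 */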
3341 
3342 static int ti_sci_probe(struct platform_device *pdev)
3343 {
3344     struct device *dev = &pdev->dev;
3345     const struct of_device_id *of_id;
3346     const struct ti_sci_desc *desc;
3347     struct ti_sci_xfer *xfer;
3348     struct ti_sci_info *info = NULL;
3349     struct ti_sci_xfers_info *minfo;
3350     struct mbox_client *cl;
3351     int ret = -EINVAL;
3352     int i;
3353     int reboot = 0;
3354     u32 h_id;
3355 
3356     of_id = of_match_device(ti_sci_of_match, dev);
3357     if (!of_id) {
3358         dev_err(dev, "OF data missing\n");
3359         return -EINVAL;
3360     }
3361     desc = of_id->data;
3362 
3363     info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3364     if (!info)
3365         return -ENOMEM;
3366 
3367     info->dev = dev;
3368     info->desc = desc;
3369     ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3370     /* if the property is not present in DT, use a default from desc */
3371     if (ret < 0) {
3372         info->host_id = info->desc->default_host_id;
3373     } else {
3374         if (!h_id) {
3375             dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3376             info->host_id = info->desc->default_host_id;
3377         } else {
3378             info->host_id = h_id;
3379         }
3380     }
3381 
3382     reboot = of_property_read_bool(dev->of_node,
3383                        "ti,system-reboot-controller");
3384     INIT_LIST_HEAD(&info->node);
3385     minfo = &info->minfo;
3386 
3387     /*
3388      * Pre-allocate messages.
3389      * NEVER allocate more messages than we can index in hdr.seq;
3390      * if the data description has a bug, force a fix.
3391      */
3392     if (WARN_ON(desc->max_msgs >=
3393             1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
3394         return -EINVAL;
3395 
3396     minfo->xfer_block = devm_kcalloc(dev,
3397                      desc->max_msgs,
3398                      sizeof(*minfo->xfer_block),
3399                      GFP_KERNEL);
3400     if (!minfo->xfer_block)
3401         return -ENOMEM;
3402 
3403     minfo->xfer_alloc_table = devm_kcalloc(dev,
3404                            BITS_TO_LONGS(desc->max_msgs),
3405                            sizeof(unsigned long),
3406                            GFP_KERNEL);
3407     if (!minfo->xfer_alloc_table)
3408         return -ENOMEM;
3409     bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
3410 
3411     /* Pre-initialize the buffer pointer to pre-allocated buffers */
3412     for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
3413         xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
3414                           GFP_KERNEL);
3415         if (!xfer->xfer_buf)
3416             return -ENOMEM;
3417 
3418         xfer->tx_message.buf = xfer->xfer_buf;
3419         init_completion(&xfer->done);
3420     }
3421 
3422     ret = ti_sci_debugfs_create(pdev, info);
3423     if (ret)
3424         dev_warn(dev, "Failed to create debug file\n");
3425 
3426     platform_set_drvdata(pdev, info);
3427 
3428     cl = &info->cl;
3429     cl->dev = dev;
3430     cl->tx_block = false;
3431     cl->rx_callback = ti_sci_rx_callback;
3432     cl->knows_txdone = true;
3433 
3434     spin_lock_init(&minfo->xfer_lock);
3435     sema_init(&minfo->sem_xfer_count, desc->max_msgs);
3436 
3437     info->chan_rx = mbox_request_channel_byname(cl, "rx");
3438     if (IS_ERR(info->chan_rx)) {
3439         ret = PTR_ERR(info->chan_rx);
3440         goto out;
3441     }
3442 
3443     info->chan_tx = mbox_request_channel_byname(cl, "tx");
3444     if (IS_ERR(info->chan_tx)) {
3445         ret = PTR_ERR(info->chan_tx);
3446         goto out;
3447     }
3448     ret = ti_sci_cmd_get_revision(info);
3449     if (ret) {
3450         dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
3451         goto out;
3452     }
3453 
3454     ti_sci_setup_ops(info);
3455 
3456     if (reboot) {
3457         info->nb.notifier_call = tisci_reboot_handler;
3458         info->nb.priority = 128;
3459 
3460         ret = register_restart_handler(&info->nb);
3461         if (ret) {
3462             dev_err(dev, "reboot registration fail(%d)\n", ret);
3463             goto out;
3464         }
3465     }
3466 
3467     dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
3468          info->handle.version.abi_major, info->handle.version.abi_minor,
3469          info->handle.version.firmware_revision,
3470          info->handle.version.firmware_description);
3471 
3472     mutex_lock(&ti_sci_list_mutex);
3473     list_add_tail(&info->node, &ti_sci_list);
3474     mutex_unlock(&ti_sci_list_mutex);
3475 
3476     return of_platform_populate(dev->of_node, NULL, NULL, dev);
3477 out:
3478     if (!IS_ERR(info->chan_tx))
3479         mbox_free_channel(info->chan_tx);
3480     if (!IS_ERR(info->chan_rx))
3481         mbox_free_channel(info->chan_rx);
3482     debugfs_remove(info->d);
3483     return ret;
3484 }
3485 
3486 static int ti_sci_remove(struct platform_device *pdev)
3487 {
3488     struct ti_sci_info *info;
3489     struct device *dev = &pdev->dev;
3490     int ret = 0;
3491 
3492     of_platform_depopulate(dev);
3493 
3494     info = platform_get_drvdata(pdev);
3495 
3496     if (info->nb.notifier_call)
3497         unregister_restart_handler(&info->nb);
3498 
3499     mutex_lock(&ti_sci_list_mutex);
3500     if (info->users)
3501         ret = -EBUSY;
3502     else
3503         list_del(&info->node);
3504     mutex_unlock(&ti_sci_list_mutex);
3505 
3506     if (!ret) {
3507         ti_sci_debugfs_destroy(pdev, info);
3508 
3509         /* Safe to free channels since no more users */
3510         mbox_free_channel(info->chan_tx);
3511         mbox_free_channel(info->chan_rx);
3512     }
3513 
3514     return ret;
3515 }
3516 
3517 static struct platform_driver ti_sci_driver = {
3518     .probe = ti_sci_probe,
3519     .remove = ti_sci_remove,
3520     .driver = {
3521            .name = "ti-sci",
3522            .of_match_table = of_match_ptr(ti_sci_of_match),
3523            .pm = &ti_sci_pm_ops,
3524     },
3525 };
3526 module_platform_driver(ti_sci_driver);
3527 
3528 MODULE_LICENSE("GPL v2");
3529 MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
3530 MODULE_AUTHOR("Nishanth Menon");
3531 MODULE_ALIAS("platform:ti-sci");