// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"

/* Actual tasklet function, called whenever the tasklet is scheduled.
 * Runs the registered handler callback for each element in the
 * message queue.
 */
static void ipc_task_queue_handler(unsigned long data)
{
    struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
    unsigned int q_rpos = ipc_task->q_rpos;

    /* Loop over the input queue contents. */
    while (q_rpos != ipc_task->q_wpos) {
        /* Get the current first queue element. */
        struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

        /* Process the input message. */
        if (args->func)
            args->response = args->func(args->ipc_imem, args->arg,
                                        args->msg, args->size);

        /* Signal completion for synchronous calls */
        if (args->completion)
            complete(args->completion);

        /* Free message if copy was allocated. */
        if (args->is_copy)
            kfree(args->msg);
        /* Reset the queue element to an invalid state. Technically
         * spin_lock_irqsave is not required here, as the element has
         * already been processed and the queue will not wrap around
         * to this same element again within such a short time.
         */
        args->completion = NULL;
        args->func = NULL;
        args->msg = NULL;
        args->size = 0;
        args->is_copy = false;

        /* Calculate the new read pointer and update the volatile
         * read pointer.
         */
        q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
        ipc_task->q_rpos = q_rpos;
    }
}
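
/* Illustrative note (not in the original source): the queue is a
 * fixed-size ring buffer. Both positions advance modulo
 * IPC_THREAD_QUEUE_SIZE, so they wrap back to slot 0 at the end of
 * the array. A minimal sketch of the index arithmetic, assuming a
 * hypothetical queue size of 8 for readability:
 *
 *    unsigned int pos = 7;
 *    pos = (pos + 1) % 8;    // wraps back to 0
 *
 * The queue is empty when q_rpos == q_wpos, which is why the handler
 * loop above terminates once the read pointer catches up to the
 * write pointer.
 */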

/* Free copied messages and signal any pending completions left in the
 * queue when it is torn down.
 */
static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
{
    unsigned int q_rpos = ipc_task->q_rpos;

    while (q_rpos != ipc_task->q_wpos) {
        struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

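        /* The handler is never run here, so a synchronous waiter is
         * woken with args->response still at the -1 default assigned
         * at enqueue time.
         */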
        if (args->completion)
            complete(args->completion);

        if (args->is_copy)
            kfree(args->msg);

        q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
        ipc_task->q_rpos = q_rpos;
    }
}

/* Add a message to the queue and trigger the ipc_task. */
static int
ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
                        int arg, void *msg,
                        int (*func)(struct iosm_imem *ipc_imem, int arg,
                                    void *msg, size_t size),
                        size_t size, bool is_copy, bool wait)
{
    struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
    struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
    struct completion completion;
    unsigned int pos, nextpos;
    unsigned long flags;
    int result = -EIO;

    init_completion(&completion);

    /* The tasklet may be scheduled from either interrupt or thread
     * context, therefore protect the queue operation with a spinlock.
     */
    spin_lock_irqsave(&ipc_task->q_lock, flags);

    pos = ipc_task->q_wpos;
    nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;

    /* The queue is full when advancing the write pointer would make it
     * collide with the read pointer.
     */
    if (nextpos != ipc_task->q_rpos) {
        /* Get the reference to the queue element and save the passed
         * values.
         */
        ipc_task->args[pos].arg = arg;
        ipc_task->args[pos].msg = msg;
        ipc_task->args[pos].func = func;
        ipc_task->args[pos].ipc_imem = ipc_imem;
        ipc_task->args[pos].size = size;
        ipc_task->args[pos].is_copy = is_copy;
        ipc_task->args[pos].completion = wait ? &completion : NULL;
        ipc_task->args[pos].response = -1;

        /* Apply a write barrier so that the queue element fields above
         * are fully visible before ipc_task->q_wpos is updated.
         */
        smp_wmb();

        /* Publish the new write position. */
        ipc_task->q_wpos = nextpos;
        result = 0;
    }

    spin_unlock_irqrestore(&ipc_task->q_lock, flags);

    if (result == 0) {
        tasklet_schedule(ipc_tasklet);

        if (wait) {
            wait_for_completion(&completion);
            result = ipc_task->args[pos].response;
        }
    } else {
        dev_err(ipc_imem->ipc_task->dev, "queue is full");
    }

    return result;
}
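
/* Illustrative note (not in the original source): with this "keep one
 * slot empty" ring convention, at most IPC_THREAD_QUEUE_SIZE - 1
 * entries can be pending at once. A worked example, assuming a
 * hypothetical queue size of 4:
 *
 *    q_rpos == 2, q_wpos == 1:  nextpos == 2 == q_rpos  =>  full
 *    q_rpos == 2, q_wpos == 2:  queue is empty
 *
 * Sacrificing one slot lets full and empty be distinguished without
 * maintaining a separate element counter.
 */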

int ipc_task_queue_send_task(struct iosm_imem *imem,
                             int (*func)(struct iosm_imem *ipc_imem, int arg,
                                         void *msg, size_t size),
                             int arg, void *msg, size_t size, bool wait)
{
    bool is_copy = false;
    void *copy = msg;
    int ret = -ENOMEM;

    if (size > 0) {
        copy = kmemdup(msg, size, GFP_ATOMIC);
        if (!copy)
            goto out;

        is_copy = true;
    }

    ret = ipc_task_queue_add_task(imem, arg, copy, func,
                                  size, is_copy, wait);
    if (ret < 0) {
        dev_err(imem->ipc_task->dev,
                "add task failed for %ps %d, %p, %zu, %d", func, arg,
                copy, size, is_copy);
        if (is_copy)
            kfree(copy);
        goto out;
    }

    ret = 0;
out:
    return ret;
}
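
/* Hypothetical usage sketch (not part of the driver): how a caller
 * might run work in tasklet context and wait for its result. The
 * handler and payload below are invented for illustration only.
 */
#if 0
static int example_handler(struct iosm_imem *ipc_imem, int arg,
                           void *msg, size_t size)
{
    /* Runs in tasklet (softirq) context; must not sleep. */
    u32 *value = msg;

    return (*value == 42) ? 0 : -EINVAL;
}

static int example_send(struct iosm_imem *ipc_imem)
{
    u32 payload = 42;

    /* size > 0 makes ipc_task_queue_send_task() kmemdup() the payload,
     * so the stack variable may safely go out of scope. wait = true
     * blocks until example_handler has run and returns its response.
     */
    return ipc_task_queue_send_task(ipc_imem, example_handler, 0,
                                    &payload, sizeof(payload), true);
}
#endif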

int ipc_task_init(struct ipc_task *ipc_task)
{
    struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;

    ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
                                    GFP_KERNEL);
    if (!ipc_task->ipc_tasklet)
        return -ENOMEM;

    /* Initialize the spinlock needed to protect the message queue of
     * the ipc_task.
     */
    spin_lock_init(&ipc_queue->q_lock);

    tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
                 (unsigned long)ipc_queue);
    return 0;
}

void ipc_task_deinit(struct ipc_task *ipc_task)
{
    tasklet_kill(ipc_task->ipc_tasklet);

    kfree(ipc_task->ipc_tasklet);

    /* This will free/complete any outstanding messages without calling
     * the actual handler.
     */
    ipc_task_queue_cleanup(&ipc_task->ipc_queue);
}
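
/* Hypothetical lifecycle sketch (not part of the driver): init and
 * deinit are expected to pair around the lifetime of the owning
 * ipc_task; the wrapper below is invented for illustration only.
 */
#if 0
static int example_setup(struct ipc_task *ipc_task)
{
    int ret = ipc_task_init(ipc_task);

    if (ret)
        return ret;    /* -ENOMEM: tasklet allocation failed */

    /* ... queue work with ipc_task_queue_send_task() ... */

    /* Teardown kills the tasklet first, then drains the queue so that
     * pending waiters are completed and copied messages are freed.
     */
    ipc_task_deinit(ipc_task);
    return 0;
}
#endif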