Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
0003  *
0004  * Permission is hereby granted, free of charge, to any person obtaining a
0005  * copy of this software and associated documentation files (the "Software"),
0006  * to deal in the Software without restriction, including without limitation
0007  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0008  * and/or sell copies of the Software, and to permit persons to whom the
0009  * Software is furnished to do so, subject to the following conditions:
0010  *
0011  * The above copyright notice and this permission notice shall be included in
0012  * all copies or substantial portions of the Software.
0013  *
0014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0017  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0018  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0019  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0020  * OTHER DEALINGS IN THE SOFTWARE.
0021  *
0022  */
0023 #include "qmgr.h"
0024 
0025 static bool
0026 nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
0027 {
0028     u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
0029     u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
0030     u32 free;
0031 
0032     size = ALIGN(size, QUEUE_ALIGNMENT);
0033 
0034     if (head >= tail) {
0035         free = cmdq->offset + cmdq->size - head;
0036         free -= HDR_SIZE;
0037 
0038         if (size > free) {
0039             *rewind = true;
0040             head = cmdq->offset;
0041         }
0042     }
0043 
0044     if (head < tail)
0045         free = tail - head - 1;
0046 
0047     return size <= free;
0048 }
0049 
0050 static void
0051 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
0052 {
0053     struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
0054     nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
0055     cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
0056 }
0057 
0058 static void
0059 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
0060 {
0061     struct nvfw_falcon_cmd cmd;
0062 
0063     cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
0064     cmd.size = sizeof(cmd);
0065     nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
0066 
0067     cmdq->position = cmdq->offset;
0068 }
0069 
0070 static int
0071 nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
0072 {
0073     struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
0074     bool rewind = false;
0075 
0076     mutex_lock(&cmdq->mutex);
0077 
0078     if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
0079         FLCNQ_DBG(cmdq, "queue full");
0080         mutex_unlock(&cmdq->mutex);
0081         return -EAGAIN;
0082     }
0083 
0084     cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
0085 
0086     if (rewind)
0087         nvkm_falcon_cmdq_rewind(cmdq);
0088 
0089     return 0;
0090 }
0091 
0092 static void
0093 nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
0094 {
0095     nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
0096     mutex_unlock(&cmdq->mutex);
0097 }
0098 
0099 static int
0100 nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd)
0101 {
0102     static unsigned timeout = 2000;
0103     unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
0104     int ret = -EAGAIN;
0105 
0106     while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
0107         ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
0108     if (ret) {
0109         FLCNQ_ERR(cmdq, "timeout waiting for queue space");
0110         return ret;
0111     }
0112 
0113     nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
0114     nvkm_falcon_cmdq_close(cmdq);
0115     return ret;
0116 }
0117 
0118 /* specifies that we want to know the command status in the answer message */
0119 #define CMD_FLAGS_STATUS BIT(0)
0120 /* specifies that we want an interrupt when the answer message is queued */
0121 #define CMD_FLAGS_INTR BIT(1)
0122 
/* Submit a command to the falcon and optionally wait for its reply.
 *
 * cb/priv: callback (and its argument) invoked by the message-queue reply
 * path when the firmware answers this command's sequence.
 * timeout: jiffies to wait for the reply; 0 makes the call asynchronous
 * (the reply path runs the callback and releases the sequence).
 *
 * Returns 0 (async) or the sequence result (sync) on success, -ETIMEDOUT
 * if the queue never became ready or the reply never arrived, or a
 * negative error from queue submission.
 */
int
nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nvfw_falcon_cmd *cmd,
	      nvkm_falcon_qmgr_callback cb, void *priv,
	      unsigned long timeout)
{
	struct nvkm_falcon_qmgr_seq *seq;
	int ret;

	/* The queue only accepts commands once init has signalled ->ready. */
	if (!wait_for_completion_timeout(&cmdq->ready,
				     msecs_to_jiffies(1000))) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
		return -ETIMEDOUT;
	}

	/* Reserve a sequence ID so the reply can be matched to this command. */
	seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	/* Always ask for a status reply and an interrupt when it is queued. */
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->state = SEQ_STATE_USED;
	seq->async = !timeout;
	seq->callback = cb;
	seq->priv = priv;

	ret = nvkm_falcon_cmdq_write(cmdq, cmd);
	if (ret) {
		/* NOTE(review): state is set to PENDING before release —
		 * presumably the state seq_release() expects; confirm
		 * against the qmgr implementation.
		 */
		seq->state = SEQ_STATE_PENDING;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
		return ret;
	}

	if (!seq->async) {
		if (!wait_for_completion_timeout(&seq->done, timeout)) {
			/* NOTE(review): seq is deliberately(?) not released
			 * here — the firmware may still deliver a late reply
			 * that references it; confirm the reply path handles
			 * (or leaks) this sequence.
			 */
			FLCNQ_ERR(cmdq, "timeout waiting for reply");
			return -ETIMEDOUT;
		}
		ret = seq->result;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
	}

	return ret;
}
0167 
/* Mark the queue not ready: subsequent senders block in
 * nvkm_falcon_cmdq_send() until nvkm_falcon_cmdq_init() re-arms ->ready.
 */
void
nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
{
	reinit_completion(&cmdq->ready);
}
0173 
0174 void
0175 nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
0176               u32 index, u32 offset, u32 size)
0177 {
0178     const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
0179 
0180     cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
0181     cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
0182     cmdq->offset = offset;
0183     cmdq->size = size;
0184     complete_all(&cmdq->ready);
0185 
0186     FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
0187           index, cmdq->offset, cmdq->size);
0188 }
0189 
0190 void
0191 nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
0192 {
0193     struct nvkm_falcon_cmdq *cmdq = *pcmdq;
0194     if (cmdq) {
0195         kfree(*pcmdq);
0196         *pcmdq = NULL;
0197     }
0198 }
0199 
0200 int
0201 nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
0202              struct nvkm_falcon_cmdq **pcmdq)
0203 {
0204     struct nvkm_falcon_cmdq *cmdq = *pcmdq;
0205 
0206     if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
0207         return -ENOMEM;
0208 
0209     cmdq->qmgr = qmgr;
0210     cmdq->name = name;
0211     mutex_init(&cmdq->mutex);
0212     init_completion(&cmdq->ready);
0213     return 0;
0214 }