/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "qmgr.h"

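/*
 * Take the queue lock and cache the current read (tail) position from the
 * tail register, so that subsequent pops can track their progress without
 * re-reading it.
 */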
static void
nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq)
{
	mutex_lock(&msgq->mutex);
	msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
}

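/*
 * Release the queue.  If @commit is set, write the cached read position back
 * to the tail register so the data consumed since _open() is marked as read,
 * then drop the lock.
 */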
static void
nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;

	if (commit)
		nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position);

	mutex_unlock(&msgq->mutex);
}

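/* The queue is empty when the head (write) and tail (read) pointers match. */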
static bool
nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq)
{
	u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg);
	u32 tail = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg);
	return head == tail;
}

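/*
 * Copy @size bytes from the queue into @data.  The cached read position is
 * reset to the start of the queue when the head pointer has wrapped around,
 * and is advanced past the popped data, rounded up to QUEUE_ALIGNMENT.
 */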
static int
nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
	u32 head, tail, available;

	head = nvkm_falcon_rd32(falcon, msgq->head_reg);
	/* has the buffer looped? */
	if (head < msgq->position)
		msgq->position = msgq->offset;

	tail = msgq->position;

	available = head - tail;
	if (size > available) {
		FLCNQ_ERR(msgq, "requested %d bytes, but only %d available",
			  size, available);
		return -EINVAL;
	}

	nvkm_falcon_read_dmem(falcon, tail, size, 0, data);
	msgq->position += ALIGN(size, QUEUE_ALIGNMENT);
	return 0;
}

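/*
 * Read one complete message (header plus optional body) into @hdr.
 * Returns 1 if a message was read, 0 if the queue was empty, and a negative
 * error code on failure.  The read position is committed unless an error
 * occurred.
 */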
static int
nvkm_falcon_msgq_read(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	int ret = 0;

	nvkm_falcon_msgq_open(msgq);

	if (nvkm_falcon_msgq_empty(msgq))
		goto close;

	ret = nvkm_falcon_msgq_pop(msgq, hdr, HDR_SIZE);
	if (ret) {
		FLCNQ_ERR(msgq, "failed to read message header");
		goto close;
	}

	if (hdr->size > MSG_BUF_SIZE) {
		FLCNQ_ERR(msgq, "message too big, %d bytes", hdr->size);
		ret = -ENOSPC;
		goto close;
	}

	if (hdr->size > HDR_SIZE) {
		u32 read_size = hdr->size - HDR_SIZE;

		ret = nvkm_falcon_msgq_pop(msgq, (hdr + 1), read_size);
		if (ret) {
			FLCNQ_ERR(msgq, "failed to read message data");
			goto close;
		}
	}

	ret = 1;
close:
	nvkm_falcon_msgq_close(msgq, (ret >= 0));
	return ret;
}

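/*
 * Dispatch a received message to the sequence it belongs to: run the
 * sequence's callback (if any, and if the sequence has not been cancelled),
 * then either release the sequence (asynchronous case) or complete it so a
 * synchronous waiter can proceed.
 */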
static int
nvkm_falcon_msgq_exec(struct nvkm_falcon_msgq *msgq, struct nvfw_falcon_msg *hdr)
{
	struct nvkm_falcon_qmgr_seq *seq;

	seq = &msgq->qmgr->seq.id[hdr->seq_id];
	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
		FLCNQ_ERR(msgq, "message for unknown sequence %08x", seq->id);
		return -EINVAL;
	}

	if (seq->state == SEQ_STATE_USED) {
		if (seq->callback)
			seq->result = seq->callback(seq->priv, hdr);
	}

	if (seq->async) {
		nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
		return 0;
	}

	complete_all(&seq->done);
	return 0;
}

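/* Drain the message queue, executing every message that can be read. */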
void
nvkm_falcon_msgq_recv(struct nvkm_falcon_msgq *msgq)
{
	/*
	 * We are invoked from a worker thread, so normally we have plenty of
	 * stack space to work with.
	 */
	u8 msg_buffer[MSG_BUF_SIZE];
	struct nvfw_falcon_msg *hdr = (void *)msg_buffer;

	while (nvkm_falcon_msgq_read(msgq, hdr) > 0)
		nvkm_falcon_msgq_exec(msgq, hdr);
}

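/*
 * Receive the firmware's init message.  The queue has not been through
 * _init() at this point, so the head/tail registers are taken straight from
 * the falcon function table and the queue offset is seeded from the current
 * tail pointer.
 */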
int
nvkm_falcon_msgq_recv_initmsg(struct nvkm_falcon_msgq *msgq,
			      void *data, u32 size)
{
	struct nvkm_falcon *falcon = msgq->qmgr->falcon;
	struct nvfw_falcon_msg *hdr = data;
	int ret;

	msgq->head_reg = falcon->func->msgq.head;
	msgq->tail_reg = falcon->func->msgq.tail;
	msgq->offset = nvkm_falcon_rd32(falcon, falcon->func->msgq.tail);

	nvkm_falcon_msgq_open(msgq);
	ret = nvkm_falcon_msgq_pop(msgq, data, size);
	if (ret == 0 && hdr->size != size) {
		FLCN_ERR(falcon, "unexpected init message size %d vs %d",
			 hdr->size, size);
		ret = -EINVAL;
	}
	nvkm_falcon_msgq_close(msgq, ret == 0);
	return ret;
}

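/*
 * Initialise the queue's head/tail registers for the given queue index and
 * record its offset; @size is only reported in the debug message.
 */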
void
nvkm_falcon_msgq_init(struct nvkm_falcon_msgq *msgq,
		      u32 index, u32 offset, u32 size)
{
	const struct nvkm_falcon_func *func = msgq->qmgr->falcon->func;

	msgq->head_reg = func->msgq.head + index * func->msgq.stride;
	msgq->tail_reg = func->msgq.tail + index * func->msgq.stride;
	msgq->offset = offset;

	FLCNQ_DBG(msgq, "initialised @ index %d offset 0x%08x size 0x%08x",
		  index, msgq->offset, size);
}

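/* Destroy a message queue created by nvkm_falcon_msgq_new(). */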
void
nvkm_falcon_msgq_del(struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq = *pmsgq;
	if (msgq) {
		kfree(*pmsgq);
		*pmsgq = NULL;
	}
}

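/* Allocate and initialise a message queue for the given queue manager. */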
int
nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
		     struct nvkm_falcon_msgq **pmsgq)
{
	struct nvkm_falcon_msgq *msgq;

	if (!(msgq = *pmsgq = kzalloc(sizeof(*msgq), GFP_KERNEL)))
		return -ENOMEM;

	msgq->qmgr = qmgr;
	msgq->name = name;
	mutex_init(&msgq->mutex);
	return 0;
}