/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "qmgr.h"

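/* Allocate a free sequence slot from the manager's bitmap.  The returned
 * sequence is marked PENDING; if every slot is in use, ERR_PTR(-EAGAIN)
 * is returned instead.
 */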
struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
{
    const struct nvkm_subdev *subdev = qmgr->falcon->owner;
    struct nvkm_falcon_qmgr_seq *seq;
    u32 index;

    mutex_lock(&qmgr->seq.mutex);
    index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
    if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
        nvkm_error(subdev, "no free sequence available\n");
        mutex_unlock(&qmgr->seq.mutex);
        return ERR_PTR(-EAGAIN);
    }

    set_bit(index, qmgr->seq.tbl);
    mutex_unlock(&qmgr->seq.mutex);

    seq = &qmgr->seq.id[index];
    seq->state = SEQ_STATE_PENDING;
    return seq;
}

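/* Return a sequence to the free pool, clearing its callback and
 * resetting its completion so the slot can be acquired again.
 */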
void
nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
                             struct nvkm_falcon_qmgr_seq *seq)
{
    /* no need to acquire seq.mutex since clear_bit is atomic */
    seq->state = SEQ_STATE_FREE;
    seq->callback = NULL;
    reinit_completion(&seq->done);
    clear_bit(seq->id, qmgr->seq.tbl);
}

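/* Destroy a queue manager and clear the caller's pointer. */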
void
nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
{
    struct nvkm_falcon_qmgr *qmgr = *pqmgr;
    if (qmgr) {
        kfree(*pqmgr);
        *pqmgr = NULL;
    }
}

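/* Allocate and initialise a queue manager for the given falcon: number
 * each sequence slot and prepare the completion it will wait on.
 */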
int
nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
                     struct nvkm_falcon_qmgr **pqmgr)
{
    struct nvkm_falcon_qmgr *qmgr;
    int i;

    if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
        return -ENOMEM;

    qmgr->falcon = falcon;
    mutex_init(&qmgr->seq.mutex);
    for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
        qmgr->seq.id[i].id = i;
        init_completion(&qmgr->seq.id[i].done);
    }

    return 0;
}
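
/*
 * Example lifecycle (hypothetical caller, not part of this file): the
 * intended pattern is one manager per falcon, one sequence acquired per
 * in-flight command, released once the firmware has replied.  The
 * command-submission and wait steps are elided below; only the qmgr
 * calls shown are defined here, everything else is assumed context.
 *
 *    struct nvkm_falcon_qmgr *qmgr;
 *    struct nvkm_falcon_qmgr_seq *seq;
 *    int ret;
 *
 *    ret = nvkm_falcon_qmgr_new(falcon, &qmgr);
 *    if (ret)
 *        return ret;
 *
 *    seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
 *    if (IS_ERR(seq))
 *        return PTR_ERR(seq);
 *
 *    ... tag a command with seq->id, submit it, wait on seq->done ...
 *
 *    nvkm_falcon_qmgr_seq_release(qmgr, seq);
 *    nvkm_falcon_qmgr_del(&qmgr);
 */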