#include "qmgr.h"

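/*
 * nvkm_falcon_qmgr_seq_acquire() - reserve a free sequence slot
 *
 * Scans the sequence bitmap under seq.mutex for a free entry, marks it
 * in-use and returns it in the PENDING state.  Returns ERR_PTR(-EAGAIN)
 * when every sequence is currently in use.
 */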
struct nvkm_falcon_qmgr_seq *
nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *qmgr)
{
	const struct nvkm_subdev *subdev = qmgr->falcon->owner;
	struct nvkm_falcon_qmgr_seq *seq;
	u32 index;

	mutex_lock(&qmgr->seq.mutex);
	index = find_first_zero_bit(qmgr->seq.tbl, NVKM_FALCON_QMGR_SEQ_NUM);
	if (index >= NVKM_FALCON_QMGR_SEQ_NUM) {
		nvkm_error(subdev, "no free sequence available\n");
		mutex_unlock(&qmgr->seq.mutex);
		return ERR_PTR(-EAGAIN);
	}

	set_bit(index, qmgr->seq.tbl);
	mutex_unlock(&qmgr->seq.mutex);

	seq = &qmgr->seq.id[index];
	seq->state = SEQ_STATE_PENDING;
	return seq;
}

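/*
 * nvkm_falcon_qmgr_seq_release() - return a sequence slot to the pool
 *
 * Resets the sequence state, callback and completion, then clears its
 * bit in the bitmap so nvkm_falcon_qmgr_seq_acquire() can hand it out
 * again.
 */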
void
nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *qmgr,
			     struct nvkm_falcon_qmgr_seq *seq)
{
	/* No need to take seq.mutex here: clear_bit() is atomic. */
	seq->state = SEQ_STATE_FREE;
	seq->callback = NULL;
	reinit_completion(&seq->done);
	clear_bit(seq->id, qmgr->seq.tbl);
}

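/*
 * nvkm_falcon_qmgr_del() - destroy a queue manager
 *
 * Frees the queue manager, if one exists, and clears the caller's
 * pointer.
 */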
void
nvkm_falcon_qmgr_del(struct nvkm_falcon_qmgr **pqmgr)
{
	struct nvkm_falcon_qmgr *qmgr = *pqmgr;
	if (qmgr) {
		kfree(*pqmgr);
		*pqmgr = NULL;
	}
}

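/*
 * nvkm_falcon_qmgr_new() - allocate and initialise a queue manager
 *
 * Allocates the queue manager for @falcon, initialises the sequence
 * mutex and gives every sequence entry its index and a completion to
 * wait on.  Returns 0 on success or -ENOMEM on allocation failure.
 */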
int
nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
		     struct nvkm_falcon_qmgr **pqmgr)
{
	struct nvkm_falcon_qmgr *qmgr;
	int i;

	if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
		return -ENOMEM;

	qmgr->falcon = falcon;
	mutex_init(&qmgr->seq.mutex);
	for (i = 0; i < NVKM_FALCON_QMGR_SEQ_NUM; i++) {
		qmgr->seq.id[i].id = i;
		init_completion(&qmgr->seq.id[i].done);
	}

	return 0;
}
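
/*
 * Illustrative caller sketch (not part of this file's API surface; the
 * real users are the falcon command/message queue code, and names such
 * as submit_cmd() below are hypothetical):
 *
 *	struct nvkm_falcon_qmgr_seq *seq;
 *
 *	seq = nvkm_falcon_qmgr_seq_acquire(qmgr);
 *	if (IS_ERR(seq))
 *		return PTR_ERR(seq);
 *
 *	submit_cmd(..., seq->id);        // tag the command with seq->id
 *	wait_for_completion(&seq->done); // presumably completed by the
 *	                                 // message-queue handler
 *
 *	nvkm_falcon_qmgr_seq_release(qmgr, seq);
 */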