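/*
 * ALSA sequencer FIFO
 *
 * A spinlock-protected FIFO of event cells: incoming events are
 * duplicated into cells taken from a snd_seq_pool, and readers dequeue
 * them, sleeping on input_sleep until a cell becomes available.
 *
 * Typical call sequence (an illustrative sketch only; the actual call
 * sites live outside this file):
 *
 *	f = snd_seq_fifo_new(poolsize);
 *	...
 *	snd_seq_fifo_event_in(f, event);			// writer side
 *	...
 *	err = snd_seq_fifo_cell_out(f, &cell, nonblock);	// reader side
 *	snd_seq_cell_free(cell);
 *	...
 *	snd_seq_fifo_delete(&f);
 */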
#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"

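/* create a new fifo */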
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

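/* delete the fifo; wakes up sleeping readers and releases the cell pool */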
void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

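/* clear the queue: drop all queued cells and reset the overflow flag */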
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear the overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irq(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irq(&f->lock);
}

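/* enqueue an event to the fifo; the event is duplicated into a pool cell */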
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append the new cell to the fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	cell->next = NULL;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wake up a sleeping reader */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0;
}

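/* dequeue the head cell; the caller must hold f->lock */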
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

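/* dequeue a cell from the fifo, optionally sleeping until one is available */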
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* at least one pending event is required */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}

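/* return a cell to the head of the fifo */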
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}

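/* polling helper: return non-zero if at least one cell is queued */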
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}

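/* change the size of the cell pool; all currently queued cells are freed */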
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate a new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irq(&f->lock);
	/* remember the old pool and exchange it with the new one */
	oldpool = f->pool;
	oldhead = f->head;
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	spin_unlock_irq(&f->lock);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release the cells that were queued in the old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

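/* get the number of unused pool cells; use_lock guards against a concurrent resize */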
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	unsigned long flags;
	int cells;

	if (!f)
		return 0;

	snd_use_lock_use(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	cells = snd_seq_unused_cells(f->pool);
	spin_unlock_irqrestore(&f->lock, flags);
	snd_use_lock_free(&f->use_lock);
	return cells;
}