/*
 * ALSA sequencer memory manager: fixed-size pools of event cells,
 * with support for variable-length event data held in kernel space,
 * user space, or chained pool cells.
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* number of cells currently available (free) in the pool */
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

/* enough room for further output? (at least pool->room cells free) */
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}
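
/*
 * Worked example (illustrative): for a pool created with poolsize 500,
 * snd_seq_pool_init() below sets room = (500 + 1) / 2 = 250, so
 * snd_seq_output_ok() reports writable space only once at least half
 * of the pool is free.  pool->room thus acts as a wakeup watermark for
 * writers blocked in snd_seq_cell_alloc().
 */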

/*
 * Variable-length events:
 * events such as sysex carry their payload outside the fixed-size
 * event record.  data.ext.len holds the payload length plus flag bits,
 * and the payload itself can live in one of three places:
 *  1) kernel space: data.ext.ptr is a kernel buffer (no flag set)
 *  2) user space: data.ext.ptr is a user-space pointer
 *     (SNDRV_SEQ_EXT_USRPTR set)
 *  3) chained cells: the payload is split across additional pool
 *     cells linked via cell->next (SNDRV_SEQ_EXT_CHAINED set)
 */

/* return the payload length of a variable-length event, or -EINVAL */
static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}
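
/*
 * Example (illustrative): a 600-byte sysex payload queued from user
 * space arrives with data.ext.len == 600 | SNDRV_SEQ_EXT_USRPTR;
 * masking with ~SNDRV_SEQ_EXT_MASK recovers the raw length, 600.
 */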

/*
 * Call @func for each chunk of a variable-length event's payload,
 * bouncing user-space data through a small on-stack buffer when needed.
 */
int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	len = get_var_len(event);
	if (len <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	/* chained cells: walk the chain, one event-sized chunk per cell */
	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
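
/*
 * Caller sketch (hypothetical, not part of this file): a consumer can
 * total up a payload by handing snd_seq_dump_var_event() a small
 * accumulator callback; "count_bytes" is an invented name here.
 *
 *	static int count_bytes(void *priv, void *buf, int count)
 *	{
 *		*(int *)priv += count;
 *		return 0;
 *	}
 *
 *	int total = 0;
 *	int err = snd_seq_dump_var_event(ev, count_bytes, &total);
 */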

/*
 * Helpers for snd_seq_expand_var_event(): copy one chunk into a
 * kernel or user destination buffer and advance the write pointer.
 */
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

/*
 * Expand a variable-length event's payload into the linear buffer
 * @buf (kernel space if @in_kernel, else user space).  Returns the
 * number of bytes written, rounded up to @size_aligned if non-zero,
 * or a negative error code; -EAGAIN means @count is too small.
 */
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	len = get_var_len(event);
	if (len < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		/* a user-space source can only be copied to a kernel buffer */
		if (!in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
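
/*
 * Caller sketch (illustrative): flattening an event's payload into a
 * local kernel buffer.  in_kernel=1 because buf is kernel memory, and
 * size_aligned=0 requests no padding of the returned length.
 *
 *	char buf[256];
 *	int len = snd_seq_expand_var_event(ev, sizeof(buf), buf, 1, 0);
 *	if (len < 0)
 *		return len;	(-EAGAIN if buf is too small)
 */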

/*
 * Cell deallocation: cells go back onto the pool's free list, and any
 * writer sleeping for room is woken once enough cells are available.
 */

/* push one cell back onto the free list; called with pool->lock held */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell *cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		/* release the chained payload cells, too */
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * Allocate one cell from the pool.  May block (unless @nonblock) until
 * a cell is freed; @mutexp, if given, is dropped across the sleep so
 * it is not held while waiting.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file,
			      struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_entry_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && !nonblock && !pool->closing) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irqrestore(&pool->lock, flags);
		if (mutexp)
			mutex_unlock(mutexp);
		schedule();
		if (mutexp)
			mutex_lock(mutexp);
		spin_lock_irqsave(&pool->lock, flags);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}

/*
 * Duplicate an event into newly allocated pool cell(s).  A variable-
 * length payload is copied into extra chained cells, whatever its
 * source format (kernel buffer, user-space buffer, or an existing
 * chain).
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event));
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
	if (err < 0)
		return err;

	/* copy the event record */
	cell->event = *event;

	/* decompose the variable-length payload into chained cells */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
						 mutexp);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy one chunk from the appropriate source */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
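
/*
 * Example of the resulting layout (illustrative; assumes a build where
 * sizeof(struct snd_seq_event) is 28 bytes): a 700-byte sysex payload
 * needs DIV_ROUND_UP(700, 28) = 25 extra cells.  The head cell keeps
 * the event record with data.ext.len = 700 | SNDRV_SEQ_EXT_CHAINED,
 * and data.ext.ptr points at the first payload cell, linked onward
 * through ->next.
 */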

/* poll wait: report whether the pool has room for further output */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}

/* allocate room specified by pool->size and chain up the free list */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
				 GFP_KERNEL);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irq(&pool->lock);
	if (pool->ptr) {
		/* already initialized by a racing caller; keep the old array */
		spin_unlock_irq(&pool->lock);
		kvfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irq(&pool->lock);
	return 0;
}

/* refuse further cell allocations; called before shutting the pool down */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events from the pool and release its memory */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for all in-flight cells to be freed */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all cells */
	spin_lock_irq(&pool->lock);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irq(&pool->lock);

	kvfree(ptr);

	spin_lock_irq(&pool->lock);
	pool->closing = 0;
	spin_unlock_irq(&pool->lock);

	return 0;
}

/* create a pool object; the cell array itself is allocated by snd_seq_pool_init() */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}
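
/*
 * Lifecycle sketch (illustrative): a typical setup and teardown using
 * the functions in this file; 500 is just an example pool size.
 *
 *	struct snd_seq_pool *pool;
 *	int err;
 *
 *	pool = snd_seq_pool_new(500);
 *	if (!pool)
 *		return -ENOMEM;
 *	err = snd_seq_pool_init(pool);
 *	if (err < 0) {
 *		snd_seq_pool_delete(&pool);
 *		return err;
 *	}
 *	...
 *	snd_seq_pool_delete(&pool);	(marks closing, drains, frees)
 */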

/* shut down and release a pool; *ppool is cleared first */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* exported to seq_info.c: print pool statistics to the proc buffer */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
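
/*
 * With a one-space indent passed as "space", the resulting proc output
 * looks like this (values illustrative):
 *
 *	 Pool size          : 500
 *	 Cells in use       : 12
 *	 Peak cells in use  : 57
 *	 Alloc success      : 13042
 *	 Alloc failures     : 0
 */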