// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999  Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client.  i.e. Changing owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000   Takashi Iwai
 *     - Queues are managed in static array again, but with better way.
 *       The API itself is identical.
 *     - The queue is locked when struct snd_seq_queue pointer is returned via
 *       queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
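
/*
 * Illustrative sketch (not part of the original file): the queueptr()/
 * queuefree() convention described above, as seen from a caller.
 * "queueid" is just a placeholder for any valid queue id.
 *
 *    struct snd_seq_queue *q = queueptr(queueid);
 *    if (!q)
 *        return -EINVAL;
 *    ... use q->timer, q->tickq, etc. while the use_lock is held ...
 *    queuefree(q);    releases the use_lock taken by queueptr()
 */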

#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_fifo.h"
#include "seq_timer.h"
#include "seq_info.h"

/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
    return num_queues;
}

/*----------------------------------------------------------------*/

/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
    int i;
    unsigned long flags;

    spin_lock_irqsave(&queue_list_lock, flags);
    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        if (! queue_list[i]) {
            queue_list[i] = q;
            q->queue = i;
            num_queues++;
            spin_unlock_irqrestore(&queue_list_lock, flags);
            return i;
        }
    }
    spin_unlock_irqrestore(&queue_list_lock, flags);
    return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
    struct snd_seq_queue *q;
    unsigned long flags;

    spin_lock_irqsave(&queue_list_lock, flags);
    q = queue_list[id];
    if (q) {
        spin_lock(&q->owner_lock);
        if (q->owner == client) {
            /* found */
            q->klocked = 1;
            spin_unlock(&q->owner_lock);
            queue_list[id] = NULL;
            num_queues--;
            spin_unlock_irqrestore(&queue_list_lock, flags);
            return q;
        }
        spin_unlock(&q->owner_lock);
    }
    spin_unlock_irqrestore(&queue_list_lock, flags);
    return NULL;
}

/*----------------------------------------------------------------*/

/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
    struct snd_seq_queue *q;

    q = kzalloc(sizeof(*q), GFP_KERNEL);
    if (!q)
        return NULL;

    spin_lock_init(&q->owner_lock);
    spin_lock_init(&q->check_lock);
    mutex_init(&q->timer_mutex);
    snd_use_lock_init(&q->use_lock);
    q->queue = -1;

    q->tickq = snd_seq_prioq_new();
    q->timeq = snd_seq_prioq_new();
    q->timer = snd_seq_timer_new();
    if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
        snd_seq_prioq_delete(&q->tickq);
        snd_seq_prioq_delete(&q->timeq);
        snd_seq_timer_delete(&q->timer);
        kfree(q);
        return NULL;
    }

    q->owner = owner;
    q->locked = locked;
    q->klocked = 0;

    return q;
}

/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
    /* stop and release the timer */
    mutex_lock(&q->timer_mutex);
    snd_seq_timer_stop(q->timer);
    snd_seq_timer_close(q);
    mutex_unlock(&q->timer_mutex);
    /* wait until access free */
    snd_use_lock_sync(&q->use_lock);
    /* release resources... */
    snd_seq_prioq_delete(&q->tickq);
    snd_seq_prioq_delete(&q->timeq);
    snd_seq_timer_delete(&q->timer);

    kfree(q);
}


/*----------------------------------------------------------------*/

/* delete all existing queues */
void snd_seq_queues_delete(void)
{
    int i;

    /* clear list */
    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        if (queue_list[i])
            queue_delete(queue_list[i]);
    }
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);

/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
    struct snd_seq_queue *q;

    q = queue_new(client, locked);
    if (q == NULL)
        return ERR_PTR(-ENOMEM);
    q->info_flags = info_flags;
    queue_use(q, client, 1);
    snd_use_lock_use(&q->use_lock);
    if (queue_list_add(q) < 0) {
        snd_use_lock_free(&q->use_lock);
        queue_delete(q);
        return ERR_PTR(-ENOMEM);
    }
    return q;
}
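
/*
 * Illustrative sketch (not in the original file): a caller of
 * snd_seq_queue_alloc() is expected to balance the use_lock taken above,
 * roughly like this:
 *
 *    q = snd_seq_queue_alloc(client, locked, info_flags);
 *    if (IS_ERR(q))
 *        return PTR_ERR(q);
 *    ... copy q->queue, q->name, etc. back to the requester ...
 *    snd_use_lock_free(&q->use_lock);
 */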

/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
    struct snd_seq_queue *q;

    if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
        return -EINVAL;
    q = queue_list_remove(queueid, client);
    if (q == NULL)
        return -EINVAL;
    queue_delete(q);

    return 0;
}


/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
    struct snd_seq_queue *q;
    unsigned long flags;

    if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
        return NULL;
    spin_lock_irqsave(&queue_list_lock, flags);
    q = queue_list[queueid];
    if (q)
        snd_use_lock_use(&q->use_lock);
    spin_unlock_irqrestore(&queue_list_lock, flags);
    return q;
}

/* return the (first) queue matching the specified name.
 * The returned queue is still use-locked; release it with queuefree().
 */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
    int i;
    struct snd_seq_queue *q;

    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queueptr(i);
        if (q) {
            if (strncmp(q->name, name, sizeof(q->name)) == 0)
                return q;
            queuefree(q);
        }
    }
    return NULL;
}


/* -------------------------------------------------------- */

#define MAX_CELL_PROCESSES_IN_QUEUE 1000

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
    unsigned long flags;
    struct snd_seq_event_cell *cell;
    snd_seq_tick_time_t cur_tick;
    snd_seq_real_time_t cur_time;
    int processed = 0;

    if (q == NULL)
        return;

    /* make this function non-reentrant */
    spin_lock_irqsave(&q->check_lock, flags);
    if (q->check_blocked) {
        q->check_again = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);
        return;     /* other thread is already checking queues */
    }
    q->check_blocked = 1;
    spin_unlock_irqrestore(&q->check_lock, flags);

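    /*
     * From here this context owns the check: drain due events from the
     * tick and real-time priority queues.  If another context calls
     * snd_seq_check_queue() in the meantime, it only sets check_again and
     * returns, and the code below loops back to __again to pick up that
     * work.
     */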
      __again:
    /* Process tick queue... */
    cur_tick = snd_seq_timer_get_cur_tick(q->timer);
    for (;;) {
        cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
        if (!cell)
            break;
        snd_seq_dispatch_event(cell, atomic, hop);
        if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
            goto out; /* the rest processed at the next batch */
    }

    /* Process time queue... */
    cur_time = snd_seq_timer_get_cur_time(q->timer, false);
    for (;;) {
        cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
        if (!cell)
            break;
        snd_seq_dispatch_event(cell, atomic, hop);
        if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
            goto out; /* the rest processed at the next batch */
    }

 out:
    /* free lock */
    spin_lock_irqsave(&q->check_lock, flags);
    if (q->check_again) {
        q->check_again = 0;
        if (processed < MAX_CELL_PROCESSES_IN_QUEUE) {
            spin_unlock_irqrestore(&q->check_lock, flags);
            goto __again;
        }
    }
    q->check_blocked = 0;
    spin_unlock_irqrestore(&q->check_lock, flags);
}


/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
    int dest, err;
    struct snd_seq_queue *q;

    if (snd_BUG_ON(!cell))
        return -EINVAL;
    dest = cell->event.queue;   /* destination queue */
    q = queueptr(dest);
    if (q == NULL)
        return -EINVAL;
    /* handle relative time stamps, convert them into absolute */
    if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
        switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
        case SNDRV_SEQ_TIME_STAMP_TICK:
            cell->event.time.tick += q->timer->tick.cur_tick;
            break;

        case SNDRV_SEQ_TIME_STAMP_REAL:
            snd_seq_inc_real_time(&cell->event.time.time,
                          &q->timer->cur_time);
            break;
        }
        cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
        cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
    }
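    /*
     * Example (illustrative): a relative tick event with time.tick == 240
     * enqueued while the queue is at tick 960 is rescheduled by the block
     * above to absolute tick 1200 and flagged SNDRV_SEQ_TIME_MODE_ABS.
     */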
    /* enqueue event in the real-time or midi queue */
    switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
    case SNDRV_SEQ_TIME_STAMP_TICK:
        err = snd_seq_prioq_cell_in(q->tickq, cell);
        break;

    case SNDRV_SEQ_TIME_STAMP_REAL:
    default:
        err = snd_seq_prioq_cell_in(q->timeq, cell);
        break;
    }

    if (err < 0) {
        queuefree(q); /* unlock */
        return err;
    }

    /* trigger dispatching */
    snd_seq_check_queue(q, atomic, hop);

    queuefree(q); /* unlock */

    return 0;
}


/*----------------------------------------------------------------*/

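/*
 * A client may modify a queue if it owns it, or if the queue is neither
 * locked by its owner (q->locked) nor temporarily kernel-locked
 * (q->klocked, see queue_access_lock() below).
 */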
static inline int check_access(struct snd_seq_queue *q, int client)
{
    return (q->owner == client) || (!q->locked && !q->klocked);
}

/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
    unsigned long flags;
    int access_ok;

    spin_lock_irqsave(&q->owner_lock, flags);
    access_ok = check_access(q, client);
    if (access_ok)
        q->klocked = 1;
    spin_unlock_irqrestore(&q->owner_lock, flags);
    return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
    unsigned long flags;

    spin_lock_irqsave(&q->owner_lock, flags);
    q->klocked = 0;
    spin_unlock_irqrestore(&q->owner_lock, flags);
}

/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
    struct snd_seq_queue *q = queueptr(queueid);
    int access_ok;
    unsigned long flags;

    if (! q)
        return 0;
    spin_lock_irqsave(&q->owner_lock, flags);
    access_ok = check_access(q, client);
    spin_unlock_irqrestore(&q->owner_lock, flags);
    queuefree(q);
    return access_ok;
}

/*----------------------------------------------------------------*/

/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
    struct snd_seq_queue *q = queueptr(queueid);
    unsigned long flags;

    if (q == NULL)
        return -EINVAL;

    if (! queue_access_lock(q, client)) {
        queuefree(q);
        return -EPERM;
    }

    spin_lock_irqsave(&q->owner_lock, flags);
    q->locked = locked ? 1 : 0;
    q->owner = client;
    spin_unlock_irqrestore(&q->owner_lock, flags);
    queue_access_unlock(q);
    queuefree(q);

    return 0;
}


/*----------------------------------------------------------------*/

/* open timer -
 * the queue's timer_mutex should be held before calling this function
 * to avoid racing with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
    int result = 0;
    struct snd_seq_queue *queue;
    struct snd_seq_timer *tmr;

    queue = queueptr(queueid);
    if (queue == NULL)
        return -EINVAL;
    tmr = queue->timer;
    result = snd_seq_timer_open(queue);
    if (result < 0) {
        snd_seq_timer_defaults(tmr);
        result = snd_seq_timer_open(queue);
    }
    queuefree(queue);
    return result;
}

/* close timer -
 * the queue's timer_mutex should be held before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
    struct snd_seq_queue *queue;
    int result = 0;

    queue = queueptr(queueid);
    if (queue == NULL)
        return -EINVAL;
    snd_seq_timer_close(queue);
    queuefree(queue);
    return result;
}
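
/*
 * Illustrative sketch (not part of the original file): callers that
 * reconfigure a queue timer are expected to hold the queue's timer_mutex
 * around the close/open pair above, roughly:
 *
 *    mutex_lock(&q->timer_mutex);
 *    snd_seq_queue_timer_close(queueid);
 *    ... update the timer setup ...
 *    err = snd_seq_queue_timer_open(queueid);
 *    mutex_unlock(&q->timer_mutex);
 */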

/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
                  struct snd_seq_queue_tempo *info)
{
    struct snd_seq_queue *q = queueptr(queueid);
    int result;

    if (q == NULL)
        return -EINVAL;
    if (! queue_access_lock(q, client)) {
        queuefree(q);
        return -EPERM;
    }

    result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
    if (result >= 0 && info->skew_base > 0)
        result = snd_seq_timer_set_skew(q->timer, info->skew_value,
                        info->skew_base);
    queue_access_unlock(q);
    queuefree(q);
    return result;
}

/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
    if (use) {
        if (!test_and_set_bit(client, queue->clients_bitmap))
            queue->clients++;
    } else {
        if (test_and_clear_bit(client, queue->clients_bitmap))
            queue->clients--;
    }
    if (queue->clients) {
        if (use && queue->clients == 1)
            snd_seq_timer_defaults(queue->timer);
        snd_seq_timer_open(queue);
    } else {
        snd_seq_timer_close(queue);
    }
}

/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if the queue is no longer used by any client, stop the timer.
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
    struct snd_seq_queue *queue;

    queue = queueptr(queueid);
    if (queue == NULL)
        return -EINVAL;
    mutex_lock(&queue->timer_mutex);
    queue_use(queue, client, use);
    mutex_unlock(&queue->timer_mutex);
    queuefree(queue);
    return 0;
}

/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
    struct snd_seq_queue *q;
    int result;

    q = queueptr(queueid);
    if (q == NULL)
        return -EINVAL; /* invalid queue */
    result = test_bit(client, q->clients_bitmap) ? 1 : 0;
    queuefree(q);
    return result;
}


/*----------------------------------------------------------------*/

/* final stage notification -
 * remove cells for a client that no longer exists (for non-owned queues)
 * or delete the queue itself (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
    int i;
    struct snd_seq_queue *q;

    /* delete own queues from queue list */
    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queue_list_remove(i, client);
        if (q)
            queue_delete(q);
    }

    /* remove cells from existing queues -
     * they are not owned by this client
     */
    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queueptr(i);
        if (!q)
            continue;
        if (test_bit(client, q->clients_bitmap)) {
            snd_seq_prioq_leave(q->tickq, client, 0);
            snd_seq_prioq_leave(q->timeq, client, 0);
            snd_seq_queue_use(q->queue, client, 0);
        }
        queuefree(q);
    }
}



/*----------------------------------------------------------------*/

/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
    int i;
    struct snd_seq_queue *q;

    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queueptr(i);
        if (!q)
            continue;
        snd_seq_prioq_leave(q->tickq, client, 0);
        snd_seq_prioq_leave(q->timeq, client, 0);
        queuefree(q);
    }
}

/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
    int i;
    struct snd_seq_queue *q;

    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queueptr(i);
        if (!q)
            continue;
        if (test_bit(client, q->clients_bitmap) &&
            (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
             q->queue == info->queue)) {
            snd_seq_prioq_remove_events(q->tickq, client, info);
            snd_seq_prioq_remove_events(q->timeq, client, info);
        }
        queuefree(q);
    }
}

/*----------------------------------------------------------------*/

/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
                  int atomic, int hop)
{
    struct snd_seq_event sev;

    sev = *ev;

    sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
    sev.time.tick = q->timer->tick.cur_tick;
    sev.queue = q->queue;
    sev.data.queue.queue = q->queue;

    /* broadcast events from Timer port */
    sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
    sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
    sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
    snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}

/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
                    struct snd_seq_event *ev,
                    int atomic, int hop)
{
    switch (ev->type) {
    case SNDRV_SEQ_EVENT_START:
        snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
        snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
        if (! snd_seq_timer_start(q->timer))
            queue_broadcast_event(q, ev, atomic, hop);
        break;

    case SNDRV_SEQ_EVENT_CONTINUE:
        if (! snd_seq_timer_continue(q->timer))
            queue_broadcast_event(q, ev, atomic, hop);
        break;

    case SNDRV_SEQ_EVENT_STOP:
        snd_seq_timer_stop(q->timer);
        queue_broadcast_event(q, ev, atomic, hop);
        break;

    case SNDRV_SEQ_EVENT_TEMPO:
        snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
        queue_broadcast_event(q, ev, atomic, hop);
        break;

    case SNDRV_SEQ_EVENT_SETPOS_TICK:
        if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
            queue_broadcast_event(q, ev, atomic, hop);
        }
        break;

    case SNDRV_SEQ_EVENT_SETPOS_TIME:
        if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
            queue_broadcast_event(q, ev, atomic, hop);
        }
        break;
    case SNDRV_SEQ_EVENT_QUEUE_SKEW:
        if (snd_seq_timer_set_skew(q->timer,
                       ev->data.queue.param.skew.value,
                       ev->data.queue.param.skew.base) == 0) {
            queue_broadcast_event(q, ev, atomic, hop);
        }
        break;
    }
}


/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
    struct snd_seq_queue *q;

    if (snd_BUG_ON(!ev))
        return -EINVAL;
    q = queueptr(ev->data.queue.queue);

    if (q == NULL)
        return -EINVAL;

    if (! queue_access_lock(q, ev->source.client)) {
        queuefree(q);
        return -EPERM;
    }

    snd_seq_queue_process_event(q, ev, atomic, hop);

    queue_access_unlock(q);
    queuefree(q);
    return 0;
}
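
/*
 * Illustrative sketch (not in the original file): a queue is typically
 * controlled by sending one of the events handled above to the timer
 * control port, roughly:
 *
 *    struct snd_seq_event ev;
 *
 *    memset(&ev, 0, sizeof(ev));
 *    ev.type = SNDRV_SEQ_EVENT_START;
 *    ev.data.queue.queue = queueid;
 *    ... deliver ev to the timer control port ...
 *
 * snd_seq_control_queue() above then checks access against
 * ev.source.client and hands the event to snd_seq_queue_process_event().
 */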


/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
                  struct snd_info_buffer *buffer)
{
    int i, bpm;
    struct snd_seq_queue *q;
    struct snd_seq_timer *tmr;
    bool locked;
    int owner;

    for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
        q = queueptr(i);
        if (!q)
            continue;

        tmr = q->timer;
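        /*
         * BPM is derived below as 60,000,000 / tempo, i.e. the tempo value
         * is treated as microseconds per quarter note (500000 -> 120 BPM).
         */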
        if (tmr->tempo)
            bpm = 60000000 / tmr->tempo;
        else
            bpm = 0;

        spin_lock_irq(&q->owner_lock);
        locked = q->locked;
        owner = q->owner;
        spin_unlock_irq(&q->owner_lock);

        snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
        snd_iprintf(buffer, "owned by client    : %d\n", owner);
        snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
        snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
        snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
        snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
        snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
        snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
        snd_iprintf(buffer, "current BPM        : %d\n", bpm);
        snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
        snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
        snd_iprintf(buffer, "\n");
        queuefree(q);
    }
}
#endif /* CONFIG_SND_PROC_FS */