// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Timing queue handling
 *  Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *  MAJOR CHANGES
 *   Nov. 13, 1999  Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client.  i.e. Changing owner to a third client is not
 *       allowed.
 *
 *  Aug. 30, 2000  Takashi Iwai
 *     - Queues are managed in static array again, but with better way.
 *       The API itself is identical.
 *     - The queue is locked when struct snd_seq_queue pointer is returned via
 *       queueptr().  This pointer *MUST* be released afterward by
 *       queuefree(ptr).
 *     - Addition of experimental sync support.
 */
/* list of allocated queues */ staticstruct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES]; static DEFINE_SPINLOCK(queue_list_lock); /* number of queues allocated */ staticint num_queues;
/* return the current number of allocated queues */
int snd_seq_queue_get_cur_queues(void)
{
	return num_queues;
}
/* delete all existing queues */
void snd_seq_queues_delete(void)
{
	int idx;

	/* walk the whole slot array and release every allocated queue */
	for (idx = 0; idx < SNDRV_SEQ_MAX_QUEUES; idx++) {
		struct snd_seq_queue *q = queue_list[idx];

		if (q)
			queue_delete(q);
	}
}
static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue - * return pointer to new queue or ERR_PTR(-errno) for error * The new queue's use_lock is set to 1. It is the caller's responsibility to * call snd_use_lock_free(&q->use_lock).
*/ struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsignedint info_flags)
{ struct snd_seq_queue *q;
/* return pointer to queue structure for specified id;
 * the returned queue is use-locked and *must* be released afterwards
 * with queuefree()
 */
struct snd_seq_queue *queueptr(int queueid)
{
	struct snd_seq_queue *q = NULL;

	if (queueid >= 0 && queueid < SNDRV_SEQ_MAX_QUEUES) {
		/* lock only around the list lookup and use-lock grab */
		guard(spinlock_irqsave)(&queue_list_lock);
		q = queue_list[queueid];
		if (q)
			snd_use_lock_use(&q->use_lock);
	}
	return q;
}
/* return the (first) queue matching with the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
	int idx;

	for (idx = 0; idx < SNDRV_SEQ_MAX_QUEUES; idx++) {
		struct snd_seq_queue *q = queueptr(idx);

		if (!q)
			continue;
		if (strncmp(q->name, name, sizeof(q->name)) == 0)
			return q;	/* still use-locked; caller must queuefree() */
		queuefree(q);
	}
	return NULL;
}
void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{ struct snd_seq_event_cell *cell;
snd_seq_tick_time_t cur_tick;
snd_seq_real_time_t cur_time; int processed = 0;
if (q == NULL) return;
/* make this function non-reentrant */
scoped_guard(spinlock_irqsave, &q->check_lock) { if (q->check_blocked) {
q->check_again = 1; return; /* other thread is already checking queues */
}
q->check_blocked = 1;
}
__again: /* Process tick queue... */
cur_tick = snd_seq_timer_get_cur_tick(q->timer); for (;;) {
cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick); if (!cell) break;
snd_seq_dispatch_event(cell, atomic, hop); if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) goto out; /* the rest processed at the next batch */
}
/* Process time queue... */
cur_time = snd_seq_timer_get_cur_time(q->timer, false); for (;;) {
cell = snd_seq_prioq_cell_out(q->timeq, &cur_time); if (!cell) break;
snd_seq_dispatch_event(cell, atomic, hop); if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) goto out; /* the rest processed at the next batch */
}
/* enqueue a event to singe queue */ int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{ int dest, err; struct snd_seq_queue *q;
if (snd_BUG_ON(!cell)) return -EINVAL;
dest = cell->event.queue; /* destination queue */
q = queueptr(dest); if (q == NULL) return -EINVAL; /* handle relative time stamps, convert them into absolute */ if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) { switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { case SNDRV_SEQ_TIME_STAMP_TICK:
cell->event.time.tick += q->timer->tick.cur_tick; break;
case SNDRV_SEQ_TIME_STAMP_REAL:
snd_seq_inc_real_time(&cell->event.time.time,
&q->timer->cur_time); break;
}
cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
} /* enqueue event in the real-time or midi queue */ switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) { case SNDRV_SEQ_TIME_STAMP_TICK:
err = snd_seq_prioq_cell_in(q->tickq, cell); break;
case SNDRV_SEQ_TIME_STAMP_REAL: default:
err = snd_seq_prioq_cell_in(q->timeq, cell); break;
}
/* check if the client has permission to modify queue parameters. * if it does, lock the queue
*/ staticint queue_access_lock(struct snd_seq_queue *q, int client)
{ int access_ok;
/* exported - only checking permission */ int snd_seq_queue_check_access(int queueid, int client)
{ struct snd_seq_queue *q = queueptr(queueid); int access_ok;
/* open timer -
 * q->use mutex should be down before calling this function to avoid
 * confliction with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
	struct snd_seq_queue *queue = queueptr(queueid);
	int err;

	if (!queue)
		return -EINVAL;

	err = snd_seq_timer_open(queue);
	if (err < 0) {
		/* first open failed; reset to default timer setup and retry once */
		snd_seq_timer_defaults(queue->timer);
		err = snd_seq_timer_open(queue);
	}
	queuefree(queue);
	return err;
}
/* close timer - * q->use mutex should be down before calling this function
*/ int snd_seq_queue_timer_close(int queueid)
{ struct snd_seq_queue *queue; int result = 0;
/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
				  struct snd_seq_queue_tempo *info)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int err;

	if (!q)
		return -EINVAL;
	/* only a client with access permission may change the timer setup */
	if (!queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	err = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
					  info->tempo_base);
	if (err >= 0 && info->skew_base > 0)
		err = snd_seq_timer_set_skew(q->timer, info->skew_value,
					     info->skew_base);

	queue_access_unlock(q);
	queuefree(q);
	return err;
}
/* use or unuse this queue;
 * maintains the per-queue client bitmap and refcount, and opens/closes
 * the queue timer accordingly
 */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		/* reset to default timer setup only for the very first user */
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
}
/* use or unuse this queue - * if it is the first client, starts the timer. * if it is not longer used by any clients, stop the timer.
*/ int snd_seq_queue_use(int queueid, int client, int use)
{ struct snd_seq_queue *queue;
/* * check if queue is used by the client * return negative value if the queue is invalid. * return 0 if not used, 1 if used.
*/ int snd_seq_queue_is_used(int queueid, int client)
{ struct snd_seq_queue *q; int result;
/* final stage notification -
 * remove cells for no longer exist client (for non-owned queue)
 * or delete this queue (for owned queue)
 */
void snd_seq_queue_client_leave(int client)
{
	struct snd_seq_queue *q;
	int idx;

	/* delete own queues from queue list */
	for (idx = 0; idx < SNDRV_SEQ_MAX_QUEUES; idx++) {
		q = queue_list_remove(idx, client);
		if (q)
			queue_delete(q);
	}

	/* remove cells from existing queues -
	 * they are not owned by this client
	 */
	for (idx = 0; idx < SNDRV_SEQ_MAX_QUEUES; idx++) {
		q = queueptr(idx);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			/* drop the client's pending events, then drop its use */
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
		queuefree(q);
	}
}
/*
 * process a received queue-control event.
 * this function is exported for seq_sync.c.
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		/* flush the sender's pending events before (re)starting */
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (!snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (!snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0)
			queue_broadcast_event(q, ev, atomic, hop);
		break;
	}
}
/*
 * Queue control via timer control port:
 * this function is exported as a callback of timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!ev))
		return -EINVAL;
	q = queueptr(ev->data.queue.queue);

	if (q == NULL)
		return -EINVAL;

	if (!queue_access_lock(q, ev->source.client)) {
		queuefree(q);
		return -EPERM;
	}

	/* NOTE(review): the tail of this function was replaced by unrelated
	 * text in the source; restored from the visible contract — dispatch
	 * the control event, then release the access lock and the use-lock
	 * taken by queueptr().  Confirm against the upstream file.
	 */
	snd_seq_queue_process_event(q, ev, atomic, hop);

	queue_access_unlock(q);
	queuefree(q);

	return 0;
}