// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Client Manager
 *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                             Jaroslav Kysela <perex@perex.cz>
 *                             Takashi Iwai <tiwai@suse.de>
 *
 *  this module handles the connections of userland and kernel clients
 */

/*
 * There are four ranges of client numbers (last two shared):
 * 0..15: global clients
 * 16..127: statically allocated client numbers for cards 0..27
 * 128..191: dynamically allocated client numbers for cards 28..31
 * 128..191: dynamically allocated client numbers for applications
 */
/* number of kernel non-card clients */
#define SNDRV_SEQ_GLOBAL_CLIENTS	16
/* clients per cards, for static clients */
#define SNDRV_SEQ_CLIENTS_PER_CARD	4
/* dynamically allocated client numbers (both kernel drivers and user space) */
#define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128
/*
 * Look up the client for the given id and take a reference on it.
 * Returns NULL when the client does not exist; never attempts module
 * autoloading.  Release with snd_seq_client_unlock().
 */
struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	return client_use_ptr(clientid, false);
}
/* get snd_seq_client object for the given id; * if not found, retry after loading the modules
*/ staticstruct snd_seq_client *client_load_and_use_ptr(int clientid)
{ return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES));
}
/* Take refcount and perform ioctl_mutex lock on the given client; * used only for OSS sequencer * Unlock via snd_seq_client_ioctl_unlock() below
*/ bool snd_seq_client_ioctl_lock(int clientid)
{ struct snd_seq_client *client;
client = client_load_and_use_ptr(clientid); if (!client) returnfalse;
mutex_lock(&client->ioctl_mutex); /* The client isn't unrefed here; see snd_seq_client_ioctl_unlock() */ returntrue;
}
EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_lock);
/* Unlock and unref the given client; for OSS sequencer use only */
void snd_seq_client_ioctl_unlock(int clientid)
{
	struct snd_seq_client *client = snd_seq_client_use_ptr(clientid);

	if (WARN_ON(!client))
		return;
	mutex_unlock(&client->ioctl_mutex);
	/* Unreffing twice is intentional: the first call releases the
	 * reference kept by snd_seq_client_ioctl_lock(), the second one
	 * releases the reference taken by snd_seq_client_use_ptr() just
	 * above.
	 */
	snd_seq_client_unlock(client);
	snd_seq_client_unlock(client);
}
EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_unlock);
/* account @num newly allocated objects in @res, tracking the peak usage */
static void usage_alloc(struct snd_seq_usage *res, int num)
{
	int total = res->cur + num;

	res->cur = total;
	if (res->peak < total)
		res->peak = total;
}
/* fill client data */
user->file = file;
sprintf(client->name, "Client-%d", c);
client->data.user.owner = get_pid(task_pid(current));
/* make others aware this new client */
snd_seq_system_client_ev_client_start(c);
return 0;
}
/* delete a user client */ staticint snd_seq_release(struct inode *inode, struct file *file)
{ struct snd_seq_client *client = file->private_data;
if (client) {
seq_free_client(client); if (client->data.user.fifo)
snd_seq_fifo_delete(&client->data.user.fifo); #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
free_ump_info(client); #endif
put_pid(client->data.user.owner);
kfree(client);
}
return 0;
}
staticbool event_is_compatible(conststruct snd_seq_client *client, conststruct snd_seq_event *ev)
{ if (snd_seq_ev_is_ump(ev) && !client->midi_version) returnfalse; if (snd_seq_ev_is_ump(ev) && snd_seq_ev_is_variable(ev)) returnfalse; returntrue;
}
/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	no enough user-space buffer to write the whole event
 *	-EFAULT	seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	size_t aligned_size;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;

	/* reading requires the file to be opened for input */
	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;

	if (!access_ok(buf, count))
		return -EFAULT;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_input)
		return -ENXIO;
	fifo = client->data.user.fifo;
	if (!fifo)
		return -ENXIO;

	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}

	/* NOTE(review): the FIFO lock and the event-copy loop that should
	 * initialize `err`, `cell`, `result` and `aligned_size` appear to be
	 * missing from this copy of the source — as shown, `err` and `cell`
	 * are read without ever being assigned.  Verify against the original
	 * file before relying on this function.
	 */
	if (err < 0) {
		/* put back the last cell that could not be copied out */
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		/* -EAGAIN after partial success is reported as success */
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);

	return (err < 0) ? err : result;
}
/* * check access permission to the port
*/ staticint check_port_perm(struct snd_seq_client_port *port, unsignedint flags)
{ if ((port->capability & flags) != flags) return 0; return flags;
}
/* * check if the destination client is available, and return the pointer
*/ staticstruct snd_seq_client *get_event_dest_client(struct snd_seq_event *event)
{ struct snd_seq_client *dest;
dest = snd_seq_client_use_ptr(event->dest.client); if (dest == NULL) return NULL; if (! dest->accept_input) goto __not_avail; if (snd_seq_ev_is_ump(event)) return dest; /* ok - no filter checks */
if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
! test_bit(event->type, dest->event_filter)) goto __not_avail;
/*
 * Return the error event.
 *
 * If the receiver client is a user client, the original event is
 * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
 * the original event is also variable length, the external data is
 * copied after the event record.
 * If the receiver client is a kernel client, the original event is
 * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
 * kmalloc.
 */
static int bounce_error_event(struct snd_seq_client *client,
			      struct snd_seq_event *event,
			      int err, int atomic, int hop)
{
	struct snd_seq_event bounce_ev;
	int result;

	/* build the quoted error event, sent from the system announce port */
	memset(&bounce_ev, 0, sizeof(bounce_ev));
	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
	bounce_ev.dest.client = client->number;
	bounce_ev.dest.port = event->source.port;
	bounce_ev.data.quote.origin = event->dest;
	bounce_ev.data.quote.event = event;
	bounce_ev.data.quote.value = -err;	/* use positive value */

	result = snd_seq_deliver_single_event(NULL, &bounce_ev, atomic, hop + 1);
	if (result < 0)
		client->event_lost++;
	return result;
}
/* * rewrite the time-stamp of the event record with the curren time * of the given queue. * return non-zero if updated.
*/ staticint update_timestamp_of_queue(struct snd_seq_event *event, int queue, int real_time)
{ struct snd_seq_queue *q;
/* deliver a single event; called from below and UMP converter */
int __snd_seq_deliver_single_event(struct snd_seq_client *dest,
				   struct snd_seq_client_port *dest_port,
				   struct snd_seq_event *event,
				   int atomic, int hop)
{
	/* a user client receives events via its FIFO ... */
	if (dest->type == USER_CLIENT) {
		if (!dest->data.user.fifo)
			return 0;
		return snd_seq_fifo_event_in(dest->data.user.fifo, event);
	}
	/* ... while a kernel client receives them via the port callback */
	if (dest->type == KERNEL_CLIENT) {
		if (!dest_port->event_input)
			return 0;
		return dest_port->event_input(event,
					      snd_seq_ev_is_direct(event),
					      dest_port->private_data,
					      atomic, hop);
	}
	return 0;
}
/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *		 <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;

	/* remember before delivery: a direct event must not be bounced back */
	direct = snd_seq_ev_is_direct(event);

	dest = get_event_dest_client(event);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;

	/* check permission */
	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}

	/* rewrite the timestamp when the port requests it */
	if (dest_port->timestamping)
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);

#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
	if (snd_seq_ev_is_ump(event)) {
		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
			/* UMP sender -> legacy receiver: convert on the fly */
			result = snd_seq_deliver_from_ump(client, dest, dest_port,
							  event, atomic, hop);
			goto __skip;
		} else if (dest->type == USER_CLIENT &&
			   !snd_seq_client_is_ump(dest)) {
			result = 0; // drop the event
			goto __skip;
		}
	} else if (snd_seq_client_is_ump(dest)) {
		if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
			/* legacy sender -> UMP receiver: convert on the fly */
			result = snd_seq_deliver_to_ump(client, dest, dest_port,
							event, atomic, hop);
			goto __skip;
		}
	}
#endif /* CONFIG_SND_SEQ_UMP */

	result = __snd_seq_deliver_single_event(dest, dest_port, event,
						atomic, hop);

  __skip:
	/* release the port and client references taken above */
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);

	if (result < 0 && !direct) {
		/* report the failure back to the sender as a bounce event */
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}
/*
 * send the event to all subscribers:
 */
static int __deliver_to_subscribers(struct snd_seq_client *client,
				    struct snd_seq_event *event,
				    int port, int atomic, int hop)
{
	struct snd_seq_client_port *src_port;
	struct snd_seq_subscribers *subs;
	int err, result = 0, num_ev = 0;
	union __snd_seq_event event_saved;
	size_t saved_size;
	struct snd_seq_port_subs_info *grp;

	if (port < 0)
		return 0;
	src_port = snd_seq_port_use_ptr(client, port);
	if (!src_port)
		return 0;

	/* save original event record; delivery below rewrites dest and may
	 * rewrite the timestamp, so it is restored after each delivery
	 */
	saved_size = snd_seq_event_packet_size(event);
	memcpy(&event_saved, event, saved_size);
	grp = &src_port->c_src;

	/* lock list: spinlock in atomic context, rwsem otherwise
	 * (nested by hop count to keep lockdep happy on re-entry)
	 */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read_nested(&grp->list_mutex, hop);
	list_for_each_entry(subs, &grp->list_head, src_list) {
		/* both ports ready? */
		if (atomic_read(&subs->ref_count) != 2)
			continue;
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert time according to flag with subscription */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event, atomic, hop);
		if (err < 0) {
			/* save first error that occurs and continue */
			if (!result)
				result = err;
			continue;
		}
		num_ev++;
		/* restore original event record */
		memcpy(event, &event_saved, saved_size);
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	snd_seq_port_unlock(src_port);
	/* restore the original record once more for the error-continue path */
	memcpy(event, &event_saved, saved_size);
	return (result < 0) ? result : num_ev;
}
/*
 * Deliver the event to the subscribers of its source port; when UMP is
 * enabled and the client is a UMP client with an endpoint port, the event
 * is additionally fanned out to the related group/endpoint port.
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	int ret;
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
	int ret2;
#endif

	ret = __deliver_to_subscribers(client, event,
				       event->source.port, atomic, hop);
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
	if (!snd_seq_client_is_ump(client) || client->ump_endpoint_port < 0)
		return ret;
	/* If it's an event from EP port (and with a UMP group),
	 * deliver to subscribers of the corresponding UMP group port, too.
	 * Or, if it's from non-EP port, deliver to subscribers of EP port, too.
	 */
	if (event->source.port == client->ump_endpoint_port)
		ret2 = __deliver_to_subscribers(client, event,
						snd_seq_ump_group_port(event),
						atomic, hop);
	else
		ret2 = __deliver_to_subscribers(client, event,
						client->ump_endpoint_port,
						atomic, hop);
	/* the secondary delivery error takes precedence */
	if (ret2 < 0)
		return ret2;
#endif
	return ret;
}
/* deliver an event to the destination port(s). * if the event is to subscribers or broadcast, the event is dispatched * to multiple targets. * * RETURN VALUE: n > 0 : the number of delivered events. * n == 0 : the event was not passed to any client. * n < 0 : error - event was not processed.
*/ staticint snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event, int atomic, int hop)
{ int result;
hop++; if (hop >= SNDRV_SEQ_MAX_HOPS) {
pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
event->source.client, event->source.port,
event->dest.client, event->dest.port); return -EMLINK;
}
if (snd_seq_ev_is_variable(event) &&
snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR))) return -EINVAL;
if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
result = deliver_to_subscribers(client, event, atomic, hop); else
result = snd_seq_deliver_single_event(client, event, atomic, hop);
return result;
}
/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or after enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 *  RETURN VALUE: n > 0  : the number of delivered events.
 *                n == 0 : the event was not passed to any client.
 *                n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;

	if (snd_BUG_ON(!cell))
		return -EINVAL;

	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}

	if (!snd_seq_ev_is_ump(&cell->event) &&
	    cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;

		/* reserve this event to enqueue note-off later;
		 * a copy is delivered now as NOTEON
		 */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms; carry ns overflow into sec */
			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;

		/* Now queue this cell as the note off event */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */

	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}

	snd_seq_client_unlock(client);
	return result;
}
/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop,
					struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	} else if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}

	/* direct event processing without enqueued */
	if (snd_seq_ev_is_direct(event)) {
		if (!snd_seq_ev_is_ump(event) &&
		    event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}

	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */

	/* NOTE(review): the cell-allocation call (presumably a
	 * snd_seq_event_dup() using @file, @blocking and @mutexp) that
	 * should initialize `cell` appears to be missing from this copy
	 * of the source — `cell` is used below without being assigned.
	 * Verify against the original file.
	 */
	/* we got a cell. enqueue it. */
	err = snd_seq_enqueue_event(cell, atomic, hop);
	if (err < 0) {
		snd_seq_cell_free(cell);
		return err;
	}

	return 0;
}
/*
 * check validity of event type and data length.
 * return non-zero if invalid.
 */
static int check_event_type_and_length(struct snd_seq_event *ev)
{
	int length_type = snd_seq_ev_length_type(ev);

	if (length_type == SNDRV_SEQ_EVENT_LENGTH_FIXED) {
		/* a fixed-length event must not carry a variable type */
		if (snd_seq_ev_is_variable_type(ev))
			return -EINVAL;
	} else if (length_type == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) {
		/* variable-length events need a variable type and a sane size */
		if (!snd_seq_ev_is_variable_type(ev) ||
		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
			return -EINVAL;
	} else if (length_type == SNDRV_SEQ_EVENT_LENGTH_VARUSR) {
		/* user-space pointers are only allowed for direct dispatch */
		if (!snd_seq_ev_is_direct(ev))
			return -EINVAL;
	}
	return 0;
}
/* handle write() */ /* possible error values: * -ENXIO invalid client or file open mode * -ENOMEM malloc failed * -EFAULT seg. fault during copy from user space * -EINVAL invalid event * -EAGAIN no space in output pool * -EINTR interrupts while sleep * -EMLINK too many hops * others depends on return value from driver callback
*/ static ssize_t snd_seq_write(struct file *file, constchar __user *buf,
size_t count, loff_t *offset)
{ struct snd_seq_client *client = file->private_data; int written = 0, len; int err, handled; union __snd_seq_event __event; struct snd_seq_event *ev = &__event.legacy;
if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT)) return -ENXIO;
/* check client structures are in place */ if (snd_BUG_ON(!client)) return -ENXIO;
if (!client->accept_output || client->pool == NULL) return -ENXIO;
repeat:
handled = 0; /* allocate the pool now if the pool is not allocated yet */
mutex_lock(&client->ioctl_mutex); if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
err = snd_seq_pool_init(client->pool); if (err < 0) goto out;
}
/* only process whole events */
err = -EINVAL; while (count >= sizeof(struct snd_seq_event)) { /* Read in the event header from the user */
len = sizeof(struct snd_seq_event); if (copy_from_user(ev, buf, len)) {
err = -EFAULT; break;
} /* read in the rest bytes for UMP events */ if (snd_seq_ev_is_ump(ev)) { if (count < sizeof(struct snd_seq_ump_event)) break; if (copy_from_user((char *)ev + len, buf + len, sizeof(struct snd_seq_ump_event) - len)) {
err = -EFAULT; break;
}
len = sizeof(struct snd_seq_ump_event);
}
ev->source.client = client->number; /* fill in client number */ /* Check for extension data length */ if (check_event_type_and_length(ev)) {
err = -EINVAL; break;
}
if (!event_is_compatible(client, ev)) {
err = -EINVAL; break;
}
/* check for special events */ if (!snd_seq_ev_is_ump(ev)) { if (ev->type == SNDRV_SEQ_EVENT_NONE) goto __skip_event; elseif (snd_seq_ev_is_reserved(ev)) {
err = -EINVAL; break;
}
}
if (snd_seq_ev_is_variable(ev)) { int extlen = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK; if ((size_t)(extlen + len) > count) { /* back out, will get an error this time or next */
err = -EINVAL; break;
} /* set user space pointer */
ev->data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
ev->data.ext.ptr = (char __force *)buf + len;
len += extlen; /* increment data length */
} else { #ifdef CONFIG_COMPAT if (client->convert32 && snd_seq_ev_is_varusr(ev))
ev->data.ext.ptr =
(void __force *)compat_ptr(ev->data.raw32.d[1]); #endif
}
/* it is not allowed to set the info fields for an another client */ if (client->number != client_info->client) return -EPERM; /* also client type must be set now */ if (client->type != client_info->type) return -EINVAL;
if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) { /* check validity of midi_version field */ if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0) return -EINVAL;
/* check if UMP is supported in kernel */ if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) &&
client_info->midi_version > 0) return -EINVAL;
}
/* fill the info fields */ if (client_info->name[0])
strscpy(client->name, client_info->name, sizeof(client->name));
/* it is not allowed to create the port for an another client */ if (info->addr.client != client->number) return -EPERM; if (client->type == USER_CLIENT && info->kernel) return -EINVAL; if ((info->capability & SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT) &&
client->ump_endpoint_port >= 0) return -EBUSY;
if (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT)
port_idx = info->addr.port; else
port_idx = -1; if (port_idx >= SNDRV_SEQ_ADDRESS_UNKNOWN) return -EINVAL;
err = snd_seq_create_port(client, port_idx, &port); if (err < 0) return err;
if (info->addr.client != client->number) /* only set our own ports ! */ return -EPERM;
port = snd_seq_port_use_ptr(client, info->addr.port); if (port) {
snd_seq_set_port_info(port, info);
snd_seq_port_unlock(port); /* notify the change */
snd_seq_system_client_ev_port_change(info->addr.client,
info->addr.port);
} return 0;
}
staticint check_subscription_permission(struct snd_seq_client *client, struct snd_seq_client_port *sport, struct snd_seq_client_port *dport, struct snd_seq_port_subscribe *subs)
{ if (client->number != subs->sender.client &&
client->number != subs->dest.client) { /* connection by third client - check export permission */ if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM; if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT)) return -EPERM;
}
/* check read permission */ /* if sender or receiver is the subscribing client itself, * no permission check is necessary
*/ if (client->number != subs->sender.client) { if (! check_port_perm(sport, PERM_RD)) return -EPERM;
} /* check write permission */ if (client->number != subs->dest.client) { if (! check_port_perm(dport, PERM_WR)) return -EPERM;
} return 0;
}
/* * send an subscription notify event to user client: * client must be user client.
*/ int snd_seq_client_notify_subscription(int client, int port, struct snd_seq_port_subscribe *info, int evtype)
{ struct snd_seq_event event;
/* set queue name */ if (!info->name[0])
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
strscpy(q->name, info->name, sizeof(q->name));
snd_use_lock_free(&q->use_lock);
/* * Input mostly not implemented XXX.
*/ if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) { /* * No restrictions so for a user client we can clear * the whole fifo
*/ if (client->type == USER_CLIENT && client->data.user.fifo)
snd_seq_fifo_clear(client->data.user.fifo);
}
if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
snd_seq_queue_remove_cells(client->number, info);
return 0;
}
/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void *arg)
{
	struct snd_seq_port_subscribe *subs = arg;
	struct snd_seq_client *sender;
	struct snd_seq_client_port *sport;
	int result = -EINVAL;

	sender = client_load_and_use_ptr(subs->sender.client);
	if (!sender)
		return result;
	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
	if (sport) {
		result = snd_seq_port_get_subscription(&sport->c_src,
						       &subs->dest, subs);
		snd_seq_port_unlock(sport);
	}
	snd_seq_client_unlock(sender);
	return result;
}
/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
{
	struct snd_seq_query_subs *subs = arg;
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	cptr = client_load_and_use_ptr(subs->root.client);
	if (!cptr)
		goto __end;
	port = snd_seq_port_use_ptr(cptr, subs->root.port);
	if (!port)
		goto __end;

	/* pick the subscriber list matching the query direction */
	switch (subs->type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	down_read(&group->list_mutex);
	/* search for the subscriber at the requested index */
	subs->num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs->index) {
			/* found! */
			struct snd_seq_subscribers *s;
			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs->addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs->addr = s->info.sender;
			}
			subs->flags = s->info.flags;
			subs->queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

      __end:
	/* release the port/client references taken above */
	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);
	/* restored: the return statement and closing brace were missing in
	 * the damaged source, leaving a non-void function without a return */
	return result;
}
cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO;
/* search for next port */
info->addr.port++;
port = snd_seq_port_query_nearest(cptr, info); if (port == NULL) {
snd_seq_client_unlock(cptr); return -ENOENT;
}
/* get port info */
info->addr = port->addr;
snd_seq_get_port_info(port, info);
snd_seq_port_unlock(port);
snd_seq_client_unlock(cptr);
#if IS_ENABLED(CONFIG_SND_SEQ_UMP) /* exception - handling large data */ switch (cmd) { case SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO: case SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO: return snd_seq_ioctl_client_ump_info(client, cmd, arg);
} #endif
for (handler = ioctl_handlers; handler->cmd > 0; ++handler) { if (handler->cmd == cmd) break;
} if (handler->cmd == 0) return -ENOTTY;
memset(&buf, 0, sizeof(buf));
/* * All of ioctl commands for ALSA sequencer get an argument of size * within 13 bits. We can safely pick up the size from the command.
*/
size = _IOC_SIZE(handler->cmd); if (handler->cmd & IOC_IN) { if (copy_from_user(&buf, (constvoid __user *)arg, size)) return -EFAULT;
}
mutex_lock(&client->ioctl_mutex);
err = handler->func(client, &buf);
mutex_unlock(&client->ioctl_mutex); if (err >= 0) { /* Some commands includes a bug in 'dir' field. */ if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
(handler->cmd & IOC_OUT)) if (copy_to_user((void __user *)arg, &buf, size)) return -EFAULT;
}
/*
 * exported, called by kernel clients to enqueue events (w/o blocking)
 *
 * RETURN VALUE: zero if succeed, negative if error
 */
int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
				  struct file *file, bool blocking)
{
	struct snd_seq_client *cptr;
	int result;

	if (snd_BUG_ON(!ev))
		return -EINVAL;

	if (!snd_seq_ev_is_ump(ev)) {
		if (ev->type == SNDRV_SEQ_EVENT_NONE)
			return 0; /* ignore this */
		if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
			return -EINVAL; /* quoted events can't be enqueued */
	}

	/* fill in client number */
	ev->source.client = client;

	if (check_event_type_and_length(ev))
		return -EINVAL;

	cptr = client_load_and_use_ptr(client);
	if (cptr == NULL)
		return -EINVAL;

	if (!cptr->accept_output) {
		result = -EPERM;
	} else { /* send it */
		mutex_lock(&cptr->ioctl_mutex);
		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
						      false, 0,
						      &cptr->ioctl_mutex);
		mutex_unlock(&cptr->ioctl_mutex);
	}

	/* restored: the unlock/return tail was missing in the damaged source;
	 * without snd_seq_client_unlock() the reference taken by
	 * client_load_and_use_ptr() above would leak
	 */
	snd_seq_client_unlock(cptr);
	return result;
}
/*
 * NOTE(review): the following trailing text is not part of the original
 * source file.  It is a German boilerplate disclaimer from the website the
 * code was extracted from, stating that the information was compiled
 * carefully but without any guarantee of completeness, correctness, or
 * quality, and that the syntax highlighting and measurement features are
 * still experimental.  It should be removed when restoring the file.
 */