/* reset tail if this was the last element */ if (f->tail == cell)
f->tail = NULL;
cell->next = NULL;
f->cells--;
}
return cell;
}
/* dequeue cell from fifo and copy on user space */ int snd_seq_fifo_cell_out(struct snd_seq_fifo *f, struct snd_seq_event_cell **cellp, int nonblock)
{ struct snd_seq_event_cell *cell; unsignedlong flags;
wait_queue_entry_t wait;
/*
 * Polling helper: register the caller on the FIFO's wait queue and
 * report whether at least one event cell is already queued.
 *
 * Returns non-zero when the queue has pending events (readable).
 */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return f->cells > 0;
}
/*
 * Change the size of the event pool backing the FIFO.
 *
 * A brand-new pool of @poolsize cells is created and swapped in under
 * the FIFO lock; every event queued in the old pool is discarded.
 *
 * Returns 0 on success, -EINVAL on a bad fifo, -ENOMEM if the new
 * pool cannot be allocated or initialized.
 */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* set up the replacement pool before touching the fifo */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	/* atomically exchange pools and detach the queued cells */
	scoped_guard(spinlock_irq, &f->lock) {
		oldpool = f->pool;
		oldhead = f->head;
		f->pool = newpool;
		f->head = NULL;
		f->tail = NULL;
		f->cells = 0;
		/* NOTE: overflow flag is not cleared */
	}

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* drop every cell that was still queued in the old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
/* get the number of unused cells safely */ int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{ int cells;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.