/** * snd_pcm_stream_lock - Lock the PCM stream * @substream: PCM substream * * This locks the PCM stream's spinlock or mutex depending on the nonatomic * flag of the given substream. This also takes the global link rw lock * (or rw sem), too, for avoiding the race with linked streams.
*/ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
snd_pcm_group_lock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
/** * snd_pcm_stream_unlock - Unlock the PCM stream * @substream: PCM substream * * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
*/ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
snd_pcm_group_unlock(&substream->self_group, substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
/** * snd_pcm_stream_lock_irq - Lock the PCM stream * @substream: PCM substream * * This locks the PCM stream like snd_pcm_stream_lock() and disables the local * IRQ (only when nonatomic is false). In nonatomic case, this is identical * as snd_pcm_stream_lock().
*/ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
snd_pcm_group_lock_irq(&substream->self_group,
substream->pcm->nonatomic);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP)) returnfalse;
if (substream->ops->mmap || substream->ops->page) returntrue;
dmabuf = snd_pcm_get_dma_buf(substream); if (!dmabuf)
dmabuf = &substream->dma_buffer; switch (dmabuf->dev.type) { case SNDRV_DMA_TYPE_UNKNOWN: /* we can't know the device, so just assume that the driver does * everything right
*/ returntrue; case SNDRV_DMA_TYPE_CONTINUOUS: case SNDRV_DMA_TYPE_VMALLOC: returntrue; default: return dma_can_mmap(dmabuf->dev.dev);
}
}
for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
i = hw_param_interval(params, k); if (snd_interval_empty(i)) return -EINVAL;
/* This parameter is not requested to change by a caller. */ if (!(params->rmask & PARAM_MASK_BIT(k))) continue;
if (trace_hw_interval_param_enabled())
old_interval = *i;
changed = snd_interval_refine(i, constrs_interval(constrs, k)); if (changed < 0) return changed; if (changed == 0) continue;
/* Set corresponding flag so that the caller gets it. */
trace_hw_interval_param(substream, k, 0, &old_interval, i);
params->cmask |= PARAM_MASK_BIT(k);
}
/* * Each application of rule has own sequence number. * * Each member of 'rstamps' array represents the sequence number of * recent application of corresponding rule.
*/
rstamps = kcalloc(constrs->rules_num, sizeof(unsignedint), GFP_KERNEL); if (!rstamps) return -ENOMEM;
/* * Each member of 'vstamps' array represents the sequence number of * recent application of rule in which corresponding parameters were * changed. * * In initial state, elements corresponding to parameters requested by * a caller is 1. For unrequested parameters, corresponding members * have 0 so that the parameters are never changed anymore.
*/ for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
vstamps[k] = (params->rmask & PARAM_MASK_BIT(k)) ? 1 : 0;
/* Due to the above design, actual sequence number starts at 2. */
stamp = 2;
retry: /* Apply all rules in order. */
again = false; for (k = 0; k < constrs->rules_num; k++) {
r = &constrs->rules[k];
/* * Check condition bits of this rule. When the rule has * some condition bits, parameter without the bits is * never processed. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP * is an example of the condition bits.
*/ if (r->cond && !(r->cond & params->flags)) continue;
/* * The 'deps' array includes maximum four dependencies * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fifth * member of this array is a sentinel and should be * negative value. * * This rule should be processed in this time when dependent * parameters were changed at former applications of the other * rules.
*/ for (d = 0; r->deps[d] >= 0; d++) { if (vstamps[r->deps[d]] > rstamps[k]) break;
} if (r->deps[d] < 0) continue;
if (trace_hw_mask_param_enabled()) { if (hw_is_mask(r->var))
old_mask = *hw_param_mask(params, r->var);
} if (trace_hw_interval_param_enabled()) { if (hw_is_interval(r->var))
old_interval = *hw_param_interval(params, r->var);
}
changed = r->func(params, r); if (changed < 0) return changed;
/* * When the parameter is changed, notify it to the caller * by corresponding returned bit, then preparing for next * iteration.
*/ if (changed && r->var >= 0) { if (hw_is_mask(r->var)) {
trace_hw_mask_param(substream, r->var,
k + 1, &old_mask,
hw_param_mask(params, r->var));
} if (hw_is_interval(r->var)) {
trace_hw_interval_param(substream, r->var,
k + 1, &old_interval,
hw_param_interval(params, r->var));
}
params->cmask |= PARAM_MASK_BIT(r->var);
vstamps[r->var] = stamp;
again = true;
}
rstamps[k] = stamp++;
}
/* Iterate to evaluate all rules till no parameters are changed. */ if (again) goto retry;
if (!params->msbits) {
i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); if (snd_interval_single(i))
params->msbits = snd_interval_value(i);
m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); if (snd_mask_single(m)) {
snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
params->msbits = snd_pcm_format_width(format);
}
}
if (params->msbits) {
m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT); if (snd_mask_single(m)) {
snd_pcm_format_t format = (__force snd_pcm_format_t)snd_mask_min(m);
params = memdup_user(_params, sizeof(*params)); if (IS_ERR(params)) return PTR_ERR(params);
err = snd_pcm_hw_refine(substream, params); if (err < 0) return err;
err = fixup_unreferenced_params(substream, params); if (err < 0) return err;
if (copy_to_user(_params, params, sizeof(*params))) return -EFAULT; return 0;
}
staticint period_to_usecs(struct snd_pcm_runtime *runtime)
{ int usecs;
if (! runtime->rate) return -1; /* invalid */
/* take 75% of period time as the deadline */
usecs = (750000 / runtime->rate) * runtime->period_size;
usecs += ((750000 % runtime->rate) * runtime->period_size) /
runtime->rate;
/** * snd_pcm_hw_params_choose - choose a configuration defined by @params * @pcm: PCM instance * @params: the hw_params instance * * Choose one configuration from configuration space defined by @params. * The configuration chosen is that obtained fixing in this order: * first access, first format, first subformat, min channels, * min rate, min period time, max buffer size, min tick time * * Return: Zero if successful, or a negative error code on failure.
*/ staticint snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params)
{ staticconstint vars[] = {
SNDRV_PCM_HW_PARAM_ACCESS,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_SUBFORMAT,
SNDRV_PCM_HW_PARAM_CHANNELS,
SNDRV_PCM_HW_PARAM_RATE,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
SNDRV_PCM_HW_PARAM_TICK_TIME,
-1
}; constint *v; struct snd_mask old_mask __maybe_unused; struct snd_interval old_interval __maybe_unused; int changed;
for (v = vars; *v != -1; v++) { /* Keep old parameter to trace. */ if (trace_hw_mask_param_enabled()) { if (hw_is_mask(*v))
old_mask = *hw_param_mask(params, *v);
} if (trace_hw_interval_param_enabled()) { if (hw_is_interval(*v))
old_interval = *hw_param_interval(params, *v);
} if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
changed = snd_pcm_hw_param_first(pcm, params, *v, NULL); else
changed = snd_pcm_hw_param_last(pcm, params, *v, NULL); if (changed < 0) return changed; if (changed == 0) continue;
/* Trace the changed parameter. */ if (hw_is_mask(*v)) {
trace_hw_mask_param(pcm, *v, 0, &old_mask,
hw_param_mask(params, *v));
} if (hw_is_interval(*v)) {
trace_hw_interval_param(pcm, *v, 0, &old_interval,
hw_param_interval(params, *v));
}
}
return 0;
}
/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise
 * block the further r/w operations
 */
static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime)
{
	/* fails (no decrement, no mutex taken) while a r/w access is running */
	if (!atomic_dec_unless_positive(&runtime->buffer_accessing))
		return -EBUSY;
	mutex_lock(&runtime->buffer_mutex);
	return 0; /* keep buffer_mutex, unlocked by below */
}
/* release buffer_mutex and clear r/w access flag */
static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime)
{
	mutex_unlock(&runtime->buffer_mutex);
	/* re-allow r/w accesses blocked by snd_pcm_buffer_access_lock() */
	atomic_inc(&runtime->buffer_accessing);
}
/* fill the PCM buffer with the current silence format; called from pcm_oss.c */
void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime)
{
	/* Bail out when the buffer is busy; snd_pcm_buffer_access_lock()
	 * returns -EBUSY without taking buffer_mutex or decrementing
	 * buffer_accessing, so proceeding to the unlock below would release
	 * an unheld mutex and skew the access counter.
	 */
	if (snd_pcm_buffer_access_lock(runtime) < 0)
		return;
	if (runtime->dma_area)
		snd_pcm_format_set_silence(runtime->format, runtime->dma_area,
					   bytes_to_samples(runtime, runtime->dma_bytes));
	snd_pcm_buffer_access_unlock(runtime);
}
EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence);
if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
usecs = period_to_usecs(runtime); if (usecs >= 0)
cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
usecs);
err = 0;
_error: if (err) { /* hardware might be unusable from this time, * so we force application to retry to set * the correct hardware parameter settings
*/
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); if (substream->ops->hw_free != NULL)
substream->ops->hw_free(substream); if (substream->managed_buffer_alloc)
snd_pcm_lib_free_pages(substream);
}
unlock:
snd_pcm_buffer_access_unlock(runtime); return err;
}
params = memdup_user(_params, sizeof(*params)); if (IS_ERR(params)) return PTR_ERR(params);
err = snd_pcm_hw_params(substream, params); if (err < 0) return err;
if (copy_to_user(_params, params, sizeof(*params))) return -EFAULT; return err;
}
/* stop the stream and release the hw resources and the managed buffer */
static int do_hw_free(struct snd_pcm_substream *substream)
{
	int result = 0;

	snd_pcm_sync_stop(substream, true);
	if (substream->ops->hw_free)
		result = substream->ops->hw_free(substream);
	if (substream->managed_buffer_alloc)
		snd_pcm_lib_free_pages(substream);
	return result;
}
/* free the hw resources; the substream must be in SETUP or PREPARED state
 * and not mmapped
 */
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	result = snd_pcm_buffer_access_lock(runtime);
	if (result < 0)
		return result;
	scoped_guard(pcm_stream_lock_irq, substream) {
		switch (runtime->state) {
		case SNDRV_PCM_STATE_SETUP:
		case SNDRV_PCM_STATE_PREPARED:
			/* can't free while the buffer is mapped to user space */
			if (atomic_read(&substream->mmap_count))
				result = -EBADFD;
			break;
		default:
			result = -EBADFD;
			break;
		}
	}
	if (result)
		goto unlock;
	result = do_hw_free(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
unlock:
	snd_pcm_buffer_access_unlock(runtime);
	return result;
}
goto _tstamp_end;
}
} else { /* get tstamp only in fallback mode and only if enabled */ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) { struct timespec64 tstamp;
memset(&status, 0, sizeof(status)); /* * with extension, parameters are read/write, * get audio_tstamp_data from user, * ignore rest of status structure
*/ if (ext && get_user(status.audio_tstamp_data,
(u32 __user *)(&_status->audio_tstamp_data))) return -EFAULT;
res = snd_pcm_status64(substream, &status); if (res < 0) return res; if (copy_to_user(_status, &status, sizeof(status))) return -EFAULT; return 0;
}
memset(&status64, 0, sizeof(status64));
memset(&status32, 0, sizeof(status32)); /* * with extension, parameters are read/write, * get audio_tstamp_data from user, * ignore rest of status structure
*/ if (ext && get_user(status64.audio_tstamp_data,
(u32 __user *)(&_status->audio_tstamp_data))) return -EFAULT;
res = snd_pcm_status64(substream, &status64); if (res < 0) return res;
/* * this functions is core for handling of linked stream * Note: the stream state might be changed also on failure * Note2: call with calling stream lock + link lock
*/ staticint snd_pcm_action_group(conststruct action_ops *ops, struct snd_pcm_substream *substream,
snd_pcm_state_t state, bool stream_lock)
{ struct snd_pcm_substream *s = NULL; struct snd_pcm_substream *s1; int res = 0, depth = 1;
snd_pcm_group_for_each_entry(s, substream) { if (s != substream) { if (!stream_lock)
mutex_lock_nested(&s->runtime->buffer_mutex, depth); elseif (s->pcm->nonatomic)
mutex_lock_nested(&s->self_group.mutex, depth); else
spin_lock_nested(&s->self_group.lock, depth);
depth++;
}
res = ops->pre_action(s, state); if (res < 0) goto _unlock;
}
snd_pcm_group_for_each_entry(s, substream) {
res = ops->do_action(s, state); if (res < 0) { if (ops->undo_action) {
snd_pcm_group_for_each_entry(s1, substream) { if (s1 == s) /* failed stream */ break;
ops->undo_action(s1, state);
}
}
s = NULL; /* unlock all */ goto _unlock;
}
}
snd_pcm_group_for_each_entry(s, substream) {
ops->post_action(s, state);
}
_unlock: /* unlock streams */
snd_pcm_group_for_each_entry(s1, substream) { if (s1 != substream) { if (!stream_lock)
mutex_unlock(&s1->runtime->buffer_mutex); elseif (s1->pcm->nonatomic)
mutex_unlock(&s1->self_group.mutex); else
spin_unlock(&s1->self_group.lock);
} if (s1 == s) /* end */ break;
} return res;
}
/*
 * Apply the action to a single (unlinked) substream.
 * Note: call with stream lock
 */
static int snd_pcm_action_single(const struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 snd_pcm_state_t state)
{
	int res;

	res = ops->pre_action(substream, state);
	if (res < 0)
		return res;
	res = ops->do_action(substream, state);
	if (res == 0)
		ops->post_action(substream, state);
	else if (ops->undo_action)
		ops->undo_action(substream, state);
	return res;
}
/*
 * Unref and unlock the group, but keep the stream lock;
 * when the group becomes empty and no longer referred, destroy itself
 */
static void snd_pcm_group_unref(struct snd_pcm_group *group,
				struct snd_pcm_substream *substream)
{
	bool do_free;

	if (!group)
		return;
	/* decide before unlocking; kfree must happen outside the group lock */
	do_free = refcount_dec_and_test(&group->refs);
	snd_pcm_group_unlock(group, substream->pcm->nonatomic);
	if (do_free)
		kfree(group);
}
/* * Lock the group inside a stream lock and reference it; * return the locked group object, or NULL if not linked
*/ staticstruct snd_pcm_group *
snd_pcm_stream_group_ref(struct snd_pcm_substream *substream)
{ bool nonatomic = substream->pcm->nonatomic; struct snd_pcm_group *group; bool trylock;
for (;;) { if (!snd_pcm_stream_linked(substream)) return NULL;
group = substream->group; /* block freeing the group object */
refcount_inc(&group->refs);
trylock = nonatomic ? mutex_trylock(&group->mutex) :
spin_trylock(&group->lock); if (trylock) break; /* OK */
/* check the group again; the above opens a small race window */ if (substream->group == group) break; /* OK */ /* group changed, try again */
snd_pcm_group_unref(group, substream);
} return group;
}
/* Guarantee the group members won't change during non-atomic action */
guard(rwsem_read)(&snd_pcm_link_rwsem);
res = snd_pcm_buffer_access_lock(substream->runtime); if (res < 0) return res; if (snd_pcm_stream_linked(substream))
res = snd_pcm_action_group(ops, substream, state, false); else
res = snd_pcm_action_single(ops, substream, state);
snd_pcm_buffer_access_unlock(substream->runtime); return res;
}
/** * snd_pcm_start - start all linked streams * @substream: the PCM substream instance * * Return: Zero if successful, or a negative error code. * The stream lock must be acquired before calling this function.
*/ int snd_pcm_start(struct snd_pcm_substream *substream)
{ return snd_pcm_action(&snd_pcm_action_start, substream,
SNDRV_PCM_STATE_RUNNING);
}
/* take the stream lock and start the streams */
static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
				       SNDRV_PCM_STATE_RUNNING);
}
/** * snd_pcm_stop - try to stop all running streams in the substream group * @substream: the PCM substream instance * @state: PCM state after stopping the stream * * The state of each stream is then changed to the given state unconditionally. * * Return: Zero if successful, or a negative error code.
*/ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{ return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
EXPORT_SYMBOL(snd_pcm_stop);
/** * snd_pcm_drain_done - stop the DMA only when the given stream is playback * @substream: the PCM substream * * After stopping, the state is changed to SETUP. * Unlike snd_pcm_stop(), this affects only the given stream. * * Return: Zero if successful, or a negative error code.
*/ int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{ return snd_pcm_action_single(&snd_pcm_action_stop, substream,
SNDRV_PCM_STATE_SETUP);
}
/** * snd_pcm_stop_xrun - stop the running streams as XRUN * @substream: the PCM substream instance * * This stops the given running substream (and all linked substreams) as XRUN. * Unlike snd_pcm_stop(), this function takes the substream lock by itself. * * Return: Zero if successful, or a negative error code.
*/ int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
{
guard(pcm_stream_lock_irqsave)(substream); if (substream->runtime && snd_pcm_running(substream))
__snd_pcm_xrun(substream); return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
/*
 * pause callbacks: pass boolean (to start pause or resume) as state argument
 */
#define pause_pushed(state) (__force bool)(state)

static int snd_pcm_do_pause(struct snd_pcm_substream *substream,
			    snd_pcm_state_t state)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
	 * a delta between the current jiffies, this gives a large enough
	 * delta, effectively to skip the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       pause_pushed(state) ?
				       SNDRV_PCM_TRIGGER_PAUSE_PUSH :
				       SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}
/*
 * snd_pcm_suspend - trigger SUSPEND to all linked streams
 * @substream: the PCM substream
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful, or a negative error code.
 */
static int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
	guard(pcm_stream_lock_irqsave)(substream);
	return snd_pcm_action(&snd_pcm_action_suspend, substream,
			      ACTION_ARG_IGNORE);
}
/** * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm * @pcm: the PCM instance * * After this call, all streams are changed to SUSPENDED state. * * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
*/ int snd_pcm_suspend_all(struct snd_pcm *pcm)
{ struct snd_pcm_substream *substream; int stream, err = 0;
if (! pcm) return 0;
for_each_pcm_substream(pcm, stream, substream) { /* FIXME: the open/close code should lock this as well */ if (!substream->runtime) continue;
/* * Skip BE dai link PCM's that are internal and may * not have their substream ops set.
*/ if (!substream->ops) continue;
/** * snd_pcm_prepare - prepare the PCM substream to be triggerable * @substream: the PCM substream instance * @file: file to refer f_flags * * Return: Zero if successful, or a negative error code.
*/ staticint snd_pcm_prepare(struct snd_pcm_substream *substream, struct file *file)
{ int f_flags;
if (file)
f_flags = file->f_flags; else
f_flags = substream->f_flags;
scoped_guard(pcm_stream_lock_irq, substream) { switch (substream->runtime->state) { case SNDRV_PCM_STATE_PAUSED:
snd_pcm_pause(substream, false);
fallthrough; case SNDRV_PCM_STATE_SUSPENDED:
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); break;
}
}
/* * Drain the stream(s). * When the substream is linked, sync until the draining of all playback streams * is finished. * After this call, all streams are supposed to be either SETUP or DRAINING * (capture only) state.
*/ staticint snd_pcm_drain(struct snd_pcm_substream *substream, struct file *file)
{ struct snd_card *card; struct snd_pcm_runtime *runtime; struct snd_pcm_substream *s; struct snd_pcm_group *group;
wait_queue_entry_t wait; int result = 0; int nonblock = 0;
/* pre-start/stop - all running streams are changed to DRAINING state */
result = snd_pcm_action(&snd_pcm_action_drain_init, substream,
ACTION_ARG_IGNORE); if (result < 0) goto unlock; /* in non-blocking, we don't wait in ioctl but let caller poll */ if (nonblock) {
result = -EAGAIN; goto unlock;
}
for (;;) { long tout; struct snd_pcm_runtime *to_check; if (signal_pending(current)) {
result = -ERESTARTSYS; break;
} /* find a substream to drain */
to_check = NULL;
group = snd_pcm_stream_group_ref(substream);
snd_pcm_group_for_each_entry(s, substream) { if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) continue;
runtime = s->runtime; if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
to_check = runtime; break;
}
}
snd_pcm_group_unref(group, substream); if (!to_check) break; /* all drained */
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream); if (runtime->no_period_wakeup)
tout = MAX_SCHEDULE_TIMEOUT; else {
tout = 100; if (runtime->rate) { long t = runtime->buffer_size * 1100 / runtime->rate;
tout = max(t, tout);
}
tout = msecs_to_jiffies(tout);
}
tout = schedule_timeout(tout);
snd_pcm_stream_lock_irq(substream);
group = snd_pcm_stream_group_ref(substream);
snd_pcm_group_for_each_entry(s, substream) { if (s->runtime == to_check) {
remove_wait_queue(&to_check->sleep, &wait); break;
}
}
snd_pcm_group_unref(group, substream);
if (card->shutdown) {
result = -ENODEV; break;
} if (tout == 0) { if (substream->runtime->state == SNDRV_PCM_STATE_SUSPENDED)
result = -ESTRPIPE; else {
dev_dbg(substream->pcm->card->dev, "playback drain timeout (DMA or IRQ trouble?)\n");
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
result = -EIO;
} break;
}
}
unlock:
snd_pcm_stream_unlock_irq(substream);
return result;
}
/* * drop ioctl * * Immediately put all linked substreams into SETUP state.
*/ staticint snd_pcm_drop(struct snd_pcm_substream *substream)
{ struct snd_pcm_runtime *runtime; int result = 0;
if (PCM_RUNTIME_CHECK(substream)) return -ENXIO;
runtime = substream->runtime;
if (runtime->state == SNDRV_PCM_STATE_OPEN ||
runtime->state == SNDRV_PCM_STATE_DISCONNECTED) return -EBADFD;
/* detach the last stream, too */ if (list_is_singular(&group->substreams)) {
relink_to_local(list_first_entry(&group->substreams, struct snd_pcm_substream,
link_list));
do_free = refcount_dec_and_test(&group->refs);
}
snd_pcm_group_unlock_irq(group, nonatomic); if (do_free)
kfree(group); return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.