// SPDX-License-Identifier: GPL-2.0-only /* * Audio and Music Data Transmission Protocol (IEC 61883-6) streams * with Common Isochronous Packet (IEC 61883-1) headers * * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
*/
// The initial firmware of OXFW970 can postpone transmission of packet during finishing // asynchronous transaction. This module accepts 5 cycles to skip as maximum to avoid buffer // overrun. Actual device can skip more, then this module stops the packet streaming. #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
/** * amdtp_stream_init - initialize an AMDTP stream structure * @s: the AMDTP stream to initialize * @unit: the target of the stream * @dir: the direction of stream * @flags: the details of the streaming protocol consist of cip_flags enumeration-constants. * @fmt: the value of fmt field in CIP header * @process_ctx_payloads: callback handler to process payloads of isoc context * @protocol_size: the size to allocate newly for protocol
*/ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, enum amdtp_stream_direction dir, unsignedint flags, unsignedint fmt,
amdtp_stream_process_ctx_payloads_t process_ctx_payloads, unsignedint protocol_size)
{ if (process_ctx_payloads == NULL) return -EINVAL;
s->protocol = kzalloc(protocol_size, GFP_KERNEL); if (!s->protocol) return -ENOMEM;
/* bytes for a frame */
hw->period_bytes_min = 4 * hw->channels_max;
/* Just to prevent from allocating much pages. */
hw->period_bytes_max = hw->period_bytes_min * 2048;
hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
// Linux driver for 1394 OHCI controller voluntarily flushes isoc // context when total size of accumulated context header reaches // PAGE_SIZE. This kicks work for the isoc context and brings // callback in the middle of scheduled interrupts. // Although AMDTP streams in the same domain use the same events per // IRQ, use the largest size of context header between IT/IR contexts. // Here, use the value of context header in IR context is for both // contexts. if (!(s->flags & CIP_NO_HEADER))
ctx_header_size = IR_CTX_HEADER_SIZE_CIP; else
ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
CYCLES_PER_SECOND / ctx_header_size;
// In IEC 61883-6, one isoc packet can transfer events up to the value // of syt interval. This comes from the interval of isoc cycle. As 1394 // OHCI controller can generate hardware IRQ per isoc packet, the // interval is 125 usec. // However, there are two ways of transmission in IEC 61883-6; blocking // and non-blocking modes. In blocking mode, the sequence of isoc packet // includes 'empty' or 'NODATA' packets which include no event. In // non-blocking mode, the number of events per packet is variable up to // the syt interval. // Due to the above protocol design, the minimum PCM frames per // interrupt should be double of the value of syt interval, thus it is // 250 usec.
err = snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_TIME,
250, maximum_usec_per_period); if (err < 0) goto end;
/* Non-Blocking stream has no more constraints */ if (!(s->flags & CIP_BLOCKING)) goto end;
/* * One AMDTP packet can include some frames. In blocking mode, the * number equals to SYT_INTERVAL. So the number is 8, 16 or 32, * depending on its sampling rate. For accurate period interrupt, it's * preferrable to align period/buffer sizes to current SYT_INTERVAL.
*/
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
apply_constraint_to_size, NULL,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto end;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
apply_constraint_to_size, NULL,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto end;
end: return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
/** * amdtp_stream_set_parameters - set stream parameters * @s: the AMDTP stream to configure * @rate: the sample rate * @data_block_quadlets: the size of a data block in quadlet unit * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP * events. * * The parameters must be set before the stream is started, and must not be * changed while the stream is running.
*/ int amdtp_stream_set_parameters(struct amdtp_stream *s, unsignedint rate, unsignedint data_block_quadlets, unsignedint pcm_frame_multiplier)
{ unsignedint sfc;
for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) { if (amdtp_rate_table[sfc] == rate) break;
} if (sfc == ARRAY_SIZE(amdtp_rate_table)) return -EINVAL;
// The CIP header is processed in context header apart from context payload. staticint amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{ unsignedint multiplier;
/** * amdtp_stream_get_max_payload - get the stream's packet size * @s: the AMDTP stream * * This function must not be called before the stream has been configured * with amdtp_stream_set_parameters().
*/ unsignedint amdtp_stream_get_max_payload(struct amdtp_stream *s)
{ unsignedint cip_header_size;
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	// Ensure no period work is still in flight before the pointers are
	// rewound, so the work item cannot observe a half-reset state.
	cancel_work_sync(&s->period_work);

	// Rewind the accumulated period count and the buffer position.
	s->pcm_period_pointer = 0;
	s->pcm_buffer_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
for (i = 0; i < count; ++i) { struct seq_desc *desc = descs + pos;
if (!cip_sfc_is_base_44100(sfc)) { // Sample_rate / 8000 is an integer, and precomputed.
desc->data_blocks = state;
} else { unsignedint phase = state;
/* * This calculates the number of data blocks per packet so that * 1) the overall rate is correct and exactly synchronized to * the bus clock, and * 2) packets with a rounded-up number of blocks occur as early * as possible in the sequence (to prevent underruns of the * device's buffer).
*/ if (sfc == CIP_SFC_44100) /* 6 6 5 6 5 6 5 ... */
desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40)); else /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
desc->data_blocks = 11 * (sfc >> 1) + (phase == 0); if (++phase >= (80 >> (sfc >> 1)))
phase = 0;
state = phase;
}
if (*last_syt_offset < TICKS_PER_CYCLE) { if (!cip_sfc_is_base_44100(sfc))
syt_offset = *last_syt_offset + *syt_offset_state; else { /* * The time, in ticks, of the n'th SYT_INTERVAL sample is: * n * SYT_INTERVAL * 24576000 / sample_rate * Modulo TICKS_PER_CYCLE, the difference between successive * elements is about 1386.23. Rounding the results of this * formula to the SYT precision results in a sequence of * differences that begins with: * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ... * This code generates _exactly_ the same sequence.
*/ unsignedint phase = *syt_offset_state; unsignedint index = phase % 13;
// Subtract transfer delay so that the synchronization offset is not so large // at transmission.
syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff); if (syt_offset < transfer_delay)
syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
return syt_offset - transfer_delay;
}
// Both of the producer and consumer of the queue runs in the same clock of IEEE 1394 bus. // Additionally, the sequence of tx packets is severely checked against any discontinuity // before filling entries in the queue. The calculation is safe even if it looks fragile by // overrun. staticunsignedint calculate_cached_cycle_count(struct amdtp_stream *s, unsignedint head)
{ constunsignedint cache_size = s->ctx_data.tx.cache.size; unsignedint cycles = s->ctx_data.tx.cache.pos;
if (cycles < head)
cycles += cache_size;
cycles -= head;
// The program in user process should periodically check the status of intermediate // buffer associated to PCM substream to process PCM frames in the buffer, instead // of receiving notification of period elapsed by poll wait. // // Use another work item for period elapsed event to prevent the following AB/BA // deadlock: // // thread 1 thread 2 // ================================= ================================= // A.work item (process) pcm ioctl (process) // v v // process_rx_packets() B.PCM stream lock // process_tx_packets() v // v callbacks in snd_pcm_ops // update_pcm_pointers() v // snd_pcm_elapsed() fw_iso_context_flush_completions() // snd_pcm_stream_lock_irqsave() disable_work_sync() // v v // wait until release of B wait until A exits if (!pcm->runtime->no_period_wakeup)
queue_work(system_highpri_wq, &s->period_work);
}
}
// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent // it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. staticinline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{ return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}
// Align to actual cycle count for the packet which is going to be scheduled. // This module queued the same number of isochronous cycle as the size of queue // to kip isochronous cycle, therefore it's OK to just increment the cycle by // the size of queue for scheduled cycle. staticinline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp, unsignedint queue_size)
{
u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp); return increment_ohci_cycle_count(cycle, queue_size);
}
if (trace_amdtp_packet_enabled())
(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
*desc_count = 0; for (i = 0; i < packet_count; ++i) { unsignedint cycle; bool lost; unsignedint data_blocks; unsignedint syt;
cycle = compute_ohci_cycle_count(ctx_header[1]);
lost = (next_cycle != cycle); if (lost) { if (s->flags & CIP_NO_HEADER) { // Fireface skips transmission just for an isoc cycle corresponding // to empty packet. unsignedint prev_cycle = next_cycle;
next_cycle = increment_ohci_cycle_count(next_cycle, 1);
lost = (next_cycle != cycle); if (!lost) { // Prepare a description for the skipped cycle for // sequence replay.
desc->cycle = prev_cycle;
desc->syt = 0;
desc->data_blocks = 0;
desc->data_block_counter = dbc;
desc->ctx_payload = NULL;
desc = amdtp_stream_next_packet_desc(s, desc);
++(*desc_count);
}
} elseif (s->flags & CIP_JUMBO_PAYLOAD) { // OXFW970 skips transmission for several isoc cycles during // asynchronous transaction. The sequence replay is impossible due // to the reason. unsignedint safe_cycle = increment_ohci_cycle_count(next_cycle,
IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
} if (lost) {
dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
next_cycle, cycle); return -EIO;
}
}
err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
packet_index, i, curr_cycle_time); if (err < 0) return err;
// Detect work items for any isochronous context. The work item for pcm_period_work() // should be avoided since the call of snd_pcm_period_elapsed() can reach via // snd_pcm_ops.pointer() under acquiring PCM stream(group) lock and causes dead lock at // snd_pcm_stop_xrun(). if (work && work != &s->period_work)
amdtp_stream_pcm_abort(s);
WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
// Forward to the latest record. for (i = 0; i < count - 1; ++i)
desc = amdtp_stream_next_packet_desc(s, desc);
latest_cycle = desc->cycle;
err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time); if (err < 0) goto end;
// Compute cycle count with lower 3 bits of second field and cycle field like timestamp // format of 1394 OHCI isochronous context.
curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);
if (s->direction == AMDTP_IN_STREAM) { // NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since // it corresponds to arrived isochronous packet. if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0) goto end;
cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);
// NOTE: estimate delay by recent history of arrived AMDTP packets. The estimated // value expectedly corresponds to a few packets (0-2) since the packet arrived at // the most recent isochronous cycle has been already processed. for (i = 0; i < cycle_gap; ++i) {
desc = amdtp_stream_next_packet_desc(s, desc);
data_block_count += desc->data_blocks;
}
} else { // NOTE: The AMDTP packet descriptor should be for the future isochronous cycle // since it was already scheduled. if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0) goto end;
cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);
// NOTE: use history of scheduled packets. for (i = 0; i < cycle_gap; ++i) {
data_block_count += desc->data_blocks;
desc = prev_packet_desc(s, desc);
}
}
end: return data_block_count * s->pcm_frame_multiplier;
}
if (s == d->irq_target) { // At NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by // the tasks of user process operating ALSA PCM character device by calling ioctl(2) // with some requests, instead of scheduled hardware IRQ of an IT context. struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
} else {
need_hw_irq = false;
}
if (trace_amdtp_packet_enabled())
(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
for (i = 0; i < packets; ++i) {
DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS); bool sched_irq = false;
build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
desc->data_blocks, desc->data_block_counter,
desc->syt, i, curr_cycle_time);
if (s == s->domain->irq_target) {
event_count += desc->data_blocks; if (event_count >= events_per_period) {
event_count -= events_per_period;
sched_irq = need_hw_irq;
}
}
if (queue_out_packet(s, template, sched_irq) < 0) {
cancel_stream(s); return;
}
// Attempt to detect any event in the batch of packets.
events = 0;
ctx_header = header; for (i = 0; i < count; ++i) { unsignedint payload_quads =
(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32); unsignedint data_blocks;
// NODATA packet can includes any data blocks but they are // not available as event. if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
data_blocks = 0; else
data_blocks = payload_quads / s->data_block_quadlets;
}
}
}
if (events > 0)
s->ctx_data.tx.event_starts = true;
// Decide the cycle count to begin processing content of packet in IR contexts.
{ unsignedint stream_count = 0; unsignedint event_starts_count = 0; unsignedint cycle = UINT_MAX;
list_for_each_entry(s, &d->streams, list) { if (s->direction == AMDTP_IN_STREAM) {
++stream_count; if (s->ctx_data.tx.event_starts)
++event_starts_count;
}
}
if (stream_count == event_starts_count) { unsignedint next_cycle;
list_for_each_entry(s, &d->streams, list) { if (s->direction != AMDTP_IN_STREAM) continue;
// Decide the cycle count to begin processing content of packet in IT contexts. All of IT // contexts are expected to start and get callback when reaching here. if (ready_to_start) { unsignedint cycle = s->next_cycle;
list_for_each_entry(s, &d->streams, list) { if (s->direction != AMDTP_OUT_STREAM) continue;
if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
cycle = s->next_cycle;
if (s == d->irq_target)
s->context->callback.sc = irq_target_callback_intermediately; else
s->context->callback.sc = process_rx_packets_intermediately;
}
d->processing_cycle.rx_start = cycle;
}
}
// This is executed one time. For in-stream, first packet has come. For out-stream, prepared to // transmit first packet. staticvoid amdtp_stream_first_callback(struct fw_iso_context *context,
u32 tstamp, size_t header_length, void *header, void *private_data)
{ struct amdtp_stream *s = private_data; struct amdtp_domain *d = s->domain;
if (s->direction == AMDTP_IN_STREAM) {
context->callback.sc = drop_tx_packets_initially;
} else { if (s == d->irq_target)
context->callback.sc = irq_target_callback_skip; else
context->callback.sc = skip_rx_packets;
}
/** * amdtp_stream_start - start transferring packets * @s: the AMDTP stream to start * @channel: the isochronous channel on the bus * @speed: firewire speed code * @queue_size: The number of packets in the queue. * @idle_irq_interval: the interval to queue packet during initial state. * * The stream cannot be started until it has been configured with * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI * device can be started.
*/ staticint amdtp_stream_start(struct amdtp_stream *s, int channel, int speed, unsignedint queue_size, unsignedint idle_irq_interval)
{ bool is_irq_target = (s == s->domain->irq_target); unsignedint ctx_header_size; unsignedint max_ctx_payload_size; enum dma_data_direction dir; struct pkt_desc *descs; int i, type, tag, err;
// NOTE: When operating without hardIRQ/softIRQ, applications tends to call ioctl request // for runtime of PCM substream in the interval equivalent to the size of PCM buffer. It // could take a round over queue of AMDTP packet descriptors and small loss of history. For // safe, keep more 8 elements for the queue, equivalent to 1 ms.
descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL); if (!descs) {
err = -ENOMEM; goto err_context;
}
s->packet_descs = descs;
INIT_LIST_HEAD(&s->packet_descs_list); for (i = 0; i < s->queue_size; ++i) {
INIT_LIST_HEAD(&descs->link);
list_add_tail(&descs->link, &s->packet_descs_list);
++descs;
}
s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
s->packet_index = 0; do { struct fw_iso_packet params;
err = queue_out_packet(s, ¶ms, sched_irq);
} if (err < 0) goto err_pkt_descs;
} while (s->packet_index > 0);
/* NOTE: TAG1 matches CIP. This just affects in stream. */
tag = FW_ISO_CONTEXT_MATCH_TAG1; if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
tag |= FW_ISO_CONTEXT_MATCH_TAG0;
/** * amdtp_domain_stream_pcm_pointer - get the PCM buffer position * @d: the AMDTP domain. * @s: the AMDTP stream that transports the PCM data * * Returns the current buffer position, in frames.
*/ unsignedlong amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d, struct amdtp_stream *s)
{ struct amdtp_stream *irq_target = d->irq_target;
if (irq_target && amdtp_stream_running(irq_target)) { // The work item to call snd_pcm_period_elapsed() can reach here by the call of // snd_pcm_ops.pointer(), however less packets would be available then. Therefore // the following call is just for user process contexts. if (current_work() != &s->period_work)
fw_iso_context_flush_completions(irq_target->context);
}
/** * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames * @d: the AMDTP domain. * @s: the AMDTP stream that transfers the PCM frames * * Returns zero always.
*/ int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{ struct amdtp_stream *irq_target = d->irq_target;
// Process isochronous packets for recent isochronous cycle to handle // queued PCM frames. if (irq_target && amdtp_stream_running(irq_target))
fw_iso_context_flush_completions(irq_target->context);
/** * amdtp_stream_update - update the stream after a bus reset * @s: the AMDTP stream
*/ void amdtp_stream_update(struct amdtp_stream *s)
{ /* Precomputing. */
WRITE_ONCE(s->source_node_id_field,
(fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (amdtp_stream_running(s)) {
		if (s->direction == AMDTP_OUT_STREAM) {
			kfree(s->ctx_data.rx.seq.descs);
		} else {
			// The tx cache is only allocated when sequence replay
			// is enabled for the domain.
			if (s->domain->replay.enable)
				kfree(s->ctx_data.tx.cache.descs);
		}
	}

	mutex_unlock(&s->mutex);
}
/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 *
 * Returns zero always.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	// Zero means 'not yet configured'; amdtp_domain_start() substitutes
	// defaults for both values while they are still zero. Initialize both
	// explicitly so that the fallback does not depend on the caller having
	// zeroed the structure beforehand.
	d->events_per_period = 0;
	d->events_per_buffer = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// Nothing to release at present; kept for API symmetry with
	// amdtp_domain_init().
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
/** * amdtp_domain_add_stream - register isoc context into the domain. * @d: the AMDTP domain. * @s: the AMDTP stream. * @channel: the isochronous channel on the bus. * @speed: firewire speed code.
*/ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s, int channel, int speed)
{ struct amdtp_stream *tmp;
list_for_each_entry(tmp, &d->streams, list) { if (s == tmp) return -EBUSY;
}
// Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams // is less than the number of rx streams, the first tx stream is selected. staticint make_association(struct amdtp_domain *d)
{ unsignedint dst_index = 0; struct amdtp_stream *rx;
// Make association to replay target.
list_for_each_entry(rx, &d->streams, list) { if (rx->direction == AMDTP_OUT_STREAM) { unsignedint src_index = 0; struct amdtp_stream *tx = NULL; struct amdtp_stream *s;
list_for_each_entry(s, &d->streams, list) { if (s->direction == AMDTP_IN_STREAM) { if (dst_index == src_index) {
tx = s; break;
}
++src_index;
}
} if (!tx) { // Select the first entry.
list_for_each_entry(s, &d->streams, list) { if (s->direction == AMDTP_IN_STREAM) {
tx = s; break;
}
} // No target is available to replay sequence. if (!tx) return -EINVAL;
}
rx->ctx_data.rx.replay_target = tx;
++dst_index;
}
}
return 0;
}
/** * amdtp_domain_start - start sending packets for isoc context in the domain. * @d: the AMDTP domain. * @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR * contexts. * @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in * IT context. * @replay_on_the_fly: transfer rx packets according to nominal frequency, then begin to replay * according to arrival of events in tx packets.
*/ int amdtp_domain_start(struct amdtp_domain *d, unsignedint tx_init_skip_cycles, bool replay_seq, bool replay_on_the_fly)
{ unsignedint events_per_buffer = d->events_per_buffer; unsignedint events_per_period = d->events_per_period; unsignedint queue_size; struct amdtp_stream *s; bool found = false; int err;
if (replay_seq) {
err = make_association(d); if (err < 0) return err;
}
d->replay.enable = replay_seq;
d->replay.on_the_fly = replay_on_the_fly;
// Select an IT context as IRQ target.
list_for_each_entry(s, &d->streams, list) { if (s->direction == AMDTP_OUT_STREAM) {
found = true; break;
}
} if (!found) return -ENXIO;
d->irq_target = s;
// This is a case that AMDTP streams in domain run just for MIDI // substream. Use the number of events equivalent to 10 msec as // interval of hardware IRQ. if (events_per_period == 0)
events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100; if (events_per_buffer == 0)
events_per_buffer = events_per_period * 3;
/** * amdtp_domain_stop - stop sending packets for isoc context in the same domain. * @d: the AMDTP domain to which the isoc contexts belong.
*/ void amdtp_domain_stop(struct amdtp_domain *d)
{ struct amdtp_stream *s, *next;
if (d->irq_target)
amdtp_stream_stop(d->irq_target);
/*
 * NOTE(review): the following disclaimer text is extraction residue from a
 * web page and does not belong in this source file — it should be removed.
 * Translation of the German original: "The information on this web page has
 * been compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax rendering and the measurement are
 * still experimental."
 */