struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl, struct cx18_queue *q, int to_front)
{ /* clear the mdl if it is not to be enqueued to the full queue */ if (q != &s->q_full) {
mdl->bytesused = 0;
mdl->readpos = 0;
mdl->m_flags = 0;
mdl->skipped = 0;
mdl->curr_buf = NULL;
}
/* q_busy is restricted to a max buffer count imposed by firmware */ if (q == &s->q_busy &&
atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
q = &s->q_free;
/* * We don't have to acquire multiple q locks here, because we are * serialized by the single threaded work handler. * MDLs from the firmware will thus remain in order as * they are moved from q_busy to q_full or to the dvb ring buffer.
*/
spin_lock(&s->q_busy.lock);
list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) { /* * We should find what the firmware told us is done, * right at the front of the queue. If we don't, we likely have * missed an mdl done message from the firmware. * Once we skip an mdl repeatedly, relative to the size of * q_busy, we have high confidence we've missed it.
*/ if (mdl->id != id) {
mdl->skipped++; if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) { /* mdl must have fallen out of rotation */
CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
s->name, mdl->id,
mdl->skipped); /* Sweep it up to put it back into rotation */
list_move_tail(&mdl->list, &sweep_up);
atomic_dec(&s->q_busy.depth);
} continue;
} /* * We pull the desired mdl off of the queue here. Something * will have to put it back on a queue later.
*/
list_del_init(&mdl->list);
atomic_dec(&s->q_busy.depth);
ret = mdl; break;
}
spin_unlock(&s->q_busy.lock);
/* * We found the mdl for which we were looking. Get it ready for * the caller to put on q_full or in the dvb ring buffer.
*/ if (ret != NULL) {
ret->bytesused = bytesused;
ret->skipped = 0; /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
cx18_mdl_update_bufs_for_cpu(s, ret); if (s->type != CX18_ENC_STREAM_TYPE_TS)
set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
}
/* Put any mdls the firmware is ignoring back into normal rotation */
list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
list_del_init(&mdl->list);
cx18_enqueue(s, mdl, &s->q_free);
} return ret;
}
/* Move all mdls of a queue, while flushing the mdl */ staticvoid cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q_src, struct cx18_queue *q_dst)
{ struct cx18_mdl *mdl;
/* It only makes sense to flush to q_free or q_idle */ if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy) return;
/*
 * Unload all MDLs and buffers from a stream's queues: every MDL ends up on
 * q_idle with a reset id, and every buffer goes back to the stream's buf_pool.
 *
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
	struct cx18_queue *q_idle = &s->q_idle;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;

	/* Move all MDLS to q_idle */
	cx18_queue_flush(s, &s->q_busy, q_idle);
	cx18_queue_flush(s, &s->q_full, q_idle);
	cx18_queue_flush(s, &s->q_free, q_idle);

	/* Reset MDL id's and move all buffers back to the stream's buf_pool */
	spin_lock(&q_idle->lock);
	list_for_each_entry(mdl, &q_idle->list, list) {
		/* Detach every buffer from this MDL, returning it to the pool */
		while (!list_empty(&mdl->buf_list)) {
			buf = list_first_entry(&mdl->buf_list,
					       struct cx18_buffer, list);
			list_move_tail(&buf->list, &s->buf_pool);
			/* clear per-buffer accounting for the buffer's next use */
			buf->bytesused = 0;
			buf->readpos = 0;
		}
		mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
		/* all other mdl fields were cleared by cx18_queue_flush() */
	}
	spin_unlock(&q_idle->lock);
}
/* * Note, s->buf_pool is not protected by a lock, * the stream better not have *anything* going on when calling this
*/ void cx18_load_queues(struct cx18_stream *s)
{ struct cx18 *cx = s->cx; struct cx18_mdl *mdl; struct cx18_buffer *buf; int mdl_id; int i;
u32 partial_buf_size;
/* * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free * Excess MDLs are left on q_idle * Excess buffers are left in buf_pool and/or on an MDL in q_idle
*/
mdl_id = s->mdl_base_idx; for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
mdl != NULL && i == s->bufs_per_mdl;
mdl = cx18_dequeue(s, &s->q_idle)) {
mdl->id = mdl_id;
for (i = 0; i < s->bufs_per_mdl; i++) { if (list_empty(&s->buf_pool)) break;
/* update the firmware's MDL array with this buffer */
cx18_writel(cx, buf->dma_handle,
&cx->scb->cpu_mdl[mdl_id + i].paddr);
cx18_writel(cx, s->buf_size,
&cx->scb->cpu_mdl[mdl_id + i].length);
}
if (i == s->bufs_per_mdl) { /* * The encoder doesn't honor s->mdl_size. So in the * case of a non-integral number of buffers to meet * mdl_size, we lie about the size of the last buffer * in the MDL to get the encoder to really only send * us mdl_size bytes per MDL transfer.
*/
partial_buf_size = s->mdl_size % s->buf_size; if (partial_buf_size) {
cx18_writel(cx, partial_buf_size,
&cx->scb->cpu_mdl[mdl_id + i - 1].length);
}
cx18_enqueue(s, mdl, &s->q_free);
} else { /* Not enough buffers for this MDL; we won't use it */
cx18_push(s, mdl, &s->q_idle);
}
mdl_id += i;
}
}
/*
 * NOTE(review): this is a fragment.  The enclosing function's header and the
 * 'if' statement whose error path the CX18_ERR calls below belong to are not
 * visible here (content appears to be missing between the preceding function
 * and this point), and the body is cut off again after the kzalloc NULL
 * check.  Presumably this is the stream buffer/MDL allocation routine —
 * verify against the complete file before changing anything here.
 */
CX18_ERR("Too many buffers, cannot fit in SCB area\n");
CX18_ERR("Max buffers = %zu\n",
bufsz / sizeof(struct cx18_mdl_ent)); return -ENOMEM;
}
/* record where this stream's MDL ids start in the firmware's MDL array */
s->mdl_base_idx = cx->free_mdl_idx;
/* allocate stream buffers and MDLs */ for (i = 0; i < s->buffers; i++) { struct cx18_mdl *mdl; struct cx18_buffer *buf;
/* 1 MDL per buffer to handle the worst & also default case */
mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN); if (mdl == NULL) break;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.