/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be
 *    at least one free TRB in the ring. This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If controller is the producer for the event ring,
 *    and it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by
 *    you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. Controller is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */
/* * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA * address of the TRB.
*/
dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg, union cdnsp_trb *trb)
{ unsignedlong segment_offset = trb - seg->trbs;
if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT) return 0;
/*
 * Turn a TRB into a no-op.
 *
 * Link TRBs are kept (so the ring topology is preserved) but unchained;
 * all other TRBs are cleared to a no-op of @noop_type while preserving
 * their cycle bit, so hardware ownership of the TRB does not change.
 */
static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
{
	if (cdnsp_trb_is_link(trb)) {
		/* Unchain chained link TRBs. */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB. */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}
/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void cdnsp_next_trb(struct cdnsp_device *pdev,
			   struct cdnsp_ring *ring,
			   struct cdnsp_segment **seg,
			   union cdnsp_trb **trb)
{
	if (cdnsp_trb_is_link(*trb)) {
		/* Follow the link to the first TRB of the next segment. */
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would
 * loop.
 */
void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{
	/* The event ring doesn't have link TRBs; check for the last TRB. */
	if (ring->type == TYPE_EVENT) {
		if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}

		/* Toggle CCS when wrapping past the end of the whole ring. */
		if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;

		/* Last TRB of the segment: step into the next segment. */
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link TRBs. */
	if (!cdnsp_trb_is_link(ring->dequeue)) {
		ring->dequeue++;
		ring->num_trbs_free++;
	}
	while (cdnsp_trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
	}

out:
	trace_cdnsp_inc_deq(ring);
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would
 * loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell.
 */
static void cdnsp_inc_enq(struct cdnsp_device *pdev,
			  struct cdnsp_ring *ring,
			  bool more_trbs_coming)
{
	union cdnsp_trb *next;
	u32 chain;

	/* Chain bit of the TRB just enqueued, to propagate into link TRBs. */
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	/* If this is not event ring, there is one less usable TRB. */
	if (!cdnsp_trb_is_link(ring->enqueue))
		ring->num_trbs_free--;
	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB. */
	while (cdnsp_trb_is_link(next)) {
		/*
		 * If the caller doesn't plan on enqueuing more TDs before
		 * ringing the doorbell, then we don't want to give the link
		 * TRB to the hardware just yet. We'll give the link TRB back
		 * in cdnsp_prepare_ring() just before we enqueue the TD at
		 * the top of the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* Propagate the current TD's chain state into the link TRB. */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);
		next->link.control |= cpu_to_le32(chain);

		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (cdnsp_link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}

	trace_cdnsp_inc_enq(ring);
}
/* * Check to see if there's room to enqueue num_trbs on the ring and make sure * enqueue pointer will not advance into dequeue segment.
*/ staticbool cdnsp_room_on_ring(struct cdnsp_device *pdev, struct cdnsp_ring *ring, unsignedint num_trbs)
{ int num_trbs_in_deq_seg;
if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) returnfalse;
}
returntrue;
}
/*
 * Workaround for L1: controller has issue with resuming from L1 after
 * setting doorbell for endpoint during L1 state. This function forces
 * resume signal in such case.
 */
static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
{
	/* Only the LPM-capable USB2 port needs the forced resume. */
	if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
		cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc,
				     XDEV_U0);
}
/* Ring the doorbell after placing a command on the ring. */
void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
{
	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
}
/* * Ring the doorbell after placing a transfer on the ring. * Returns true if doorbell was set, otherwise false.
*/ staticbool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, struct cdnsp_ep *pep, unsignedint stream_id)
{
__le32 __iomem *reg_addr = &pdev->dba->ep_db; unsignedint ep_state = pep->ep_state; unsignedint db_value;
/* * Don't ring the doorbell for this endpoint if endpoint is halted or * disabled.
*/ if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED)) returnfalse;
/* For stream capable endpoints driver can ring doorbell only twice. */ if (pep->ep_state & EP_HAS_STREAMS) { if (pep->stream_info.drbls_count >= 2) returnfalse;
if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
cdnsp_force_l0_go(pdev);
/* Doorbell was set. */ returntrue;
}
/* * Get the right ring for the given pep and stream_id. * If the endpoint supports streams, boundary check the USB request's stream ID. * If the endpoint doesn't support streams, return the singular endpoint ring.
*/ staticstruct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev, struct cdnsp_ep *pep, unsignedint stream_id)
{ if (!(pep->ep_state & EP_HAS_STREAMS)) return pep->ring;
if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
pep->name, stream_id); return NULL;
}
/* Ring the doorbell for any rings with pending requests. */ void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{ struct cdnsp_stream_info *stream_info; unsignedint stream_id; int ret;
if (pep->ep_state & EP_DIS_IN_RROGRESS) return;
/* A ring has pending Request if its TD list is not empty. */ if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) { if (pep->ring && !list_empty(&pep->ring->td_list))
cdnsp_ring_ep_doorbell(pdev, pep, 0); return;
}
ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id); if (!ep_ring) continue;
if (!ep_ring->stream_active || ep_ring->stream_rejected) continue;
list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
td_list) { if (td->drbl) continue;
ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id); if (ret)
td->drbl = 1;
}
}
}
/* * Get the hw dequeue pointer controller stopped on, either directly from the * endpoint context, or if streams are in use from the stream context. * The returned hw_dequeue contains the lowest four bits with cycle state * and possible stream context type.
*/ static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev, unsignedint ep_index, unsignedint stream_id)
{ struct cdnsp_stream_ctx *st_ctx; struct cdnsp_ep *pep;
/* * Move the controller endpoint ring dequeue pointer past cur_td. * Record the new state of the controller endpoint ring dequeue segment, * dequeue pointer, and new consumer cycle state in state. * Update internal representation of the ring's dequeue pointer. * * We do this in three jumps: * - First we update our new ring state to be the same as when the * controller stopped. * - Then we traverse the ring to find the segment that contains * the last TRB in the TD. We toggle the controller new cycle state * when we pass any link TRBs with the toggle cycle bit set. * - Finally we move the dequeue state one TRB further, toggling the cycle bit * if we've moved it past a link TRB with the toggle cycle bit set.
*/ staticvoid cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev, struct cdnsp_ep *pep, unsignedint stream_id, struct cdnsp_td *cur_td, struct cdnsp_dequeue_state *state)
{ bool td_last_trb_found = false; struct cdnsp_segment *new_seg; struct cdnsp_ring *ep_ring; union cdnsp_trb *new_deq; bool cycle_found = false;
u64 hw_dequeue;
ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id); if (!ep_ring) return;
/* * Dig out the cycle state saved by the controller during the * stop endpoint command.
*/
hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
state->new_cycle_state = hw_dequeue & 0x1;
state->stream_id = stream_id;
/* * We want to find the pointer, segment and cycle state of the new trb * (the one after current TD's last_trb). We know the cycle state at * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are * found.
*/ do { if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
cycle_found = true;
if (td_last_trb_found) break;
}
if (new_deq == cur_td->last_trb)
td_last_trb_found = true;
if (cycle_found && cdnsp_trb_is_link(new_deq) &&
cdnsp_link_trb_toggles_cycle(new_deq))
state->new_cycle_state ^= 0x1;
/*
 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not
 * part of this TD.) This is used to remove partially enqueued isoc TDs from
 * a ring.
 */
static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ep_ring,
			     struct cdnsp_td *td,
			     bool flip_cycle)
{
	struct cdnsp_segment *seg = td->start_seg;
	union cdnsp_trb *trb = td->first_trb;

	while (1) {
		cdnsp_trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
	}
}
/* * This TD is defined by the TRBs starting at start_trb in start_seg and ending * at end_trb, which may be in another segment. If the suspect DMA address is a * TRB in this TD, this function returns that TRB's segment. Otherwise it * returns 0.
*/ staticstruct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev, struct cdnsp_segment *start_seg, union cdnsp_trb *start_trb, union cdnsp_trb *end_trb,
dma_addr_t suspect_dma)
{ struct cdnsp_segment *cur_seg; union cdnsp_trb *temp_trb;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma;
dma_addr_t start_dma;
temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1]; /* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb); /* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
if (end_trb_dma > 0) { /* * The end TRB is in this segment, so suspect should * be here
*/ if (start_dma <= end_trb_dma) { if (suspect_dma >= start_dma &&
suspect_dma <= end_trb_dma) { return cur_seg;
}
} else { /* * Case for one segment with a * TD wrapped around to the top
*/ if ((suspect_dma >= start_dma &&
suspect_dma <= end_seg_dma) ||
(suspect_dma >= cur_seg->dma &&
suspect_dma <= end_trb_dma)) { return cur_seg;
}
}
return NULL;
}
/* Might still be somewhere in this segment */ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) return cur_seg;
/* For in transfers we need to copy the data from bounce to sg */
len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
seg->bounce_buf, seg->bounce_len,
seg->bounce_offs); if (len != seg->bounce_len)
dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
len, seg->bounce_len);
/* * Update the ring's dequeue segment and dequeue pointer * to reflect the new position.
*/
ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
if (cdnsp_trb_is_link(ep_ring->dequeue)) {
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
while (ep_ring->dequeue != deq_state->new_deq_ptr) {
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
if (cdnsp_trb_is_link(ep_ring->dequeue)) { if (ep_ring->dequeue == deq_state->new_deq_ptr) break;
/* * Probably there was TIMEOUT during handling Set Dequeue Pointer * command. It's critical error and controller will be stopped.
*/ if (ret) return -ESHUTDOWN;
/* Restart any rings with pending requests */
cdnsp_ring_doorbell_for_active_rings(pdev, pep);
return 0;
}
int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq, struct cdnsp_ep *pep)
{ struct cdnsp_dequeue_state deq_state; struct cdnsp_td *cur_td = NULL; struct cdnsp_ring *ep_ring; struct cdnsp_segment *seg; int status = -ECONNRESET; int ret = 0;
u64 hw_deq;
/* * If we stopped on the TD we need to cancel, then we have to * move the controller endpoint ring dequeue pointer past * this TD.
*/
hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
hw_deq &= ~0xf;
seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
cur_td->last_trb, hw_deq);
/* * The event handler won't see a completion for this TD anymore, * so remove it from the endpoint ring's TD list.
*/
list_del_init(&cur_td->td_list);
ep_ring->num_tds--;
pep->stream_info.td_count--;
/* * During disconnecting all endpoint will be disabled so we don't * have to worry about updating dequeue pointer.
*/ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING ||
pep->ep_state & EP_DIS_IN_RROGRESS) {
status = -ESHUTDOWN;
ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
}
/* Port status change events always have a successful completion code */ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
dev_err(pdev->dev, "ERR: incorrect PSC event\n");
/* Port Link State change detected. */ if ((portsc & PORT_PLC)) { if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
link_state == XDEV_RESUME) {
cmd_regs = readl(&pdev->op_regs->command); if (!(cmd_regs & CMD_R_S)) goto cleanup;
if (DEV_SUPERSPEED_ANY(portsc)) {
cdnsp_set_link_state(pdev, &port_regs->portsc,
XDEV_U0);
/* if a bounce buffer was used to align this td then unmap it */
cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
/* * If the controller said we transferred more data than the buffer * length, Play it safe and say we didn't transfer anything.
*/ if (preq->request.actual > preq->request.length) {
preq->request.actual = 0;
*status = 0;
}
if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_SHORT_PACKET) { /* * The Endpoint Stop Command completion will take care of any * stopped TDs. A stopped TD may be restarted, so don't update * the ring dequeue pointer or take this TD off any lists yet.
*/ return;
}
/* Update ring dequeue pointer */ while (ep_ring->dequeue != td->last_trb)
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_inc_deq(pdev, ep_ring);
cdnsp_td_cleanup(pdev, td, ep_ring, status);
}
/* Sum TRB lengths from ring dequeue up to stop_trb, _excluding_ stop_trb. */
static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
				 struct cdnsp_ring *ring,
				 union cdnsp_trb *stop_trb)
{
	struct cdnsp_segment *seg = ring->deq_seg;
	union cdnsp_trb *trb = ring->dequeue;
	u32 sum;

	for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
		/* No-op and link TRBs carry no transfer payload. */
		if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
	}

	return sum;
}
staticint cdnsp_giveback_first_trb(struct cdnsp_device *pdev, struct cdnsp_ep *pep, unsignedint stream_id, int start_cycle, struct cdnsp_generic_trb *start_trb)
{ /* * Pass all the TRBs to the hardware at once and make sure this write * isn't reordered.
*/
wmb();
if (start_cycle)
start_trb->field[3] |= cpu_to_le32(start_cycle); else
start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
/* * if on data stage then update the actual_length of the USB * request and flag it as set, so it won't be overwritten in the event * for the last TRB.
*/ if (trb_type == TRB_DATA) {
td->request_length_set = true;
td->preq->request.actual = td->preq->request.length - remaining;
}
/* at status stage */ if (!td->request_length_set)
td->preq->request.actual = td->preq->request.length;
switch (trb_comp_code) { case COMP_SUCCESS: case COMP_SHORT_PACKET:
*status = 0; break; case COMP_STOPPED_SHORT_PACKET:
td->preq->request.actual = remaining; goto finish_td; case COMP_STOPPED_LENGTH_INVALID: /* Stopped on ep trb with invalid length, exclude it. */
ep_trb_len = 0;
remaining = 0; break;
}
/* * If this function returns an error condition, it means it got a Transfer * event with a corrupted TRB DMA address or endpoint is disabled.
*/ staticint cdnsp_handle_tx_event(struct cdnsp_device *pdev, struct cdnsp_transfer_event *event)
{ conststruct usb_endpoint_descriptor *desc; bool handling_skipped_tds = false; struct cdnsp_segment *ep_seg; struct cdnsp_ring *ep_ring; int status = -EINPROGRESS; union cdnsp_trb *ep_trb;
dma_addr_t ep_trb_dma; struct cdnsp_ep *pep; struct cdnsp_td *td;
u32 trb_comp_code; int invalidate; int ep_index;
pep = &pdev->eps[ep_index];
ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
/* * If device is disconnect then all requests will be dequeued * by upper layers as part of disconnect sequence. * We don't want handle such event to avoid racing.
*/ if (invalidate || !pdev->gadget.connected) goto cleanup;
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
trace_cdnsp_ep_disabled(pep->out_ctx); goto err_out;
}
/* Some transfer events don't always point to a trb*/ if (!ep_ring) { switch (trb_comp_code) { case COMP_INVALID_STREAM_TYPE_ERROR: case COMP_INVALID_STREAM_ID_ERROR: case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: goto cleanup; default:
dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
pep->name); goto err_out;
}
}
/* Look for some error cases that need special treatment. */ switch (trb_comp_code) { case COMP_BABBLE_DETECTED_ERROR:
status = -EOVERFLOW; break; case COMP_RING_UNDERRUN: case COMP_RING_OVERRUN: /* * When the Isoch ring is empty, the controller will generate * a Ring Overrun Event for IN Isoch endpoint or Ring * Underrun Event for OUT Isoch endpoint.
*/ goto cleanup; case COMP_MISSED_SERVICE_ERROR: /* * When encounter missed service error, one or more isoc tds * may be missed by controller. * Set skip flag of the ep_ring; Complete the missed tds as * short transfer when process the ep_ring next time.
*/
pep->skip = true; break;
}
do { /* * This TRB should be in the TD at the head of this ring's TD * list.
*/ if (list_empty(&ep_ring->td_list)) { /* * Don't print warnings if it's due to a stopped * endpoint generating an extra completion event, or * a event for the last TRB of a short TD we already * got a short event for. * The short TD is already removed from the TD list.
*/ if (!(trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
ep_ring->last_td_was_short))
trace_cdnsp_trb_without_td(ep_ring,
(struct cdnsp_generic_trb *)event);
if (pep->skip) {
pep->skip = false;
trace_cdnsp_ep_list_empty_with_skip(pep, 0);
}
/* * Skip the Force Stopped Event. The event_trb(ep_trb_dma) * of FSE is not in the current TD pointed by ep_ring->dequeue * because that the hardware dequeue pointer still at the * previous TRB of the current TD. The previous TRB maybe a * Link TD or the last TRB of the previous TD. The command * completion handle will take care the rest.
*/ if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
pep->skip = false; goto cleanup;
}
if (!ep_seg) { if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) { /* Something is busted, give up! */
dev_err(pdev->dev, "ERROR Transfer event TRB DMA ptr not " "part of current TD ep_index %d " "comp_code %u\n", ep_index,
trb_comp_code); return -EINVAL;
}
/* * Do not update event ring dequeue pointer if we're in a loop * processing missed tds.
*/ if (!handling_skipped_tds)
cdnsp_inc_deq(pdev, pdev->event_ring);
/* * If ep->skip is set, it means there are missed tds on the * endpoint ring need to take care of. * Process them as short transfer until reach the td pointed by * the event.
*/
} while (handling_skipped_tds); return 0;
/* * This function handles all events on the event ring. * Returns true for "possibly more events to process" (caller should call * again), otherwise false if done.
*/ staticbool cdnsp_handle_event(struct cdnsp_device *pdev)
{ unsignedint comp_code; union cdnsp_trb *event; bool update_ptrs = true;
u32 cycle_bit; int ret = 0;
u32 flags;
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) { /* * While removing or stopping driver there may still be deferred * not handled interrupt which should not be treated as error. * Driver should simply ignore it.
*/ if (pdev->gadget_driver)
cdnsp_died(pdev);
if (status & STS_FATAL) {
cdnsp_died(pdev); return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
}
/* * Generic function for queuing a TRB on a ring. * The caller must have checked to make sure there's room on the ring. * * @more_trbs_coming: Will you enqueue more TRBs before setting doorbell?
*/ staticvoid cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring, bool more_trbs_coming, u32 field1, u32 field2,
u32 field3, u32 field4)
{ struct cdnsp_generic_trb *trb;
/* * Does various checks on the endpoint ring, and makes it ready to * queue num_trbs.
*/ staticint cdnsp_prepare_ring(struct cdnsp_device *pdev, struct cdnsp_ring *ep_ring,
u32 ep_state, unsigned int num_trbs,
gfp_t mem_flags)
{ unsignedint num_trbs_needed;
/* Make sure the endpoint has been added to controller schedule. */ switch (ep_state) { case EP_STATE_STOPPED: case EP_STATE_RUNNING: case EP_STATE_HALTED: break; default:
dev_err(pdev->dev, "ERROR: incorrect endpoint state\n"); return -EINVAL;
}
while (1) { if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs)) break;
trace_cdnsp_no_room_on_ring("try ring expansion");
while (cdnsp_trb_is_link(ep_ring->enqueue)) {
ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN); /* The cycle bit must be set as the last operation. */
wmb();
ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */ if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
ep_ring->cycle_state ^= 1;
ep_ring->enq_seg = ep_ring->enq_seg->next;
ep_ring->enqueue = ep_ring->enq_seg->trbs;
} return 0;
}
/* Add this TD to the tail of the endpoint ring's TD list. */
list_add_tail(&preq->td.td_list, &ep_ring->td_list);
ep_ring->num_tds++;
preq->pep->stream_info.td_count++;
for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
len = sg_dma_len(sg);
num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
len = min(len, full_len);
full_len -= len; if (full_len == 0) break;
}
return num_trbs;
}
/*
 * Sanity check: the total length queued across TRBs must equal the USB
 * request's length; log an error if the TRB math miscounted.
 */
static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
	if (running_total != preq->request.length)
		dev_err(preq->pep->pdev->dev,
			"%s - Miscalculated tx length, "
			"queued %#x, asked for %#x (%d)\n",
			preq->pep->name, running_total,
			preq->request.length, preq->request.actual);
}
/* * TD size is the number of max packet sized packets remaining in the TD * (*not* including this TRB). * * Total TD packet count = total_packet_count = * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize) * * Packets transferred up to and including this TRB = packets_transferred = * rounddown(total bytes transferred including this TRB / wMaxPacketSize) * * TD size = total_packet_count - packets_transferred * * It must fit in bits 21:17, so it can't be bigger than 31. * This is taken care of in the TRB_TD_SIZE() macro * * The last TRB in a TD must have the TD size set to zero.
*/ static u32 cdnsp_td_remainder(struct cdnsp_device *pdev, int transferred, int trb_buff_len, unsignedint td_total_len, struct cdnsp_request *preq, bool more_trbs_coming, bool zlp)
{
u32 maxp, total_packet_count;
/* Before ZLP driver needs set TD_SIZE = 1. */ if (zlp) return 1;
/* One TRB with a zero-length data packet. */ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len) return 0;
/* We got lucky, last normal TRB data on segment is packet aligned. */ if (unalign == 0) return 0;
/* Is the last nornal TRB alignable by splitting it. */ if (*trb_buff_len > unalign) {
*trb_buff_len -= unalign;
trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
enqd_len, 0, unalign); return 0;
}
/* * We want enqd_len + trb_buff_len to sum up to a number aligned to * number which is divisible by the endpoint's wMaxPacketSize. IOW: * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
*/
new_buff_len = max_pkt - (enqd_len % max_pkt);
/* Deal with request.zero - need one more td/trb. */ if (preq->request.zero && preq->request.length &&
IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
need_zero_pkt = true;
num_trbs++;
}
ret = cdnsp_prepare_transfer(pdev, preq, num_trbs); if (ret) return ret;
/* * workaround 1: STOP EP command on LINK TRB with TC bit set to 1 * causes that internal cycle bit can have incorrect state after * command complete. In consequence empty transfer ring can be * incorrectly detected when EP is resumed. * NOP TRB before LINK TRB avoid such scenario. STOP EP command is * then on NOP TRB and internal cycle bit is not changed and have * correct value.
*/ if (pep->wa1_nop_trb) {
field = le32_to_cpu(pep->wa1_nop_trb->trans_event.flags);
field ^= TRB_CYCLE;
/* * Don't give the first TRB to the hardware (by toggling the cycle bit) * until we've finished creating all the other TRBs. The ring's cycle * state may change as we enqueue the other TRBs, so save it too.
*/
start_trb = &ring->enqueue->generic;
start_cycle = ring->cycle_state;
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */ for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
/* TRB buffer should not cross 64KB boundaries */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
trb_buff_len = min(trb_buff_len, block_len); if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
/* Don't change the cycle bit of the first TRB until later */ if (first_trb) {
first_trb = false; if (start_cycle == 0)
field |= TRB_CYCLE;
} else {
field |= ring->cycle_state;
}
/* * Chain all the TRBs together; clear the chain bit in the last * TRB to indicate it's the last TRB in the chain.
*/ if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
field |= TRB_CHAIN; if (cdnsp_trb_is_link(ring->enqueue + 1)) { if (cdnsp_align_td(pdev, preq, enqd_len,
&trb_buff_len,
ring->enq_seg)) {
send_addr = ring->enq_seg->bounce_dma; /* Assuming TD won't span 2 segs */
preq->td.bounce_seg = ring->enq_seg;
}
}
}
if (enqd_len + trb_buff_len >= full_len) { if (need_zero_pkt && !zero_len_trb) {
zero_len_trb = true;
} else {
zero_len_trb = false;
field &= ~TRB_CHAIN;
field |= TRB_IOC;
more_trbs_coming = false;
need_zero_pkt = false;
preq->td.last_trb = ring->enqueue;
}
}
/* Only set interrupt on short packet for OUT endpoints. */ if (!preq->direction)
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
full_len, preq,
more_trbs_coming,
zero_len_trb);
/* * The transfer burst count field of the isochronous TRB defines the number of * bursts that are required to move all packets in this TD. Only SuperSpeed * devices can burst up to bMaxBurst number of packets per service interval. * This field is zero based, meaning a value of zero in the field means one * burst. Basically, for everything but SuperSpeed devices, this field will be * zero.
*/ staticunsignedint cdnsp_get_burst_count(struct cdnsp_device *pdev, struct cdnsp_request *preq, unsignedint total_packet_count)
{ unsignedint max_burst;
if (pdev->gadget.speed < USB_SPEED_SUPER) return 0;
/* * Returns the number of packets in the last "burst" of packets. This field is * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so * the last burst packet count is equal to the total number of packets in the * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst * must contain (bMaxBurst + 1) number of packets, but the last burst can * contain 1 to (bMaxBurst + 1) packets.
*/ staticunsignedint
cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev, struct cdnsp_request *preq, unsignedint total_packet_count)
{ unsignedint max_burst; unsignedint residue;
if (pdev->gadget.speed >= USB_SPEED_SUPER) { /* bMaxBurst is zero based: 0 means 1 packet per burst. */
max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
residue = total_packet_count % (max_burst + 1);
/* * If residue is zero, the last burst contains (max_burst + 1) * number of packets, but the TLBPC field is zero-based.
*/ if (residue == 0) return max_burst;
/* * Set isoc specific data for the first TRB in a TD. * Prevent HW from getting the TRBs by keeping the cycle state * inverted in the first TDs isoc TRB.
*/
field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
TRB_SIA | TRB_TBC(burst_count);
if (!start_cycle)
field |= TRB_CYCLE;
/* Fill the rest of the TRB fields, and remaining normal TRBs. */ for (i = 0; i < trbs_per_td; i++) {
u32 remainder;
/* Only first TRB is isoc, overwrite otherwise. */ if (i) {
field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
length_field |= TRB_TD_SIZE(remainder);
} else {
length_field |= TRB_TD_SIZE_TBC(burst_count);
}
/* Only set interrupt on short packet for OUT EPs. */ if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
field |= TRB_ISP;
/* Set the chain bit for all except the last TRB. */ if (i < trbs_per_td - 1) {
more_trbs_coming = true;
field |= TRB_CHAIN;
} else {
more_trbs_coming = false;
preq->td.last_trb = ep_ring->enqueue;
field |= TRB_IOC;
}
cleanup: /* Clean up a partially enqueued isoc transfer. */
list_del_init(&preq->td.td_list);
ep_ring->num_tds--;
/* * Use the first TD as a temporary variable to turn the TDs we've * queued into No-ops with a software-owned cycle bit. * That way the hardware won't accidentally start executing bogus TDs * when we partially overwrite them. * td->first_trb and td->start_seg are already set.
*/
preq->td.last_trb = ep_ring->enqueue; /* Every TRB except the first & last will have its cycle bit flipped. */
cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
ep_ring->enqueue = preq->td.first_trb;
ep_ring->enq_seg = preq->td.start_seg;
ep_ring->cycle_state = start_cycle; return ret;
}
/**** Command Ring Operations ****/

/*
 * NOTE(review): the source was truncated at this point by the extraction
 * tool ("maximum size reached"); the command-ring operations that follow in
 * the original file are not present in this copy.
 */