// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */
/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */
/* * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA * address of the TRB.
*/
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb)
{ unsignedlong segment_offset;
/*
 * Convert a TRB into a no-op of the requested type, or neutralize a link TRB.
 * Non-link TRBs keep only their cycle bit so ownership is preserved.
 */
static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (!trb_is_link(trb)) {
		/* Zero the TRB but keep its cycle bit, then stamp the no-op type */
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
		return;
	}

	/* Link TRBs stay links; just unchain them from the surrounding TD */
	trb->link.control &= cpu_to_le32(~TRB_CHAIN);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next * TRB is in a new segment. This does not skip over link TRBs, and it does not * effect the ring dequeue or enqueue pointers.
*/ staticvoid next_trb(struct xhci_segment **seg, union xhci_trb **trb)
{ if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
(*trb)++;
}
}
/*
 * Advance the ring's dequeue pointer one TRB, following link TRBs into the
 * next segment as needed. See Cycle bit rules; SW is the consumer for the
 * event ring only, which has no link TRBs and wraps at the segment boundary.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned int link_hops = 0;

	/* Event rings carry no link TRBs; wrap manually at the segment end */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			return;
		}
		/* Toggle CCS when wrapping past the final TRB of the ring */
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		trace_xhci_inc_deq(ring);
		return;
	}

	/* All other rings have link trbs; step within the segment first */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		else
			ring->dequeue++;
	}

	/* ...then follow any link TRB(s) into the next segment(s) */
	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		trace_xhci_inc_deq(ring);

		/* guard against a malformed ring whose links never terminate */
		if (link_hops++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			break;
		}
	}
}
/*
 * If enqueue points at a link TRB, follow links until an ordinary TRB is
 * reached. Each link TRB passed is handed to the hardware (cycle bit toggled)
 * and, when the controller allows it, has its chain bit set as requested.
 */
static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
{
	unsigned int link_hops = 0;

	for (; trb_is_link(ring->enqueue); ) {
		/*
		 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have
		 * the chain bit set, but other sections talk about dealing
		 * with it set. This was fixed in the 0.96 errata, but we must
		 * assume all 0.95 xHCI hardware can't handle the chain bit
		 * being cleared on a link TRB.
		 *
		 * On 0.95 and some 0.96 HCs the chain bit is set once at
		 * segment initialization and never changed here. On all
		 * others, modify it as requested by the caller.
		 */
		if (!xhci_link_chain_quirk(xhci, ring->type)) {
			ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
			ring->enqueue->link.control |= cpu_to_le32(chain);
		}

		/* Give this link TRB to the hardware */
		wmb();
		ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment */
		if (link_trb_toggles_cycle(ring->enqueue))
			ring->cycle_state ^= 1;

		/* bail out of a malformed, endless chain of link TRBs */
		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Link TRB loop at enqueue\n");
			break;
		}
	}
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 * prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;

	/*
	 * BUG FIX: chain was previously read uninitialized. It must reflect
	 * the chain bit of the TRB the caller just wrote at the current
	 * enqueue position, so passed link TRBs keep the TD chained.
	 */
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
		return;
	}

	ring->enqueue++;

	/*
	 * If we are in the middle of a TD or the caller plans to enqueue more
	 * TDs as one transfer (eg. control), traverse any link TRBs right now.
	 * Otherwise, enqueue can stay on a link until the next prepare_ring().
	 * This avoids enqueue entering deq_seg and simplifies ring expansion.
	 */
	if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
		inc_enq_past_link(xhci, ring, chain);
}
/* * If the suspect DMA address is a TRB in this TD, this function returns that * TRB's segment. Otherwise it returns 0.
*/ staticstruct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
{
dma_addr_t start_dma;
dma_addr_t end_seg_dma;
dma_addr_t end_trb_dma; struct xhci_segment *cur_seg;
do { if (start_dma == 0) return NULL; /* We may get an event for a Link TRB in the middle of a TD */
end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
&cur_seg->trbs[TRBS_PER_SEGMENT - 1]); /* If the end TRB isn't in this segment, this is set to 0 */
end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
if (end_trb_dma > 0) { /* The end TRB is in this segment, so suspect should be here */ if (start_dma <= end_trb_dma) { if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) return cur_seg;
} else { /* Case for one segment with * a TD wrapped around to the top
*/ if ((suspect_dma >= start_dma &&
suspect_dma <= end_seg_dma) ||
(suspect_dma >= cur_seg->dma &&
suspect_dma <= end_trb_dma)) return cur_seg;
} return NULL;
} /* Might still be somewhere in this segment */ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) return cur_seg;
/* * Return number of free normal TRBs from enqueue to dequeue pointer on ring. * Not counting an assumed link TRB at end of each TRBS_PER_SEGMENT sized segment. * Only for transfer and command rings where driver is the producer, not for * event rings.
*/ staticunsignedint xhci_num_trbs_free(struct xhci_ring *ring)
{ struct xhci_segment *enq_seg = ring->enq_seg; union xhci_trb *enq = ring->enqueue; union xhci_trb *last_on_seg; unsignedint free = 0; int i = 0;
/* Ring might be empty even if enq != deq if enq is left on a link trb */ if (trb_is_link(enq)) {
enq_seg = enq_seg->next;
enq = enq_seg->trbs;
}
/* Empty ring, common case, don't walk the segments */ if (enq == ring->dequeue) return ring->num_segs * (TRBS_PER_SEGMENT - 1);
/* * Check to see if there's room to enqueue num_trbs on the ring and make sure * enqueue pointer will not advance into dequeue segment. See rules above. * return number of new segments needed to ensure this.
*/
staticunsignedint xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring, unsignedint num_trbs)
{ struct xhci_segment *seg; int trbs_past_seg; int enq_used; int new_segs;
enq_used = ring->enqueue - ring->enq_seg->trbs;
/* how many trbs will be queued past the enqueue segment? */
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
/* * Consider expanding the ring already if num_trbs fills the current * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into * the next segment. Avoids confusing full ring with special empty ring * case below
*/ if (trbs_past_seg < 0) return 0;
/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */ if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue) return 0;
while (new_segs > 0) {
seg = seg->next; if (seg == ring->deq_seg) {
xhci_dbg(xhci, "Adding %d trbs requires expanding ring by %d segments\n",
num_trbs, new_segs); return new_segs;
}
new_segs--;
}
return 0;
}
/* Ring the host controller doorbell after placing a command on the ring */ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) return;
/*
 * Turn all commands on the command ring with status set to "aborted" into
 * no-op TRBs. If other commands are still waiting, restart the ring and kick
 * the command timer. Must be called with the command ring stopped and
 * xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(cmd, &xhci->cmd_list, cmd_list) {
		if (cmd->status != COMP_COMMAND_ABORTED)
			continue;

		cmd->status = COMP_COMMAND_RING_STOPPED;
		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 cmd->command_trb);
		trb_to_noop(cmd->command_trb, TRB_CMD_NOOP);
		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* Nothing left to run, or the host is going away: don't restart */
	if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue ||
	    (xhci->xhc_state & XHCI_STATE_DYING))
		return;

	/* ring command ring doorbell to restart the command ring */
	xhci->current_cmd = cur_cmd;
	if (cur_cmd)
		xhci_mod_cmd_timer(xhci);
	xhci_ring_cmd_db(xhci);
}
/* Must be called with xhci->lock held, releases and acquires lock back */ staticint xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsignedlong flags)
{ struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg; union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
u64 crcr; int ret;
/* * The control bits like command stop, abort are located in lower * dword of the command ring control register. * Some controllers require all 64 bits to be written to abort the ring. * Make sure the upper dword is valid, pointing to the next command, * avoiding corrupting the command ring pointer in case the command ring * is stopped by the time the upper dword is written.
*/
next_trb(&new_seg, &new_deq); if (trb_is_link(new_deq))
next_trb(&new_seg, &new_deq);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the * completion of the Command Abort operation. If CRR is not negated in 5 * seconds then driver handles it as if host died (-ENODEV). * In the future we should distinguish between -ENODEV and -ETIMEDOUT * and try to recover a -ETIMEDOUT with a host controller reset.
*/
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000); if (ret < 0) {
xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
xhci_halt(xhci);
xhci_hc_died(xhci); return ret;
} /* * Writing the CMD_RING_ABORT bit should cause a cmd completion event, * however on some host hw the CMD_RING_RUNNING bit is correctly cleared * but the completion event in never sent. Wait 2 secs (arbitrary * number) to handle those cases after negation of CMD_RING_RUNNING.
*/
spin_unlock_irqrestore(&xhci->lock, flags);
ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
msecs_to_jiffies(2000));
spin_lock_irqsave(&xhci->lock, flags); if (!ret) {
xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
} else {
xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
} return 0;
}
/* Don't ring the doorbell for this endpoint if there are pending * cancellations because we don't want to interrupt processing. * We don't want to restart any stream rings if there's a set dequeue * pointer command pending because the device can choose to start any * stream once the endpoint is on the HW schedule.
*/ if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
(ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT)) return;
writel(DB_VALUE(ep_index, stream_id), db_addr); /* flush the write */
readl(db_addr);
}
/* Ring the doorbell for any rings with pending URBs */ staticvoid ring_doorbell_for_active_rings(struct xhci_hcd *xhci, unsignedint slot_id, unsignedint ep_index)
{ unsignedint stream_id; struct xhci_virt_ep *ep;
ep = &xhci->devs[slot_id]->eps[ep_index];
/* A ring has pending URBs if its TD list is not empty */ if (!(ep->ep_state & EP_HAS_STREAMS)) { if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); return;
}
/* Get the right ring for the given slot_id, ep_index and stream_id. * If the endpoint supports streams, boundary check the URB's stream ID. * If the endpoint doesn't support streams, return the singular endpoint ring.
*/ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, unsignedint slot_id, unsignedint ep_index, unsignedint stream_id)
{ struct xhci_virt_ep *ep;
ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return NULL;
/* * Get the hw dequeue pointer xHC stopped on, either directly from the * endpoint context, or if streams are in use from the stream context. * The returned hw_dequeue contains the lowest four bits with cycle state * and possbile stream context type.
*/ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, unsignedint ep_index, unsignedint stream_id)
{ struct xhci_ep_ctx *ep_ctx; struct xhci_stream_ctx *st_ctx; struct xhci_virt_ep *ep;
/* * Walk the ring until both the next TRB and hw_dequeue are found (don't * move hw_dequeue back if it went forward due to a HW bug). Cycle state * is loaded from a known good TRB, track later toggles to maintain it.
*/ do { if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq)
== (dma_addr_t)(hw_dequeue & ~0xf)) {
hw_dequeue_found = true; if (td_last_trb_found) break;
} if (new_deq == td->end_trb)
td_last_trb_found = true;
if (td_last_trb_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
new_cycle ^= 0x1;
next_trb(&new_seg, &new_deq);
/* Search wrapped around, bail out */ if (new_deq == ep->ring->dequeue) {
xhci_err(xhci, "Error: Failed finding new dequeue state\n"); return -EINVAL;
}
} while (!hw_dequeue_found || !td_last_trb_found);
/* Don't update the ring cycle state for the producer (us). */
addr = xhci_trb_virt_to_dma(new_seg, new_deq); if (addr == 0) {
xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq); return -EINVAL;
}
if ((ep->ep_state & SET_DEQ_PENDING)) {
xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
&addr); return -EBUSY;
}
/* This function gets called from contexts where it cannot sleep */
cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!cmd) {
xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr); return -ENOMEM;
}
/* Stop the TD queueing code from ringing the doorbell until * this command completes. The HC won't set the dequeue pointer * if the ring is running, and ringing the doorbell starts the * ring running.
*/
ep->ep_state |= SET_DEQ_PENDING;
xhci_ring_cmd_db(xhci); return 0;
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB. * (The last TRB actually points to the ring enqueue pointer, which is not part * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/ staticvoid td_to_noop(struct xhci_td *td, bool flip_cycle)
{ struct xhci_segment *seg = td->start_seg; union xhci_trb *trb = td->start_trb;
while (1) {
trb_to_noop(trb, TRB_TR_NOOP);
/* flip cycle if asked to */ if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
/* Clean up the endpoint's TD list */
urb = td->urb;
/* if a bounce buffer was used to align this td then unmap it */
xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
/* Do one last check of the actual transfer length. * If the host controller said we transferred more data than the buffer * length, urb->actual_length will be a very big number (since it's * unsigned). Play it safe and say we didn't transfer anything.
*/ if (urb->actual_length > urb->transfer_buffer_length) {
xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
urb->transfer_buffer_length, urb->actual_length);
urb->actual_length = 0;
status = 0;
} /* TD might be removed from td_list if we are giving back a cancelled URB */ if (!list_empty(&td->td_list))
list_del_init(&td->td_list); /* Giving back a cancelled URB, or if a slated TD completed anyway */ if (!list_empty(&td->cancelled_td_list))
list_del_init(&td->cancelled_td_list);
inc_td_cnt(urb); /* Giveback the urb when all the tds are completed */ if (last_td_in_urb(td)) { if ((urb->actual_length != urb->transfer_buffer_length &&
(urb->transfer_flags & URB_SHORT_NOT_OK)) ||
(status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
urb, urb->actual_length,
urb->transfer_buffer_length, status);
/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
status = 0;
xhci_giveback_urb_in_irq(xhci, td, status);
}
}
/* Give back previous TD and move on to the next TD. */
static void xhci_dequeue_td(struct xhci_hcd *xhci, struct xhci_td *td, struct xhci_ring *ring,
			    u32 status)
{
	/* Park the software dequeue on the TD's last TRB, then step past it */
	ring->deq_seg = td->end_seg;
	ring->dequeue = td->end_trb;
	inc_deq(xhci, ring);

	/* Hand the finished TD back with the given completion status */
	xhci_td_cleanup(xhci, td, ring, status);
}
/* Complete the cancelled URBs we unlinked from td_list. */ staticvoid xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{ struct xhci_ring *ring; struct xhci_td *td, *tmp_td;
/* * Avoid resetting endpoint if link is inactive. Can cause host hang. * Device will be reset soon to recover the link so don't do anything
*/ if (ep->vdev->flags & VDEV_PORT_ERROR) return -ENODEV;
/* add td to cancelled list and let reset ep handler take care of it */ if (reset_type == EP_HARD_RESET) {
ep->ep_state |= EP_HARD_CLEAR_TOGGLE; if (td && list_empty(&td->cancelled_td_list)) {
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
td->cancel_status = TD_HALTED;
}
}
if (ep->ep_state & EP_HALTED) {
xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
ep->ep_index); return 0;
}
err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type); if (err) return err;
ep->ep_state |= EP_HALTED;
xhci_ring_cmd_db(xhci);
return 0;
}
/* * Fix up the ep ring first, so HW stops executing cancelled TDs. * We have the xHCI lock, so nothing can modify this list until we drop it. * We're also in the event handler, so we can't get re-interrupted if another * Stop Endpoint command completes. * * only call this when ring is not in a running state
*/
/* * This is not going to work if the hardware is changing its dequeue * pointers as we look at them. Completion handler will call us later.
*/ if (ep->ep_state & SET_DEQ_PENDING) return 0;
xhci = ep->xhci;
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
(unsignedlonglong)xhci_trb_virt_to_dma(
td->start_seg, td->start_trb),
td->urb->stream_id, td->urb);
list_del_init(&td->td_list);
ring = xhci_urb_to_transfer_ring(xhci, td->urb); if (!ring) {
xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
td->urb, td->urb->stream_id); continue;
} /* * If a ring stopped on the TD we need to cancel then we have to * move the xHC endpoint ring dequeue pointer past this TD. * Rings halted due to STALL may show hw_deq is past the stalled * TD, but still require a set TR Deq command to flush xHC cache.
*/
hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
td->urb->stream_id);
hw_deq &= ~0xf;
if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) { switch (td->cancel_status) { case TD_CLEARED: /* TD is already no-op */ case TD_CLEARING_CACHE: /* set TR deq command already queued */ break; case TD_DIRTY: /* TD is cached, clear it */ case TD_HALTED: case TD_CLEARING_CACHE_DEFERRED: if (cached_td) { if (cached_td->urb->stream_id != td->urb->stream_id) { /* Multiple streams case, defer move dq */
xhci_dbg(xhci, "Move dq deferred: stream %u URB %p\n",
td->urb->stream_id, td->urb);
td->cancel_status = TD_CLEARING_CACHE_DEFERRED; break;
}
/* Should never happen, but clear the TD if it does */
xhci_warn(xhci, "Found multiple active URBs %p and %p in stream %u?\n",
td->urb, cached_td->urb,
td->urb->stream_id);
td_to_noop(cached_td, false);
cached_td->cancel_status = TD_CLEARED;
}
td_to_noop(td, false);
td->cancel_status = TD_CLEARING_CACHE;
cached_td = td; break;
}
} else {
td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
}
/* If there's no need to move the dequeue pointer then we're done */ if (!cached_td) return 0;
err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
cached_td->urb->stream_id,
cached_td); if (err) { /* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { /* * Deferred TDs need to have the deq pointer set after the above command * completes, so if that failed we just give up on all of them (and * complain loudly since this could cause issues due to caching).
*/ if (td->cancel_status != TD_CLEARING_CACHE &&
td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue;
xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
td->urb);
td_to_noop(td, false);
td->cancel_status = TD_CLEARED;
}
} return 0;
}
/*
 * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
 * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
 * stopped on. Those will be given back later when the commands complete.
 *
 * Call under xhci->lock on a stopped endpoint.
 */
void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
{
	/* No-op cancelled TDs on the ring (may queue a Set TR Deq command) */
	xhci_invalidate_cancelled_tds(ep);
	/* Complete the cancelled URBs that were unlinked above */
	xhci_giveback_invalidated_tds(ep);
}
/* * Returns the TD the endpoint ring halted on. * Only call for non-running rings without streams.
*/ staticstruct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{ struct xhci_td *td;
u64 hw_deq;
/* * When we get a command completion for a Stop Endpoint Command, we need to * unlink any cancelled TDs from the ring. There are two ways to do that: * * 1. If the HW was in the middle of processing the TD that needs to be * cancelled, then we must move the ring's dequeue pointer past the last TRB * in the TD with a Set Dequeue Pointer Command. * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain * bit cleared) so that the HW will skip over them.
*/ staticvoid xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, union xhci_trb *trb, u32 comp_code)
{ unsignedint ep_index; struct xhci_virt_ep *ep; struct xhci_ep_ctx *ep_ctx; struct xhci_td *td = NULL; enum xhci_ep_reset_type reset_type; struct xhci_command *command; int err;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { if (!xhci->devs[slot_id])
xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
slot_id); return;
}
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return;
if (comp_code == COMP_CONTEXT_STATE_ERROR) { /* * If stop endpoint command raced with a halting endpoint we need to * reset the host side endpoint first. * If the TD we halted on isn't cancelled the TD should be given back * with a proper error code, and the ring dequeue moved past the TD. * If streams case we can't find hw_deq, or the TD we halted on so do a * soft reset. * * Proper error code is unknown here, it would be -EPIPE if device side * of enadpoit halted (aka STALL), and -EPROTO if not (transaction error) * We use -EPROTO, if device is stalled it should return a stall error on * next transfer, which then will return -EPIPE, and device side stall is * noted and cleared by class driver.
*/ switch (GET_EP_CTX_STATE(ep_ctx)) { case EP_STATE_HALTED:
xhci_dbg(xhci, "Stop ep completion raced with stall\n"); /* * If the halt happened before Stop Endpoint failed, its transfer event * should have already been handled and Reset Endpoint should be pending.
*/ if (ep->ep_state & EP_HALTED) goto reset_done;
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
reset_type = EP_HARD_RESET;
td = find_halted_td(ep); if (td)
td->status = -EPROTO;
} /* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err); if (err) break;
reset_done: /* Reset EP handler will clean up cancelled TDs */
ep->ep_state &= ~EP_STOP_CMD_PENDING; return; case EP_STATE_STOPPED: /* * Per xHCI 4.6.9, Stop Endpoint command on a Stopped * EP is a Context State Error, and EP stays Stopped. * * But maybe it failed on Halted, and somebody ran Reset * Endpoint later. EP state is now Stopped and EP_HALTED * still set because Reset EP handler will run after us.
*/ if (ep->ep_state & EP_HALTED) break; /* * On some HCs EP state remains Stopped for some tens of * us to a few ms or more after a doorbell ring, and any * new Stop Endpoint fails without aborting the restart. * This handler may run quickly enough to still see this * Stopped state, but it will soon change to Running. * * Assume this bug on unexpected Stop Endpoint failures. * Keep retrying until the EP starts and stops again, on * chips where this is known to help. Wait for 100ms.
*/ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) break;
fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
xhci_invalidate_cancelled_tds(ep);
ep->ep_state &= ~EP_STOP_CMD_PENDING;
/* Otherwise ring the doorbell(s) to restart queued transfers */
xhci_giveback_invalidated_tds(ep);
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return;
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) { int stream_id;
for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
ring = ep->stream_info->stream_rings[stream_id]; if (!ring) continue;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Killing URBs for slot ID %u, ep index %u, stream %u",
slot_id, ep_index, stream_id);
xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring; if (!ring) return;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Killing URBs for slot ID %u, ep index %u",
slot_id, ep_index);
xhci_kill_ring_urbs(xhci, ring);
}
if (last_td_in_urb(cur_td))
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
/*
 * host controller died, register read returns 0xffffffff
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * lock is relased and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	bool notify;
	int slot, ep;

	/* already marked dead, nothing more to do */
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	/* only complain if PCI remove isn't already tearing us down */
	notify = !(xhci->xhc_state & XHCI_STATE_REMOVING);
	if (notify)
		xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (slot = 0; slot <= HCS_MAX_SLOTS(xhci->hcs_params1); slot++) {
		if (!xhci->devs[slot])
			continue;
		for (ep = 0; ep < 31; ep++)
			xhci_kill_endpoint_urbs(xhci, slot, ep);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (notify)
		usb_hc_died(xhci_to_hcd(xhci));
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
				    union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_stream_ctx *stream_ctx;
	struct xhci_td *td, *tmp_td;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
			  stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	/*
	 * BUG FIX: ep_ctx and slot_ctx were dereferenced uninitialized below.
	 * Look up the output contexts the xHC reports its state in.
	 */
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       "Slot state = %u, EP state = %u",
				       slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
				  slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
				  cmd_comp_code);
			break;
		}
		/*
		 * OK what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct. This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			/* BUG FIX: stream_ctx was previously uninitialized */
			stream_ctx = &ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(stream_ctx->stream_ring) & SCTX_DEQ_MASK;

			/*
			 * Cadence xHCI controllers store some endpoint state
			 * information within Rsvd0 fields of Stream Endpoint
			 * context. This field is not cleared during Set TR
			 * Dequeue Pointer command which causes XDMA to skip
			 * over transfer ring and leads to data loss on stream
			 * pipe.
			 * To fix this issue driver must clear Rsvd0 field.
			 */
			if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
				stream_ctx->reserved[0] = 0;
				stream_ctx->reserved[1] = 0;
			}
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/*
			 * Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			ep_ring->deq_seg = ep->queued_deq_seg;
			ep_ring->dequeue = ep->queued_deq_ptr;
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}
	/* HW cached TDs cleared from cache, give them back */
	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {
		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
		if (td->cancel_status == TD_CLEARING_CACHE) {
			td->cancel_status = TD_CLEARED;
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
	}
cleanup:
	ep->ep_state &= ~SET_DEQ_PENDING;
	ep->queued_deq_seg = NULL;
	ep->queued_deq_ptr = NULL;

	/* Check for deferred or newly cancelled TDs */
	if (!list_empty(&ep->cancelled_td_list)) {
		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
			 __func__);
		xhci_invalidate_cancelled_tds(ep);
		/* Try to restart the endpoint if all is done */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		/* Start giving back any TDs invalidated above */
		xhci_giveback_invalidated_tds(ep);
	} else {
		/* Restart any rings with pending URBs */
		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}
/*
 * Handle completion of a Reset Endpoint command for @slot_id.
 *
 * Extracts the endpoint index from the command TRB, gives back any TDs that
 * were cancelled while the endpoint was halted, clears the driver's internal
 * halted state, and — for a soft reset (TRB_TSP set in the command TRB) —
 * restarts the endpoint by ringing the doorbell.
 *
 * Fixed: "staticvoid"/"unsignedint" token fusions (compile errors) and an
 * unused local "struct xhci_ep_ctx *ep_ctx" declaration.
 */
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
				     union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		       "Ignoring reset ep completion code of %u", cmd_comp_code);

	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
	xhci_invalidate_cancelled_tds(ep);

	/* Clear our internal halted state */
	ep->ep_state &= ~EP_HALTED;

	xhci_giveback_invalidated_tds(ep);

	/* if this was a soft reset, then restart */
	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
/* * Configure endpoint commands can come from the USB core configuration * or alt setting changes, or when streams were being configured.
*/
virt_dev = xhci->devs[slot_id]; if (!virt_dev) return;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n"); return;
}
add_flags = le32_to_cpu(ctrl_ctx->add_flags);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(add_flags) - 1;
/* * If timeout work is pending, or current_cmd is NULL, it means we * raced with command completion. Command is handled so just return.
*/ if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
spin_unlock_irqrestore(&xhci->lock, flags); return;
}
/* mark this command to be cancelled */
xhci->current_cmd->status = COMP_COMMAND_ABORTED;
/* Make sure command ring is running before aborting it */
hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); if (hw_ring_state == ~(u64)0) {
xhci_hc_died(xhci); goto time_out_completed;
}
if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
(hw_ring_state & CMD_RING_RUNNING)) { /* Prevent new doorbell, and start command abort */
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
xhci_dbg(xhci, "Command timeout\n");
xhci_abort_cmd_ring(xhci, flags); goto time_out_completed;
}
/* host removed. Bail out */ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
xhci_dbg(xhci, "host removed, ring start fail?\n");
xhci_cleanup_command_queue(xhci);
goto time_out_completed;
}
/* command timeout on stopped ring, ring can't be aborted */
xhci_dbg(xhci, "Command timeout on stopped ring\n");
xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
/* If CMD ring stopped we own the trbs between enqueue and dequeue */ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
complete_all(&xhci->cmd_ring_stop_completion); return;
}
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
cmd_trb); /* * Check whether the completion event is for our internal kept * command.
*/ if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
xhci_warn(xhci, "ERROR mismatched command completion event\n"); return;
}
if (cmd->command_trb != xhci->cmd_ring->dequeue) {
xhci_err(xhci, "Command completion event does not match command\n"); return;
}
/* * Host aborted the command ring, check if the current command was * supposed to be aborted, otherwise continue normally. * The command ring is stopped now, but the xHC will issue a Command * Ring Stopped event which will cause us to restart it.
*/ if (cmd_comp_code == COMP_COMMAND_ABORTED) {
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; if (cmd->status == COMP_COMMAND_ABORTED) { if (xhci->current_cmd == cmd)
xhci->current_cmd = NULL; goto event_handled;
}
}
cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); switch (cmd_type) { case TRB_ENABLE_SLOT:
xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code); break; case TRB_DISABLE_SLOT:
xhci_handle_cmd_disable_slot(xhci, slot_id, cmd_comp_code); break; case TRB_CONFIG_EP: if (!cmd->completion)
xhci_handle_cmd_config_ep(xhci, slot_id); break; case TRB_EVAL_CONTEXT: break; case TRB_ADDR_DEV:
xhci_handle_cmd_addr_dev(xhci, slot_id); break; case TRB_STOP_RING:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3]))); if (!cmd->completion)
xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
cmd_comp_code); break; case TRB_SET_DEQ:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_CMD_NOOP: /* Is this an aborted command turned to NO-OP? */ if (cmd->status == COMP_COMMAND_RING_STOPPED)
cmd_comp_code = COMP_COMMAND_RING_STOPPED; break; case TRB_RESET_EP:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_RESET_DEV: /* SLOT_ID field in reset device cmd completion event TRB is 0. * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
*/
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3]));
xhci_handle_cmd_reset_dev(xhci, slot_id); break; case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event); break; case TRB_GET_BW: break; default: /* Skip over unknown commands on the event ring */
xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); break;
}
/* restart timer if this wasn't the last command */ if (!list_is_singular(&xhci->cmd_list)) {
xhci->current_cmd = list_first_entry(&cmd->cmd_list, struct xhci_command, cmd_list);
xhci_mod_cmd_timer(xhci);
} elseif (xhci->current_cmd == cmd) {
xhci->current_cmd = NULL;
}
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); if (!xhci->devs[slot_id]) {
xhci_warn(xhci, "Device Notification event for " "unused slot %u\n", slot_id); return;
}
xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
slot_id);
udev = xhci->devs[slot_id]->udev; if (udev && udev->parent)
usb_wakeup_notification(udev->parent, udev->portnum);
}
/* * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI * Controller. * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1 * if a connection to a USB 1 device is followed by another connection * to a USB 2 device. * * Reset the PHY after the USB device is disconnected if device speed * is less than HCD_USB3. * Retry the reset sequence max of 4 times checking the PLL lock status. *
*/ staticvoid xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{ struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 pll_lock_check;
u32 retry_count = 4;
/* Port status change events always have a successful completion code */ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id); return;
}
port = &xhci->hw_ports[port_id - 1]; if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Port change event, no port for port ID %u\n",
port_id);
bogus_port_status = true; goto cleanup;
}
/* We might get interrupts after shared_hcd is removed */ if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
bogus_port_status = true; goto cleanup;
}
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) { if (port->slot_id && xhci->devs[port->slot_id])
xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
xhci_dbg(xhci, "port resume event for port %d\n", port_id);
cmd_reg = readl(&xhci->op_regs->command); if (!(cmd_reg & CMD_RUN)) {
xhci_warn(xhci, "xHC is not running.\n"); goto cleanup;
}
if (DEV_SUPERSPEED_ANY(portsc)) {
xhci_dbg(xhci, "remote wake SS port %d\n", port_id); /* Set a flag to say the port signaled remote wakeup, * so we can tell the difference between the end of * device and host initiated resume.
*/
bus_state->port_remote_wakeup |= 1 << hcd_portnum;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
xhci_set_link_state(xhci, port, XDEV_U0); /* Need to wait until the next link state change * indicates the device is actually in U0.
*/
bogus_port_status = true; goto cleanup;
} elseif (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
port->resume_timestamp = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(hcd_portnum, &bus_state->resuming_ports); /* Do the rest in GetPortStatus after resume time delay. * Avoid polling roothub status before that so that a * usb device auto-resume latency around ~40ms.
*/
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
port->resume_timestamp);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
}
if ((portsc & PORT_PLC) &&
DEV_SUPERSPEED_ANY(portsc) &&
((portsc & PORT_PLS_MASK) == XDEV_U0 ||
(portsc & PORT_PLS_MASK) == XDEV_U1 ||
(portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
complete(&port->u3exit_done); /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. If it's a device * initiated remote wake, don't pass up the link state change, * so the roothub behavior is consistent with external * USB 3.0 hub behavior.
*/ if (port->slot_id && xhci->devs[port->slot_id])
xhci_ring_device(xhci, port->slot_id); if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
hcd_portnum + 1);
bogus_port_status = true; goto cleanup;
}
}
/* * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or * RExit to a disconnect state). If so, let the driver know it's * out of the RExit state.
*/ if (hcd->speed < HCD_USB3 && port->rexit_active) {
complete(&port->rexit_done);
port->rexit_active = false;
bogus_port_status = true; goto cleanup;
}
/*
 * NOTE(review): extraneous German website-disclaimer text, apparently pasted
 * in by accident — wrapped in a comment so the file stays compilable; confirm
 * and remove. English gist: "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the provided information is guaranteed.
 * Remark: the syntax colouring and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */