SSL oxu210hp-hcd.c
Interaktion und PortierbarkeitC
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it> * * This code is *strongly* based on EHCI-HCD code by David Brownell since * the chip is a quasi-EHCI compatible.
*/
/* Magic numbers that can affect system performance */*8 , usecs
java.lang.StringIndexOutOfBoundsException: Index 6 out of bounds for length 6 #define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */ #define EHCI_TUNE_RL_TT 0 #define EHCI_TUNE_MULT_HSjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 # EHCI_TUNE_MULT_TT #define EHCI_TUNE_FLS 2 /* (small) 256 frame schedule */
/* Section 2.2 Host Controller Capability Registers */ struct ehci_caps { /* these fields are specified as 8 and 16 bit registers, ; * but some hosts can't perform 8 or 16 bit PCI accesses.
*/
u32 hc_capbase; #definedefault #define HC_VERSION(p) (((p)>>16)&0xffff) /* bits 31:16 */
u32 union * = &oxu-[frame #define HCS_DEBUG_PORT(p /* make sure ehci_work scans these */
define) (&1<6/java.lang.StringIndexOutOfBoundsException: Range [73, 72) out of bounds for length 72 #define HCS_N_CC(p)java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 #define HCS_N_PCC #define HCS_PORTROUTED()java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 #define writel(cmd, &oxu-> java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 4
definejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
u32hcc_paramsoxu- =1 #define HCC_EXT_CAPS(p) (((p)>>8)&0 ( = ) #define HCC_ISOC_CACHE(p) =1 #define HCC_ISOC_THRES(p)java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
:can on qh* #define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/ #define HCC_64BIT_ADDR(p)
u8 portroute[ periodic_unlinkoxui,);
} __packed
/* Type tag from {qh, itd, sitd, fstn}->hw_next */ # qh-usecs*)java.lang.StringIndexOutOfBoundsException: Index 20 out of bounds for length 20
/* values for that type tag */ #define Q_TYPE_QH cpu_to_le32 (1 << 1)
/* next async queue entry, or pointer to interrupt/periodic QH */ #define eturnoxu
/* for periodic/async schedules and qtd lists, mark end of list */
/* * Entries in periodic shadow table are pointers to one of four kinds * of data structure. That's dictated by the hardware; a type tag is * encoded in the low bits of the hardware's periodic schedule. Use * Q_NEXT_TYPE to get the tag. * * For entries in the async schedule, the type tag always says "qh".
*/ unionvoid(structoxu_hcd *xustruct ehci_qhqh struct ehci_qh *qh; /* Q_TYPE_QH */
__le32 *hw_next; /* (all types) */ void*tr
};
/* * EHCI Specification 0.95 Section 3.6 * QH: describes control/bulk/interrupt endpoints * See Fig 3-7 "Queue Head Structure Layout". * * These appear in both the async and (for interrupt) periodic schedules.
*/
struct ehci_qh { /* first part defined by EHCI spec */
_ * (and overlay token SplitXstate is false?)
__le32riod == 0) #define QH_HEAD 0x00008000
__le32 #define QH_SMASK 0x000000ff #define i >; >; =) #define QH_HUBADDR 0x007f0000 #defineQH_HUBPORTx3f800000 #define QH_MULT 0xc0000000
_java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
/* qtd overlay (hardware parts of a struct ehci_qtd) */
le32
__le32 hw_alt_nextqh-,
__le32 hw_token;
__le32 hw_buf[5];
__le32 hw_buf_hi[5];
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */ union ehci_shadow qh_next; /* ptr to qh; or periodic */ struct list_head qtd_list; /* sw qtd list */ struct ehci_qtd *dummy; struct ehci_qh *reclaim; /* next to reclaim */
u8 qh_state; #define QH_STATE_LINKED 1 /* HC sees this */ #define QH_STATE_UNLINK 2 /* HC may still see this */ #define QH_STATE_IDLE 3 /* HC doesn't see this */ #define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */ #define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
/* Only how many elements & element structure are specifies here. */ /* 2 host controllers are enabled - total size <= 28 kbytes */ #define DEFAULT_I_TDPS 1024 #define QHEAD_NUM 16 #define QTD_NUM 32 #define SITD_NUM 8 #define MURB_NUM 8
union ehci_shadow *pshadow; /* mirror hw periodic table */ int next_uframe; /* scan periodic, start here */ unsignedint periodic_sched; /* periodic activity count */
/* per root hub port */ unsignedlong reset_done[EHCI_MAX_ROOT_PORTS]; /* bit vectors (one bit per port) */ unsignedlong bus_suspended; /* which ports were * already suspended at the * start of a bus suspend
*/ unsignedlong companion_ports;/* which ports are dedicated * to the companion controller
*/
/* SILICON QUIRKS */ struct list_head urb_list; /* this is the head to urb * queue that didn't get enough * resources
*/ struct oxu_murb *murb_pool; /* murb per split big urb */ unsignedint urb_len;
switch (action) { case TIMER_IAA_WATCHDOG:
t = EHCI_IAA_JIFFIES; break; case TIMER_IO_WATCHDOG:
t = EHCI_IO_JIFFIES; break; case TIMER_ASYNC_OFF:
t = EHCI_ASYNC_JIFFIES; break; case TIMER_ASYNC_SHRINK: default:
t = EHCI_SHRINK_JIFFIES; break;
}
t += jiffies; /* all timings except IAA watchdog can be overridden. * async queue SHRINK often precedes IAA. while it's ready * to go OFF neither can matter, and afterwards the IO * watchdog stops unless there's still periodic traffic.
*/ if (action != TIMER_IAA_WATCHDOG
&& t > oxu->watchdog.expires
&& timer_pending(&oxu->watchdog)) return;
mod_timer(&oxu->watchdog, t);
}
}
/* * handshake - spin reading hc until handshake completes or fails * @ptr: address of hc register to be read * @mask: bits to look at in result of read * @done: value of those bits when handshake succeeds * @usec: timeout in microseconds * * Returns negative errno, or zero on success * * Success happens when the "mask" bits have the specified value (hardware * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). * * That last failure should_only happen in cases like physical cardbus eject * before driver shutdown. But it also seems to be caused by bugs in cardbus * bridge shutdown: shutting down the bridge before the devices using it.
*/ staticint handshake(struct oxu_hcd *oxu, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
u32 result; int ret;
ret = readl_poll_timeout_atomic(ptr, result,
((result & mask) == done ||
result == U32_MAX),
1, usec); if (result == U32_MAX) /* card removed */ return -ENODEV;
return ret;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */ staticint ehci_halt(struct oxu_hcd *oxu)
{
u32 temp = readl(&oxu->regs->status);
/* disable any irqs left enabled by previous code */
writel(0, &oxu->regs->intr_enable);
/* wait for any schedule enables/disables to take effect */
temp = readl(&oxu->regs->command) << 10;
temp &= STS_ASS | STS_PSS; if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
temp, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT; return;
}
/* then disable anything that's still active */
temp = readl(&oxu->regs->command);
temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
writel(temp, &oxu->regs->command);
/* hardware can take 16 microframes to turn off ... */ if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
0, 16 * 125) != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT; return;
}
}
staticint check_reset_complete(struct oxu_hcd *oxu, int index,
u32 __iomem *status_reg, int port_status)
{ if (!(port_status & PORT_CONNECT)) {
oxu->reset_done[index] = 0; return port_status;
}
/* if reset finished and it's still not enabled -- handoff */ if (!(port_status & PORT_PE)) {
oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
index+1); return port_status;
} else
oxu_dbg(oxu, "port %d high speed\n", index + 1);
/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */ if (HCS_PPC(oxu->hcs_params))
temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */ else
temp |= HUB_CHAR_NO_LPSM; /* no power switching */
desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
/* Allocate an OXU210HP on-chip memory data buffer * * An on-chip memory data buffer is required for each OXU210HP USB transfer. * Each transfer descriptor has one or more on-chip memory data buffers. * * Data buffers are allocated from a fix sized pool of data blocks. * To minimise fragmentation and give reasonable memory utlisation, * data buffers are allocated with sizes the power of 2 multiples of * the block size, starting on an address a multiple of the allocated size. * * FIXME: callers of this function require a buffer to be allocated for * len=0. This is a waste of on-chip memory and should be fix. Then this * function should be changed to not allocate a buffer for len=0.
*/ staticint oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{ int n_blocks; /* minium blocks needed to hold len */ int a_blocks; /* blocks allocated */ int i, j;
/* Don't allocate bigger than supported */ if (len > BUFFER_SIZE * BUFFER_NUM) {
oxu_err(oxu, "buffer too big (%d)\n", len); return -ENOMEM;
}
spin_lock(&oxu->mem_lock);
/* Number of blocks needed to hold len */
n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;
/* Round the number of blocks up to the power of 2 */ for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
;
/* Find a suitable available data buffer */ for (i = 0; i < BUFFER_NUM;
i += max_t(int, a_blocks, oxu->db_used[i])) {
/* Check all the required blocks are available */ for (j = 0; j < a_blocks; j++) if (oxu->db_used[i + j]) break;
/* clean qtds first, and know this is not linked */ if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
oxu_dbg(oxu, "unused qh not empty!\n");
BUG();
} if (qh->dummy)
oxu_qtd_free(oxu, qh->dummy);
oxu_qh_free(oxu, qh);
}
for (i = 0; i < MURB_NUM; i++) if (!oxu->murb_used[i]) break;
if (i < MURB_NUM) {
murb = &(oxu->murb_pool)[i];
oxu->murb_used[i] = 1;
}
spin_unlock(&oxu->mem_lock);
return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied * to each of the "per device" structures. * This is the initialisation and cleanup code.
*/ staticvoid ehci_mem_cleanup(struct oxu_hcd *oxu)
{
kfree(oxu->murb_pool);
oxu->murb_pool = NULL;
if (oxu->async)
qh_put(oxu->async);
oxu->async = NULL;
/* Remember to add cleanup code (above) if you add anything here.
*/ staticint ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{ int i;
for (i = 0; i < oxu->periodic_size; i++)
oxu->mem->frame_list[i] = EHCI_LIST_END; for (i = 0; i < QHEAD_NUM; i++)
oxu->qh_used[i] = 0; for (i = 0; i < QTD_NUM; i++)
oxu->qtd_used[i] = 0;
oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags); if (!oxu->murb_pool) goto fail;
for (i = 0; i < MURB_NUM; i++)
oxu->murb_used[i] = 0;
oxu->async = oxu_qh_alloc(oxu); if (!oxu->async) goto fail;
/* Fill a qtd, returning how much of the buffer we were able to queue up.
*/ staticint qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token, int maxpacket)
{ int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */ if (likely(len < count)) /* ... iff needed */
count = len; else {
buf += 0x1000;
buf &= ~0x0fff;
/* per-qtd limit: from 16K to 20K (best alignment) */ for (i = 1; count < len && i < 5; i++) {
addr = buf;
qtd->hw_buf[i] = cpu_to_le32((u32)addr);
qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
buf += 0x1000; if ((count + 0x1000) < len)
count += 0x1000; else
count = len;
}
/* short packets may only terminate transfers */ if (count != len)
count -= (count % maxpacket);
}
qtd->hw_token = cpu_to_le32((count << 16) | token);
qtd->length = count;
return count;
}
staticinlinevoid qh_update(struct oxu_hcd *oxu, struct ehci_qh *qh, struct ehci_qtd *qtd)
{ /* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
/* Except for control endpoints, we make hardware maintain data * toggle (like OHCI) ... here (re)initialize the toggle in the QH, * and set the pseudo-toggle in udev. Only usb_clear_halt() will * ever clear it.
*/ if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) { unsigned is_out, epnum;
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb();
qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* If it weren't for a common silicon quirk (writing the dummy into the qh * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault * recovery (including urb dequeue) would need software changes to a QH...
*/ staticvoid qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{ struct ehci_qtd *qtd;
if (list_empty(&qh->qtd_list))
qtd = qh->dummy; else {
qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list); /* first qtd may already be partially processed */ if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
/* Process and free completed qtds for a qh, returning URBs to drivers. * Chases up to qh->hw_current. Returns number of completions called, * indicating how much "real" work we did.
*/ staticunsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{ struct ehci_qtd *last = NULL, *end = qh->dummy; struct ehci_qtd *qtd, *tmp; int stopped; unsigned count = 0; int do_status = 0;
u8 state; struct oxu_murb *murb = NULL;
if (unlikely(list_empty(&qh->qtd_list))) return count;
/* completions (or tasks on other cpus) must never clobber HALT * till we've gone through and cleaned everything up, even when * they add urbs to this qh's queue or mark them for unlinking. * * NOTE: unlinking expects to be done in queue order.
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
/* remove de-activated QTDs from front of queue. * after faults (including short reads), cleanup this urb * then let the queue advance. * if queue is stopped, handles unlinks.
*/
list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) { struct urb *urb;
u32 token = 0;
urb = qtd->urb;
/* Clean up any state from previous QTD ...*/ if (last) { if (likely(last->urb != urb)) { if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main; if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
}
oxu_qtd_free(oxu, last);
last = NULL;
}
/* ignore urbs submitted during completions we reported */ if (qtd == end) break;
/* hardware copies qtd out of qh overlay */
rmb();
token = le32_to_cpu(qtd->hw_token);
/* always clean up qtds the hc de-activated */ if ((token & QTD_STS_ACTIVE) == 0) {
if ((token & QTD_STS_HALT) != 0) {
stopped = 1;
/* magic dummy for some short reads; qh won't advance. * that silicon quirk can kick in with this dummy too.
*/
} elseif (IS_SHORT_READ(token) &&
!(qtd->hw_alt_next & EHCI_LIST_END)) {
stopped = 1; goto halt;
}
/* stop scanning when we reach qtds the hc is using */
} elseif (likely(!stopped &&
HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) { break;
} else {
stopped = 1;
if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
urb->status = -ESHUTDOWN;
/* ignore active urbs unless some previous qtd * for the urb faulted (including short read) or * its urb was canceled. we may patch qh or qtds.
*/ if (likely(urb->status == -EINPROGRESS)) continue;
/* issue status after short control reads */ if (unlikely(do_status != 0)
&& QTD_PID(token) == 0 /* OUT */) {
do_status = 0; continue;
}
/* token in overlay may be most current */ if (state == QH_STATE_IDLE
&& cpu_to_le32(qtd->qtd_dma)
== qh->hw_current)
token = le32_to_cpu(qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll * patch the qh later and so that completions can't * activate it while we "know" it's stopped.
*/ if ((HALT_BIT & qh->hw_token) == 0) {
halt:
qh->hw_token |= HALT_BIT;
wmb();
}
}
/* Remove it from the queue */
qtd_copy_status(oxu, urb->complete ?
urb : ((struct oxu_murb *) urb)->main,
qtd->length, token); if ((usb_pipein(qtd->urb->pipe)) &&
(NULL != qtd->transfer_buffer))
memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
do_status = (urb->status == -EREMOTEIO)
&& usb_pipecontrol(urb->pipe);
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry(qtd->qtd_list.prev, struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
list_del(&qtd->qtd_list);
last = qtd;
}
/* last urb's completion might still need calling */ if (likely(last != NULL)) { if (last->urb->complete == NULL) {
murb = (struct oxu_murb *) last->urb;
last->urb = murb->main; if (murb->last) {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_murb_free(oxu, murb);
} else {
ehci_urb_done(oxu, last->urb);
count++;
}
oxu_qtd_free(oxu, last);
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
/* be sure the hardware's done with the qh before refreshing * it after fault cleanup, or recovering from silicon wrongly * overlaying the dummy qtd (which reduces DMA chatter).
*/ if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) { switch (state) { case QH_STATE_IDLE:
qh_refresh(oxu, qh); break; case QH_STATE_LINKED: /* should be rare for periodic transfers, * except maybe high bandwidth ...
*/ if ((cpu_to_le32(QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule(oxu, qh);
(void) qh_schedule(oxu, qh);
} else
unlink_async(oxu, qh); break; /* otherwise, unlink already started */
}
}
return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) /* ... and packet size, for any kind of endpoint descriptor */ #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/* Reverse of qh_urb_transaction: free a list of TDs. * used for cleanup after errors, before HC sees an URB's TDs.
*/ staticvoid qtd_list_free(struct oxu_hcd *oxu, struct urb *urb, struct list_head *head)
{ struct ehci_qtd *qtd, *temp;
/* Create a list of filled qtds for this URB; won't link into qh.
*/ staticstruct list_head *qh_urb_transaction(struct oxu_hcd *oxu, struct urb *urb, struct list_head *head,
gfp_t flags)
{ struct ehci_qtd *qtd, *qtd_prev;
dma_addr_t buf; int len, maxpacket; int is_input;
u32 token; void *transfer_buf = NULL; int ret;
/* * URBs map to sequences of QTDs: one logical transaction
*/
qtd = ehci_qtd_alloc(oxu); if (unlikely(!qtd)) return NULL;
list_add_tail(&qtd->qtd_list, head);
qtd->urb = urb;
token = QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10); /* for split transactions, SplitXState initialized to zero */
len = urb->transfer_buffer_length;
is_input = usb_pipein(urb->pipe); if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
if (usb_pipecontrol(urb->pipe)) { /* SETUP pid */
ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest)); if (ret) goto cleanup;
/* * buffer gets wrapped in one or more qtds; * last one may be "short" (including zero len) * and may serve as a control status ack
*/ for (;;) { int this_qtd_len;
/* qh makes control packets use qtd toggle; maybe switch it */ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
token ^= QTD_TOGGLE;
if (likely(len <= 0)) break;
qtd_prev = qtd;
qtd = ehci_qtd_alloc(oxu); if (unlikely(!qtd)) goto cleanup; if (likely(len > 0)) {
ret = oxu_buf_alloc(oxu, qtd, len); if (ret) goto cleanup;
}
qtd->urb = urb;
qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
}
/* unless the bulk/interrupt caller wants a chance to clean * up after short reads, hc should advance qh past this urb
*/ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol(urb->pipe)))
qtd->hw_alt_next = EHCI_LIST_END;
/* * control requests may need a terminating data "status" ack; * bulk ones may need a terminating short packet (zero length).
*/ if (likely(urb->transfer_buffer_length != 0)) { int one_more = 0;
/* Each QH holds a qtd list; a QH is used for everything except iso. * * For interrupt urbs, the scheduler must set the microframe scheduling * mask(s) each time the QH gets scheduled. For highspeed, that's * just one microframe in the s-mask. For split interrupt transactions * there are additional complications: c-mask, maybe FSTNs.
*/ staticstruct ehci_qh *qh_make(struct oxu_hcd *oxu, struct urb *urb, gfp_t flags)
{ struct ehci_qh *qh = oxu_qh_alloc(oxu);
u32 info1 = 0, info2 = 0; int is_input, type; int maxp = 0;
if (!qh) return qh;
/* * init endpoint/device data for this QH
*/
info1 |= usb_pipeendpoint(urb->pipe) << 8;
info1 |= usb_pipedevice(urb->pipe) << 0;
is_input = usb_pipein(urb->pipe);
type = usb_pipetype(urb->pipe);
maxp = usb_maxpacket(urb->dev, urb->pipe);
/* Compute interrupt scheduling parameters just once, and save. * - allowing for high bandwidth, how many nsec/uframe are used? * - split transactions need a second CSPLIT uframe; same question * - splits also need a schedule gap (for full/low speed I/O) * - qh has a polling interval * * For control/bulk requests, the HC or TT handles these.
*/ if (type == PIPE_INTERRUPT) {
qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
is_input, 0,
hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
/* (re)start the async schedule? */
head = oxu->async;
timer_action_done(oxu, TIMER_ASYNC_OFF); if (!head->qh_next.qh) {
u32 cmd = readl(&oxu->regs->command);
if (!(cmd & CMD_ASE)) { /* in case a clear of CMD_ASE didn't take yet */
(void)handshake(oxu, &oxu->regs->status,
STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
writel(cmd, &oxu->regs->command);
oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; /* posted write need not be known to HC yet ... */
}
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */ if (qh->qh_state == QH_STATE_IDLE)
qh_refresh(oxu, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw_next = head->hw_next;
wmb();
head->qh_next.qh = qh;
head->hw_next = dma;
qh->qh_state = QH_STATE_LINKED; /* qtd completions reported later by interrupt */
}
#define QH_ADDR_MASK cpu_to_le32(0x7f)
/* * For control/bulk/interrupt, return QH with these TDs appended. * Allocates and initializes the QH if necessary. * Returns null if it can't allocate a QH it needs to. * If the QH has TDs (urbs) already, that's great.
*/ staticstruct ehci_qh *qh_append_tds(struct oxu_hcd *oxu, struct urb *urb, struct list_head *qtd_list, int epnum, void **ptr)
{ struct ehci_qh *qh = NULL;
qh = (struct ehci_qh *) *ptr; if (unlikely(qh == NULL)) { /* can't sleep here, we have oxu->lock... */
qh = qh_make(oxu, urb, GFP_ATOMIC);
*ptr = qh;
} if (likely(qh != NULL)) { struct ehci_qtd *qtd;
/* control qh may need patching ... */ if (unlikely(epnum == 0)) {
/* usb_reset_device() briefly reverts to address 0 */ if (usb_pipedevice(urb->pipe) == 0)
qh->hw_info1 &= ~QH_ADDR_MASK;
}
/* just one way to queue requests: swap with the dummy qtd. * only hc or qh_refresh() ever modify the overlay.
*/ if (likely(qtd != NULL)) { struct ehci_qtd *dummy;
dma_addr_t dma;
__le32 token;
/* to avoid racing the HC, use the dummy td instead of * the first td of our list (becomes new dummy). both * tds stay deactivated until we're done, when the * HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
qtd->hw_token = HALT_BIT;
wmb();
dummy = qh->dummy;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry(qh->qtd_list.prev, struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT(dma);
/* let the hc process these next qtds */
dummy->hw_token = (token & ~(0x80));
wmb();
dummy->hw_token = token;
/* Control/bulk operations through TTs don't need scheduling, * the HC and TT handle it when the TT has a buffer ready.
*/ if (likely(qh->qh_state == QH_STATE_IDLE))
qh_link_async(oxu, qh_get(qh));
done:
spin_unlock_irqrestore(&oxu->lock, flags); if (unlikely(qh == NULL))
qtd_list_free(oxu, urb, qtd_list); return rc;
}
/* The async qh for the qtds being reclaimed are now unlinked from the HC */
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
oxu->reclaim = next;
oxu->reclaim_ready = 0;
qh->reclaim = NULL;
qh_completions(oxu, qh);
if (!list_empty(&qh->qtd_list)
&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
qh_link_async(oxu, qh); else {
qh_put(qh); /* refcount from async list */
/* it's not free to turn the async schedule on/off; leave it * active but idle for a while once it empties.
*/ if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
&& oxu->async->qh_next.qh == NULL)
timer_action(oxu, TIMER_ASYNC_OFF);
}
if (next) {
oxu->reclaim = NULL;
start_unlink_async(oxu, next);
}
}
/* makes sure the async qh will become idle */ /* caller must own oxu->lock */
/* stop async schedule right now? */ if (unlikely(qh == oxu->async)) { /* can't get here without STS_ASS set */ if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
&& !oxu->reclaim) { /* ... and CMD_IAAD clear */
writel(cmd & ~CMD_ASE, &oxu->regs->command);
wmb(); /* handshake later, if we need to */
timer_action_done(oxu, TIMER_ASYNC_OFF);
} return;
}
if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) { /* if (unlikely(qh->reclaim != 0)) * this will recurse, probably not much
*/
end_unlink_async(oxu); return;
}
if (!++(oxu->stamp))
oxu->stamp++;
timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
qh = oxu->async->qh_next.qh; if (likely(qh != NULL)) { do { /* clean any finished work for this qh */ if (!list_empty(&qh->qtd_list)
&& qh->stamp != oxu->stamp) { int temp;
/* unlinks could happen here; completion * reporting drops the lock. rescan using * the latest schedule, but don't rescan * qhs we already finished (no looping).
*/
qh = qh_get(qh);
qh->stamp = oxu->stamp;
temp = qh_completions(oxu, qh);
qh_put(qh); if (temp != 0) goto rescan;
}
/* unlink idle entries, reducing HC PCI usage as well * as HCD schedule-scanning costs. delay for any qh * we just scanned, there's a not-unusual case that it * doesn't stay idle for long. * (plus, avoids some kind of re-activation race.)
*/ if (list_empty(&qh->qtd_list)) { if (qh->stamp == oxu->stamp)
action = TIMER_ASYNC_SHRINK; elseif (!oxu->reclaim
&& qh->qh_state == QH_STATE_LINKED)
start_unlink_async(oxu, qh);
}
qh = qh->qh_next.qh;
} while (qh);
} if (action == TIMER_ASYNC_SHRINK)
timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/* * periodic_next_shadow - return "next" pointer on shadow list * @periodic: host pointer to qh/itd/sitd * @tag: hardware tag for type of this record
*/ staticunion ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
__le32 tag)
{ switch (tag) { default: case Q_TYPE_QH: return &periodic->qh->qh_next;
}
}
/* caller must hold oxu->lock */ staticvoid periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{ union ehci_shadow *prev_p = &oxu->pshadow[frame];
__le32 *hw_p = &oxu->periodic[frame]; union ehci_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */ while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
hw_p = here.hw_next;
here = *prev_p;
} /* an interrupt entry (at list end) could have been shared */ if (!here.ptr) return;
/* update shadow and hardware lists ... the old "next" pointers * from ptr may still be in use, the caller updates them.
*/
*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
*hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */ staticunsignedshort periodic_usecs(struct oxu_hcd *oxu, unsigned frame, unsigned uframe)
{
__le32 *hw_p = &oxu->periodic[frame]; union ehci_shadow *q = &oxu->pshadow[frame]; unsigned usecs = 0;
while (q->ptr) { switch (Q_NEXT_TYPE(*hw_p)) { case Q_TYPE_QH: default: /* is it in the S-mask? */ if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
usecs += q->qh->usecs; /* ... or C-mask? */ if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next; break;
}
} #ifdef DEBUG if (usecs > 100)
oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs); #endif return usecs;
}
staticint enable_periodic(struct oxu_hcd *oxu)
{
u32 cmd; int status;
/* did clearing PSE did take effect yet? * takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu)); return status;
}
/* make sure ehci_work scans these */
oxu->next_uframe = readl(&oxu->regs->frame_index)
% (oxu->periodic_size << 3); return 0;
}
staticint disable_periodic(struct oxu_hcd *oxu)
{
u32 cmd; int status;
/* did setting PSE not take effect yet? * takes effect only at frame boundaries...
*/
status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); if (status != 0) {
oxu_to_hcd(oxu)->state = HC_STATE_HALT;
usb_hc_died(oxu_to_hcd(oxu)); return status;
}
/* periodic schedule slots have iso tds (normal or split) first, then a * sparse tree for active interrupt transfers. * * this just links in a qh; caller guarantees uframe masks are set right. * no FSTN support (yet; oxu 0.96+)
*/ staticint qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{ unsigned i; unsigned period = qh->period;
/* high bandwidth, or otherwise every microframe */ if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period) { union ehci_shadow *prev = &oxu->pshadow[i];
__le32 *hw_p = &oxu->periodic[i]; union ehci_shadow here = *prev;
__le32 type = 0;
/* skip the iso nodes at list head */ while (here.ptr) {
type = Q_NEXT_TYPE(*hw_p); if (type == Q_TYPE_QH) break;
prev = periodic_next_shadow(prev, type);
hw_p = &here.qh->hw_next;
here = *prev;
}
/* sorting each branch by period (slow-->fast) * enables sharing interior tree nodes
*/ while (here.ptr && qh != here.qh) { if (qh->period > here.qh->period) break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw_next;
here = *prev;
} /* link in this qh, unless some earlier pass did that */ if (qh != here.qh) {
qh->qh_next = here; if (here.qh)
qh->hw_next = *hw_p;
wmb();
prev->qh = qh;
*hw_p = QH_NEXT(qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh_get(qh);
/* FIXME: * IF this isn't high speed * and this qh is active in the current uframe * (and overlay token SplitXstate is false?) * THEN * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
*/
/* high bandwidth, or otherwise part of every microframe */
period = qh->period; if (period == 0)
period = 1;
for (i = qh->start; i < oxu->periodic_size; i += period)
periodic_unlink(oxu, i, qh);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.