/* * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x * series processors. The UDC for the IXP 4xx series is very similar. * There are fifteen endpoints, in addition to ep0. * * Such controller drivers work with a gadget driver. The gadget driver * returns descriptors, implements configuration and data protocols used * by the host to interact with this device, and allocates endpoints to * the different protocol interfaces. The controller driver virtualizes * usb hardware so that the gadget drivers will be more portable. * * This UDC hardware wants to implement a bit too much USB protocol, so * it constrains the sorts of USB configuration change events that work. * The errata for these chips are misleading; some "fixed" bugs from * pxa250 a0/a1 b0/b1/b2 sure act like they're still there. * * Note that the UDC hardware supports DMA (except on IXP) but that's * not used here. IN-DMA (to host) is simple enough, when the data is * suitably aligned (16 bytes) ... the network stack doesn't do that, * other software can. OUT-DMA is buggy in most chip versions, as well * as poorly designed (data toggle not automatic). So this driver won't * bother using DMA. (Mostly-working IN-DMA support was available in * kernels before 2.6.23, but was never enabled or well tested.)
*/
/*
 * NOTE(review): the two preprocessor lines below were collapsed during
 * extraction.  Each #ifdef/#error/#endif/#define must sit on its own
 * physical line to be valid C; as written, the later directives become
 * part of the first macro's replacement text.  TODO: re-split them.
 * The clk_* stubs are no-ops because IXP doesn't yet support <linux/clk.h>.
 */
/* cpu-specific register addresses are compiled in to this code */ #ifdef CONFIG_ARCH_PXA #error"Can't configure both IXP and PXA" #endif
/* IXP doesn't yet support <linux/clk.h> */ #define clk_get(dev,name) NULL #define clk_enable(clk) do { } while (0) #define clk_disable(clk) do { } while (0) #define clk_put(clk) do { } while (0)
/* --------------------------------------------------------------------------- * endpoint related parts of the api to the usb controller hardware, * used by gadget driver; and the inner talker-to-hardware core. * ---------------------------------------------------------------------------
*/
/* one GPIO should control a D+ pullup, so host sees this device (or not) */ staticvoid pullup_off(void)
{ struct pxa2xx_udc_mach_info *mach = the_controller->mach; int off_level = mach->gpio_pullup_inverted;
if (gpio_is_valid(mach->gpio_pullup))
gpio_set_value(mach->gpio_pullup, off_level); elseif (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
}
/*
 * NOTE(review): this is the tail of pullup_on() — the function header
 * and the declarations of `mach` and `on_level` are missing from this
 * chunk (lost in extraction).  It mirrors pullup_off() above but drives
 * the connect level / PXA2XX_UDC_CMD_CONNECT.  TODO: restore the header.
 * Also note `elseif` below must be `else if` to be valid C.
 */
if (gpio_is_valid(mach->gpio_pullup))
gpio_set_value(mach->gpio_pullup, on_level); elseif (mach->udc_command)
mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}
#ifdefined(CONFIG_CPU_BIG_ENDIAN) /* * IXP4xx has its buses wired up in a way that relies on never doing any * byte swaps, independent of whether it runs in big-endian or little-endian * mode, as explained by Krzysztof Hałasa. * * We only support pxa25x in little-endian mode, but it is very likely * that it works the same way.
*/ staticinlinevoid udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
iowrite32be(val, dev->regs + reg);
}
/* The UDCCR reg contains mask and interrupt status bits, * so using '|=' isn't safe as it may ack an interrupt.
*/ #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
staticinlinevoid udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
{ /* udccr contains only the bits we don't want to change */
u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;
/*
 * NOTE(review): truncated — the write that acks the interrupt
 * (presumably udc_set_reg(dev, UDCCR, udccr | mask)) and the closing
 * brace are missing from this chunk.  Also `staticinlinevoid` above
 * needs spaces to be valid C.
 */
/*
 * NOTE(review): pxa25x_ep_enable() is truncated in this chunk — only the
 * descriptor-validation prologue is visible; the endpoint setup and the
 * final return are missing.  `staticint`/`conststruct` below need spaces
 * to be valid C.
 */
/* * endpoint enable/disable * * we need to verify the descriptors used to enable endpoints. since pxa25x * endpoint configurations are fixed, and are pretty much always enabled, * there's not a lot to manage here. * * because pxa25x can't selectively initialize bulk (or interrupt) endpoints, * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except * for a single interface (with only the default altsetting) and for gadget * drivers that don't halt endpoints (not reset by set_interface). that also * means that if you use ISO, you must violate the USB spec rule that all * iso endpoints must be in non-default altsettings.
*/ staticint pxa25x_ep_enable (struct usb_ep *_ep, conststruct usb_endpoint_descriptor *desc)
{ struct pxa25x_ep *ep; struct pxa25x_udc *dev;
/* reject ep0, mismatched addresses, and fifos too small for maxpacket */
ep = container_of (_ep, struct pxa25x_ep, ep); if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->fifo_size < usb_endpoint_maxp (desc)) {
DMSG("%s, bad ep or descriptor\n", __func__); return -EINVAL;
}
/* xfer types must match, except that interrupt ~= bulk */ if (ep->bmAttributes != desc->bmAttributes
&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
DMSG("%s, %s type mismatch\n", __func__, _ep->name); return -EINVAL;
}
/* hardware _could_ do smaller, but driver doesn't */ if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
&& usb_endpoint_maxp (desc)
!= BULK_FIFO_SIZE)
|| !desc->wMaxPacketSize) {
DMSG("%s, bad %s maxpacket\n", __func__, _ep->name); return -ERANGE;
}
/* can only enable endpoints while bound and the bus is live */
dev = ep->dev; if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
DMSG("%s, bogus device state\n", __func__); return -ESHUTDOWN;
}
/* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers * must still pass correctly initialized endpoints, since other controller * drivers may care about how it's currently set up (dma issues etc).
*/
/*
 * NOTE(review): this span holds two orphaned fragments — the comment
 * above belongs to the (missing) request alloc/free helpers; below are
 * the tails of done() (request-completion bookkeeping + logging) and
 * write_packet() (copy `length` bytes into the IN fifo, return count).
 * Their headers and opening statements were lost in extraction.
 */
if (likely (req->req.status == -EINPROGRESS))
req->req.status = status; else
status = req->req.status;
if (status && status != -ESHUTDOWN)
DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
ep->ep.name, &req->req, status,
req->req.actual, req->req.length);
/* how big will this packet be? */
length = min(req->req.length - req->req.actual, max);
req->req.actual += length;
count = length; while (likely(count--))
udc_ep_set_UDDR(ep, *buf++);
return length;
}
/*
 * write to an IN endpoint fifo, as many packets as possible.
 * irqs will use this to write the rest later.
 * caller guarantees at least one packet buffer is ready (or a zlp).
 *
 * Returns nonzero once the request has completed (and been taken off
 * the queue), zero while more packets remain to be written.
 */
static int
write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
	unsigned		max;

	max = usb_endpoint_maxp(ep->ep.desc);
	do {
		unsigned	count;
		int		is_last, is_short;

		count = write_packet(ep, req, max);

		/* last packet is usually short (or a zlp) */
		if (unlikely (count != max))
			is_last = is_short = 1;
		else {
			if (likely(req->req.length != req->req.actual)
					|| req->req.zero)
				is_last = 0;
			else
				is_last = 1;
			/* interrupt/iso maxpacket may not fill the fifo */
			is_short = unlikely (max < ep->fifo_size);
		}

		/* let loose that packet. maybe try writing another one,
		 * double buffering might work.  TSP, TPC, and TFS
		 * bit values are the same for all normal IN endpoints.
		 */
		udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
		if (is_short)
			udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);

		/* requests complete when all IN data is in the FIFO */
		if (is_last) {
			done (ep, req, 0);
			if (list_empty(&ep->queue))
				pio_irq_disable(ep);
			return 1;
		}

		// TODO experiment: how robust can fifo mode tweaking be?
		// double buffering is off in the default fifo mode, which
		// prevents TFS from being set here.

	} while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
	return 0;
}
/* caller asserts req->pending (ep0 irq status nyet cleared); starts
 * ep0 data stage.  these chips want very simple state transitions.
 */
static inline
void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
{
	/* ack SA/OPR together with any caller-requested bits, then
	 * clear the ep0 interrupt source before the data stage
	 */
	udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
	udc_set_reg(dev, USIR0, USIR0_IR0);
	dev->req_pending = 0;
	DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
		__func__, tag, udc_ep0_get_UDCCS(dev), flags);
}
/*
 * NOTE(review): this is the tail of write_ep0_fifo() — its header and
 * prologue are missing from this chunk.  `dev` at the uses below is
 * presumably a local alias for ep->dev declared in the lost prologue;
 * verify when restoring.  `elseif` below must be `else if`.
 */
if (unlikely (is_short)) { if (ep->dev->req_pending)
ep0start(ep->dev, UDCCS0_IPR, "short IN"); else
udc_ep0_set_UDCCS(dev, UDCCS0_IPR);
count = req->req.length;
done (ep, req, 0);
ep0_idle(ep->dev); #ifndef CONFIG_ARCH_IXP4XX #if 1 /* This seems to get rid of lost status irqs in some cases: * host responds quickly, or next request involves config * change automagic, or should have been hidden, or ... * * FIXME get rid of all udelays possible...
*/ if (count >= EP0_FIFO_SIZE) {
/* poll up to ~100us for the host's OPR before giving up */
count = 100; do { if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) { /* clear OPR, generate ack */
udc_ep0_set_UDCCS(dev, UDCCS0_OPR); break;
}
count--;
udelay(1);
} while (count);
} #endif #endif
} elseif (ep->dev->req_pending)
ep0start(ep->dev, 0, "IN"); return is_short;
}
/* * read_fifo - unload packet(s) from the fifo we use for usb OUT * transfers and put them into the request. caller should have made * sure there's at least one packet ready. * * returns true if the request completed because of short packet or the * request buffer having filled (and maybe overran till end-of-packet).
*/ staticint
read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{ for (;;) {
u32 udccs;
u8 *buf; unsigned bufferspace, count, is_short;
/* make sure there's a packet in the FIFO. * UDCCS_{BO,IO}_RPC are all the same bit value. * UDCCS_{BO,IO}_RNE are all the same bit value.
*/
udccs = udc_ep_get_UDCCS(ep); if (unlikely ((udccs & UDCCS_BO_RPC) == 0)) break;
buf = req->req.buf + req->req.actual;
prefetchw(buf);
bufferspace = req->req.length - req->req.actual;
/*
 * NOTE(review): interior lines appear lost here — nothing in view sets
 * `count`, `is_short`, or `byte`, and the per-byte fifo read loop's
 * opening is missing.  The code below (overflow check, byte store,
 * RPC ack, iso handling, completion) is intact.  TODO: restore the
 * byte-count computation and read loop before trusting this path.
 */
if (unlikely (bufferspace == 0)) { /* this happens when the driver's buffer * is smaller than what the host sent. * discard the extra data.
*/ if (req->req.status != -EOVERFLOW)
DMSG("%s overflow %d\n",
ep->ep.name, count);
req->req.status = -EOVERFLOW;
} else {
*buf++ = byte;
bufferspace--;
}
}
udc_ep_set_UDCCS(ep, UDCCS_BO_RPC); /* RPC/RSP/RNE could now reflect the other packet buffer */
/* iso is one request per packet */ if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) { if (udccs & UDCCS_IO_ROF)
req->req.status = -EHOSTUNREACH; /* more like "is_done" */
is_short = 1;
}
/* completion */ if (is_short || req->req.actual == req->req.length) {
done (ep, req, 0); if (list_empty(&ep->queue))
pio_irq_disable(ep); return 1;
}
/* finished that packet. the next one may be waiting... */
} return 0;
}
/* * special ep0 version of the above. no UBCR0 or double buffering; status * handshaking is magic. most device protocols don't need control-OUT. * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other * protocols do use them.
*/ staticint
read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
u8 *buf, byte; unsigned bufferspace;
/*
 * NOTE(review): the initialization of `buf` and `bufferspace` (from
 * req->req.buf/actual and length) appears to have been lost between
 * the declarations above and the loop below — both are read before
 * any visible assignment.  TODO: restore before use.
 */
while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
byte = (u8) UDDR0;
if (unlikely (bufferspace == 0)) { /* this happens when the driver's buffer * is smaller than what the host sent. * discard the extra data.
*/ if (req->req.status != -EOVERFLOW)
DMSG("%s overflow\n", ep->ep.name);
req->req.status = -EOVERFLOW;
} else {
*buf++ = byte;
req->req.actual++;
bufferspace--;
}
}
/* ack OPR and pre-arm IPR for the status stage */
udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);
/* completion */ if (req->req.actual >= req->req.length) return 1;
/* finished that packet. the next one may be waiting... */ return 0;
}
/*
 * NOTE(review): headerless fragment — this is the argument-validation
 * prologue of the endpoint queue entry point (pxa25x_ep_queue, by the
 * look of the checks); its signature and the rest of its body are
 * missing from this chunk.
 */
ep = container_of(_ep, struct pxa25x_ep, ep); if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
DMSG("%s, bad ep\n", __func__); return -EINVAL;
}
dev = ep->dev; if (unlikely (!dev->driver
|| dev->gadget.speed == USB_SPEED_UNKNOWN)) {
DMSG("%s, bogus device state\n", __func__); return -ESHUTDOWN;
}
/* iso is always one packet per request, that's the only way * we can report per-packet status. that also helps with dma.
*/ if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
&& req->req.length > usb_endpoint_maxp(ep->ep.desc))) return -EMSGSIZE;
/*
 * NOTE(review): headerless fragment — this is the body of the endpoint
 * halt operation (pxa25x_ep_set_halt, judging by the FST handling);
 * its signature, the `value` parameter declaration, and the irq-save
 * that pairs with local_irq_restore() below are missing from this chunk.
 */
ep = container_of(_ep, struct pxa25x_ep, ep); if (unlikely (!_ep
|| (!ep->ep.desc && ep->ep.name != ep0name))
|| ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
DMSG("%s, bad ep\n", __func__); return -EINVAL;
} if (value == 0) { /* this path (reset toggle+halt) is needed to implement * SET_INTERFACE on normal hardware. but it can't be * done from software on the PXA UDC, and the hardware * forgets to do it as part of SET_INTERFACE automagic.
*/
DMSG("only host can clear %s halt\n", _ep->name); return -EROFS;
}
/* FST bit is the same for control, bulk in, bulk out, interrupt in */
udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);
/* ep0 needs special care */ if (!ep->ep.desc) {
start_watchdog(ep->dev);
ep->dev->req_pending = 0;
ep->dev->ep0state = EP0_STALL;
/* and bulk/intr endpoints like dropping stalls too */
} else { unsigned i; for (i = 0; i < 1000; i += 20) { if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST) break;
udelay(20);
}
}
local_irq_restore(flags);
/*
 * NOTE(review): headerless fragment — the body of the fifo-flush
 * endpoint operation (pxa25x_ep_fifo_flush, by its OUT-drain/IN-reset
 * behavior); its signature line is missing from this chunk.
 */
ep = container_of(_ep, struct pxa25x_ep, ep); if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
DMSG("%s, bad ep\n", __func__); return;
}
/* toggle and halt bits stay unchanged */
/* for OUT, just read and discard the FIFO contents. */ if ((ep->bEndpointAddress & USB_DIR_IN) == 0) { while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
(void)udc_ep_get_UDDR(ep); return;
}
/* most IN status is the same, but ISO can't stall */
udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
? 0 : UDCCS_BI_SST));
}
/* ---------------------------------------------------------------------------
 *	device-scoped parts of the api to the usb controller hardware
 * ---------------------------------------------------------------------------
 */

/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
 * in active use.
 */
static int pullup(struct pxa25x_udc *udc)
{
	/* active only when VBUS is present, software wants the pullup
	 * on, and we aren't suspended
	 */
	int is_active = udc->vbus && udc->pullup && !udc->suspended;

	DMSG("%s\n", is_active ? "active" : "inactive");
	if (is_active) {
		if (!udc->active) {
			udc->active = 1;
			/* Enable clock for USB device */
			clk_enable(udc->clk);
			udc_enable(udc);
		}
	} else {
		if (udc->active) {
			if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				DMSG("disconnect %s\n", udc->driver
					? udc->driver->driver.name
					: "(no driver)");
				stop_activity(udc, udc->driver);
			}
			udc_disable(udc);
			/* Disable clock for USB device */
			clk_disable(udc->clk);
			udc->active = 0;
		}
	}
	return 0;
}
/*
 * NOTE(review): three truncated gadget-op definitions follow — only the
 * headers and first declarations of pxa25x_udc_vbus_session(),
 * pxa25x_udc_pullup(), and pxa25x_udc_vbus_draw() survived extraction;
 * all three bodies are missing.  `staticint` needs a space in each.
 */
/* VBUS reporting logically comes from a transceiver */ staticint pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{ struct pxa25x_udc *udc;
/* drivers may have software control over D+ pullup */ staticint pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
{ struct pxa25x_udc *udc;
/* boards may consume current from VBUS, up to 100-500mA based on config. * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs * violate USB specs.
*/ staticint pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{ struct pxa25x_udc *udc;
/*
 * NOTE(review): headerless fragment of the debugfs/procfs dump routine
 * (a seq_file show function) — its signature, the earlier output, and
 * the `done:` label this jumps to are all outside this chunk.
 */
if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver) goto done;
seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
dev->stats.write.bytes, dev->stats.write.ops,
dev->stats.read.bytes, dev->stats.read.ops,
dev->stats.irqs);
/* dump endpoint queues */ for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) { struct pxa25x_ep *ep = &dev->ep [i]; struct pxa25x_request *req;
if (i != 0) { conststruct usb_endpoint_descriptor *desc;
desc = ep->ep.desc; if (!desc) continue;
tmp = udc_ep_get_UDCCS(&dev->ep[i]);
seq_printf(m, "%s max %d %s udccs %02x irqs %lu\n",
ep->ep.name, usb_endpoint_maxp(desc), "pio", tmp, ep->pio_irqs); /* TODO translate all five groups of udccs bits! */
} else/* ep0 should only have one transfer queued */
seq_printf(m, "ep0 max 16 pio irqs %lu\n",
ep->pio_irqs);
/* the rest was statically initialized, and is read-only */
}
/* until it's enabled, this UDC should be completely invisible
 * to any USB host.
 *
 * NOTE(review): the mainline driver also resets ep0/gadget state
 * around the interrupt acks here — confirm nothing was lost from
 * this chunk before relying on it.
 */
static void udc_enable (struct pxa25x_udc *dev)
{
	udc_clear_mask_UDCCR(dev, UDCCR_UDE);

	/* try to clear these bits before we enable the udc */
	udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);

	/*
	 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
	 *   - enable UDC
	 *   - if RESET is already in progress, ack interrupt
	 *   - unmask reset interrupt
	 */
	udc_set_mask_UDCCR(dev, UDCCR_UDE);
	if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
		udc_ack_int_UDCCR(dev, UDCCR_RSTIR);

	if (dev->has_cfr /* UDC_RES2 is defined */) {
		/* pxa255 (a0+) can avoid a set_config race that could
		 * prevent gadget drivers from configuring correctly
		 */
		udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
	} else {
		/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
		 * which could result in missing packets and interrupts.
		 * supposedly one bit per endpoint, controlling whether it
		 * double buffers or not; ACM/AREN bits fit into the holes.
		 * zero bits (like USIR0_IRx) disable double buffering.
		 */
		udc_set_reg(dev, UDC_RES1, 0x00);
		udc_set_reg(dev, UDC_RES2, 0x00);
	}

	/* if hardware supports it, pullup D+ and wait for reset */
	pullup_on();
}
/* when a driver is successfully registered, it will receive * control requests including set_configuration(), which enables * non-control requests. then usb traffic follows until a * disconnect is reported. then a host may connect again, or * the driver might get unbound.
*/ staticint pxa25x_udc_start(struct usb_gadget *g, struct usb_gadget_driver *driver)
{ struct pxa25x_udc *dev = to_pxa25x(g); int retval;
/* first hook up the driver ... */
dev->driver = driver;
dev->pullup = 1;
/* ... then enable host detection and ep0; and we're ready * for set_configuration as well as eventual disconnect.
*/ /* connect to bus through transceiver */ if (!IS_ERR_OR_NULL(dev->transceiver)) {
retval = otg_set_peripheral(dev->transceiver->otg,
&dev->gadget); if (retval) goto bind_fail;
}
/*
 * NOTE(review): pxa25x_udc_start() is truncated — its success return
 * and the `bind_fail:` label are missing; the lines below are the tail
 * of a different function (clear_ep_state(), by the nuke loop), whose
 * header was also lost in extraction.
 */
/* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint * fifos, and pending transactions mustn't be continued in any case.
*/ for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
nuke(&dev->ep[i], -ECONNABORTED);
}
/*
 * NOTE(review): headerless fragment — this is the interior of the ep0
 * interrupt service routine (handle_ep0): SETUP decode, the gadget
 * setup() dispatch, the ep0 state machine cases, and the final USIR0
 * ack.  The function header, local declarations (dev, ep, req, udccs0,
 * union u, i), the `got_setup:` label, and the enclosing switch opening
 * are all missing from this chunk.
 */
/* cope with automagic for some standard requests. */
dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
== USB_TYPE_STANDARD;
dev->req_config = 0;
dev->req_pending = 1; switch (u.r.bRequest) { /* hardware restricts gadget drivers here! */ case USB_REQ_SET_CONFIGURATION: if (u.r.bRequestType == USB_RECIP_DEVICE) { /* reflect hardware's automagic * up to the gadget driver.
*/
config_change:
dev->req_config = 1;
clear_ep_state(dev); /* if !has_cfr, there's no synch * else use AREN (later) not SA|OPR * USIR0_IR0 acts edge sensitive
*/
} break; /* ... and here, even more ... */ case USB_REQ_SET_INTERFACE: if (u.r.bRequestType == USB_RECIP_INTERFACE) { /* udc hardware is broken by design: * - altsetting may only be zero; * - hw resets all interfaces' eps; * - ep reset doesn't include halt(?).
*/
DMSG("broken set_interface (%d/%d)\n",
le16_to_cpu(u.r.wIndex),
le16_to_cpu(u.r.wValue)); goto config_change;
} break; /* hardware was supposed to hide this */ case USB_REQ_SET_ADDRESS: if (u.r.bRequestType == USB_RECIP_DEVICE) {
ep0start(dev, 0, "address"); return;
} break;
}
i = dev->driver->setup(&dev->gadget, &u.r); if (i < 0) { /* hardware automagic preventing STALL... */ if (dev->req_config) { /* hardware sometimes neglects to tell * us about config change events, * so later ones may fail...
*/
WARNING("config change %02x fail %d?\n",
u.r.bRequest, i); return; /* TODO experiment: if has_cfr, * hardware didn't ACK; maybe we * could actually STALL!
*/
}
DBG(DBG_VERBOSE, "protocol STALL, " "%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall: /* the watchdog timer helps deal with cases * where udc seems to clear FST wrongly, and * then NAKs instead of STALLing.
*/
ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
start_watchdog(dev);
dev->ep0state = EP0_STALL;
/* pxa210/250 erratum 131 for B0/B1 says RNE lies. * still observed on a pxa255 a0.
*/
DBG(DBG_VERBOSE, "e131\n");
nuke(ep, -EPROTO);
/* read SETUP data, but don't trust it too much */ for (i = 0; i < 8; i++)
u.raw [i] = (u8) UDDR0; if ((u.r.bRequestType & USB_RECIP_MASK)
> USB_RECIP_OTHER) goto stall; if (u.word [0] == 0 && u.word [1] == 0) goto stall; goto got_setup;
} else { /* some random early IRQ: * - we acked FST * - IPR cleared * - OPR got set, without SA (likely status stage)
*/
udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
} break; case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */ if (udccs0 & UDCCS0_OPR) {
udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
DBG(DBG_VERBOSE, "ep0in premature status\n"); if (req)
done(ep, req, 0);
ep0_idle(dev);
} else/* irq was IPR clearing */ { if (req) { /* this IN packet might finish the request */
(void) write_ep0_fifo(ep, req);
} /* else IN token before response was written */
} break; case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */ if (udccs0 & UDCCS0_OPR) { if (req) { /* this OUT packet might finish the request */ if (read_ep0_fifo(ep, req))
done(ep, req, 0); /* else more OUT packets expected */
} /* else OUT token before read was issued */
} else/* irq was IPR clearing */ {
DBG(DBG_VERBOSE, "ep0out premature status\n"); if (req)
done(ep, req, 0);
ep0_idle(dev);
} break; case EP0_END_XFER: if (req)
done(ep, req, 0); /* ack control-IN status (maybe in-zlp was skipped) * also appears after some config change events.
*/ if (udccs0 & UDCCS0_OPR)
udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
ep0_idle(dev); break; case EP0_STALL:
udc_ep0_set_UDCCS(dev, UDCCS0_FST); break;
}
udc_set_reg(dev, USIR0, USIR0_IR0);
}
/* service a non-control endpoint: ack its status bits, then move data
 * for the request (if any) at the head of its queue.  Loops while a
 * request keeps completing so back-to-back packets are drained.
 */
static void handle_ep(struct pxa25x_ep *ep)
{
	struct pxa25x_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa25x_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			if (likely (tmp))
				udc_ep_set_UDCCS(ep, tmp);
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);
		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				udc_ep_set_UDCCS(ep, tmp);

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
				completed = read_fifo(ep, req);
			} else
				pio_irq_disable(ep);
		}
		ep->pio_irqs++;
	} while (completed);
}
/* * pxa25x_udc_irq - interrupt handler * * avoid delays in ep0 processing. the control handshaking isn't always * under software control (pxa250c0 and the pxa255 are better), and delays * could cause usb protocol errors.
*/ static irqreturn_t
pxa25x_udc_irq(int irq, void *_dev)
{ struct pxa25x_udc *dev = _dev; int handled;
dev->stats.irqs++; do {
u32 udccr = udc_get_reg(dev, UDCCR);
/*
 * NOTE(review): pxa25x_udc_irq() is cut off here — the body that
 * dispatches reset/suspend/resume and per-endpoint interrupts, and
 * the IRQ_HANDLED/IRQ_NONE return, lie beyond the end of this chunk.
 */
/*
 * NOTE(review): the German text that followed here was web-page
 * boilerplate accidentally appended during extraction, not driver code.
 * It translated roughly as: "The information on this web page was
 * compiled carefully to the best of our knowledge.  However, neither
 * completeness, correctness, nor quality of the information provided
 * is guaranteed.  Note: the colored syntax display and the measurement
 * are still experimental."  It should be removed; the interrupt handler
 * above is truncated at this point.
 */