if (req->req.dma) { if (!WARN_ON(!ep->dev))
usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
&req->req, ep->epn.is_in);
req->req.dma = 0;
}
/* * If this isn't an internal EP0 request, call the core * to call the gadget completion.
*/ if (!internal) {
spin_unlock(&ep->vhub->lock);
usb_gadget_giveback_request(&ep->ep, &req->req);
spin_lock(&ep->vhub->lock);
}
}
/*
 * Complete every request still queued on the endpoint with the
 * given status, logging how many were flushed.
 *
 * Beware: ast_vhub_done() drops and re-acquires the vhub lock and
 * unlinks the request from ep->queue, which is what makes this
 * "pop the head until empty" loop terminate.
 */
void ast_vhub_nuke(struct ast_vhub_ep *ep, int status)
{
	int nuked = 0;

	for (;;) {
		struct ast_vhub_req *req;

		if (list_empty(&ep->queue))
			break;
		req = list_first_entry(&ep->queue, struct ast_vhub_req,
				       queue);
		ast_vhub_done(ep, req, status);
		nuked++;
	}

	if (nuked)
		EPDBG(ep, "Nuked %d request(s)\n", nuked);
}
for (i = 0; ep_acks && i < vhub->max_epns; i++) {
u32 mask = VHUB_EP_IRQ(i); if (ep_acks & mask) {
ast_vhub_epn_ack_irq(&vhub->epns[i]);
ep_acks &= ~mask;
}
}
}
/* Handle device interrupts */ if (istat & vhub->port_irq_mask) { for (i = 0; i < vhub->max_ports; i++) { if (istat & VHUB_DEV_IRQ(i))
ast_vhub_dev_irq(&vhub->ports[i].dev);
}
}
/* Handle top-level vHub EP0 interrupts */ if (istat & (VHUB_IRQ_HUB_EP0_OUT_ACK_STALL |
VHUB_IRQ_HUB_EP0_IN_ACK_STALL |
VHUB_IRQ_HUB_EP0_SETUP)) { if (istat & VHUB_IRQ_HUB_EP0_IN_ACK_STALL)
ast_vhub_ep0_handle_ack(&vhub->ep0, true); if (istat & VHUB_IRQ_HUB_EP0_OUT_ACK_STALL)
ast_vhub_ep0_handle_ack(&vhub->ep0, false); if (istat & VHUB_IRQ_HUB_EP0_SETUP)
ast_vhub_ep0_handle_setup(&vhub->ep0);
}
/* Various top level bus events */ if (istat & (VHUB_IRQ_BUS_RESUME |
VHUB_IRQ_BUS_SUSPEND |
VHUB_IRQ_BUS_RESET)) { if (istat & VHUB_IRQ_BUS_RESUME)
ast_vhub_hub_resume(vhub); if (istat & VHUB_IRQ_BUS_SUSPEND)
ast_vhub_hub_suspend(vhub); if (istat & VHUB_IRQ_BUS_RESET)
ast_vhub_hub_reset(vhub);
}
/* * We do *NOT* set the VHUB_CTRL_CLK_STOP_SUSPEND bit * to stop the logic clock during suspend because * it causes the registers to become inaccessible and * we haven't yet figured out a good way to bring the * controller back into life to issue a wakeup.
*/
/* * Set some ISO & split control bits according to Aspeed * recommendation * * VHUB_CTRL_ISO_RSP_CTRL: When set tells the HW to respond * with 0 bytes data packet to ISO IN endpoints when no data * is available. * * VHUB_CTRL_SPLIT_IN: This makes a SOF complete a split IN * transaction.
*/
ctrl |= VHUB_CTRL_ISO_RSP_CTRL | VHUB_CTRL_SPLIT_IN;
writel(ctrl, vhub->regs + AST_VHUB_CTRL);
udelay(1);
/* Set descriptor ring size */ if (AST_VHUB_DESCS_COUNT == 256) {
ctrl |= VHUB_CTRL_LONG_DESC;
writel(ctrl, vhub->regs + AST_VHUB_CTRL);
} else {
BUILD_BUG_ON(AST_VHUB_DESCS_COUNT != 32);
}
/* Check if we need to limit the HW to USB1 */
max_speed = usb_get_maximum_speed(&pdev->dev); if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
vhub->force_usb1 = true;
/* Mask & ack all interrupts before installing the handler */
writel(0, vhub->regs + AST_VHUB_IER);
writel(VHUB_IRQ_ACK_ALL, vhub->regs + AST_VHUB_ISR);
/* * Allocate DMA buffers for all EP0s in one chunk, * one per port and one for the vHub itself
*/
vhub->ep0_bufs = dma_alloc_coherent(&pdev->dev,
AST_VHUB_EP0_MAX_PACKET *
(vhub->max_ports + 1),
&vhub->ep0_bufs_dma, GFP_KERNEL); if (!vhub->ep0_bufs) {
dev_err(&pdev->dev, "Failed to allocate EP0 DMA buffers\n");
rc = -ENOMEM; goto err;
}
UDCVDBG(vhub, "EP0 DMA buffers @%p (DMA 0x%08x)\n",
vhub->ep0_bufs, (u32)vhub->ep0_bufs_dma);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.