staticbool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
/* * RX IRQ coalescing options: * * false (default) - one IRQ per DATAx packet. Slow but reliable. The * driver is able to pass the "testusb" suite and recover from conditions like: * * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep * 2) Host sends 512 bytes of data * 3) Host decides to reconfigure the device and sends SET_INTERFACE * 4) Device shuts down the endpoint and cancels the RX transaction * * true - one IRQ per transfer, for transfers <= 2048B. Generates * considerably fewer IRQs, but error recovery is less robust. Does not * reliably pass "testusb". * * TX always uses coalescing, because we can cancel partially complete TX * transfers by repeatedly flushing the FIFO. The hardware doesn't allow * this on RX.
*/ staticbool irq_coalesce;
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
/**
 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
 * @ep_num: USB endpoint number.
 * @n_bds: Number of buffer descriptors in the ring.
 * @ep_type: Endpoint type (control, bulk, interrupt).
 * @dir: Direction (in, out).
 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
 * @max_pkt_hs: Maximum packet size in high speed mode.
 * @max_pkt_fs: Maximum packet size in full speed mode.
 */
struct iudma_ch_cfg {
	int		ep_num;
	int		n_bds;
	int		ep_type;
	int		dir;
	int		n_fifo_slots;
	int		max_pkt_hs;
	int		max_pkt_fs;
};
/*
 * This controller was designed to support a CDC/RNDIS application.  It may
 * be possible to reconfigure some of the endpoints, but the hardware
 * limitations (FIFO sizing and number of DMA channels) may significantly
 * impact flexibility and/or stability.  Change these values at your own
 * risk.
 */
/** * struct iudma_ch - Represents the current state of a single IUDMA channel. * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1). * @ep_num: USB endpoint number. -1 for ep0 RX. * @enabled: Whether bcm63xx_ep_enable() has been called. * @max_pkt: "Chunk size" on the USB interface. Based on interface speed. * @is_tx: true for TX, false for RX. * @bep: Pointer to the associated endpoint. NULL for ep0 RX. * @udc: Reference to the device controller. * @read_bd: Next buffer descriptor to reap from the hardware. * @write_bd: Next BD available for a new packet. * @end_bd: Points to the final BD in the ring. * @n_bds_used: Number of BD entries currently occupied. * @bd_ring: Base pointer to the BD ring. * @bd_ring_dma: Physical (DMA) address of bd_ring. * @n_bds: Total number of BDs in the ring. * * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN) * only. * * Each bulk/intr endpoint has a single IUDMA channel and a single * struct usb_ep.
*/ struct iudma_ch { unsignedint ch_idx; int ep_num; bool enabled; int max_pkt; bool is_tx; struct bcm63xx_ep *bep; struct bcm63xx_udc *udc;
/** * struct bcm63xx_ep - Internal (driver) state of a single endpoint. * @ep_num: USB endpoint number. * @iudma: Pointer to IUDMA channel state. * @ep: USB gadget layer representation of the EP. * @udc: Reference to the device controller. * @queue: Linked list of outstanding requests for this EP. * @halted: 1 if the EP is stalled; 0 otherwise.
*/ struct bcm63xx_ep { unsignedint ep_num; struct iudma_ch *iudma; struct usb_ep ep; struct bcm63xx_udc *udc; struct list_head queue; unsigned halted:1;
};
/** * struct bcm63xx_req - Internal (driver) state of a single request. * @queue: Links back to the EP's request list. * @req: USB gadget layer representation of the request. * @offset: Current byte offset into the data buffer (next byte to queue). * @bd_bytes: Number of data bytes in outstanding BD entries. * @iudma: IUDMA channel used for the request.
*/ struct bcm63xx_req { struct list_head queue; /* ep's requests */ struct usb_request req; unsignedint offset; unsignedint bd_bytes; struct iudma_ch *iudma;
};
/** * struct bcm63xx_udc - Driver/hardware private context. * @lock: Spinlock to mediate access to this struct, and (most) HW regs. * @dev: Generic Linux device structure. * @pd: Platform data (board/port info). * @usbd_clk: Clock descriptor for the USB device block. * @usbh_clk: Clock descriptor for the USB host block. * @gadget: USB device. * @driver: Driver for USB device. * @usbd_regs: Base address of the USBD/USB20D block. * @iudma_regs: Base address of the USBD's associated IUDMA block. * @bep: Array of endpoints, including ep0. * @iudma: Array of all IUDMA channels used by this controller. * @cfg: USB configuration number, from SET_CONFIGURATION wValue. * @iface: USB interface number, from SET_INTERFACE wIndex. * @alt_iface: USB alt interface number, from SET_INTERFACE wValue. * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions. * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req. * @ep0state: Current state of the ep0 state machine. * @ep0_wq: Workqueue struct used to wake up the ep0 state machine. * @wedgemap: Bitmap of wedged endpoints. * @ep0_req_reset: USB reset is pending. * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet. * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet. * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity. * @ep0_req_completed: ep0 request has completed; worker has not seen it yet. * @ep0_reply: Pending reply from gadget driver. * @ep0_request: Outstanding ep0 request.
*/ struct bcm63xx_udc {
spinlock_t lock;
/**
 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
 * @udc: Reference to the device controller.
 * @idx: Desired init_sel value.
 *
 * The "init_sel" signal is used as a selection index for both endpoints
 * and IUDMA channels.  Since these do not map 1:1, the use of this signal
 * depends on the context.
 */
static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
{
	u32 val = usbd_readl(udc, USBD_CONTROL_REG);

	/* Replace the old init_sel field with the caller's index. */
	val &= ~USBD_CONTROL_INIT_SEL_MASK;
	val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
	usbd_writel(udc, val, USBD_CONTROL_REG);
}
/** * bcm63xx_set_stall - Enable/disable stall on one endpoint. * @udc: Reference to the device controller. * @bep: Endpoint on which to operate. * @is_stalled: true to enable stall, false to disable. * * See notes in bcm63xx_update_wedge() regarding automatic clearing of * halt/stall conditions.
*/ staticvoid bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep, bool is_stalled)
{
u32 val;
/** * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings. * @udc: Reference to the device controller. * * These parameters depend on the USB link speed. Settings are * per-IUDMA-channel-pair.
*/ staticvoid bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{ int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
u32 i, val, rx_fifo_slot, tx_fifo_slot;
/* set up FIFO boundaries and packet sizes; this is done in pairs */
rx_fifo_slot = tx_fifo_slot = 0; for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) { conststruct iudma_ch_cfg *rx_cfg = &iudma_defaults[i]; conststruct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
/** * bcm63xx_ep_setup - Configure per-endpoint settings. * @udc: Reference to the device controller. * * This needs to be rerun if the speed/cfg/intf/altintf changes.
*/ staticvoid bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
u32 val, i;
/** * iudma_write - Queue a single IUDMA transaction. * @udc: Reference to the device controller. * @iudma: IUDMA channel to use. * @breq: Request containing the transaction data. * * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA * does not honor SOP/EOP so the handling of multiple buffers is ambiguous. * So iudma_write() may be called several times to fulfill a single * usb_request. * * For TX IUDMA, this can queue multiple buffer descriptors if needed.
*/ staticvoid iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma, struct bcm63xx_req *breq)
{ int first_bd = 1, last_bd = 0, extra_zero_pkt = 0; unsignedint bytes_left = breq->req.length - breq->offset; constint max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
iudma->max_pkt : IUDMA_MAX_FRAGMENT;
/* * extra_zero_pkt forces one more iteration through the loop * after all data is queued up, to send the zero packet
*/ if (extra_zero_pkt && !bytes_left)
extra_zero_pkt = 0;
/** * iudma_read - Check for IUDMA buffer completion. * @udc: Reference to the device controller. * @iudma: IUDMA channel to use. * * This checks to see if ALL of the outstanding BDs on the DMA channel * have been filled. If so, it returns the actual transfer length; * otherwise it returns -EBUSY.
*/ staticint iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{ int i, actual_len = 0; struct bcm_enet_desc *d = iudma->read_bd;
if (!iudma->n_bds_used) return -EINVAL;
for (i = 0; i < iudma->n_bds_used; i++) {
u32 dmaflags;
dmaflags = d->len_stat;
if (dmaflags & DMADESC_OWNER_MASK) return -EBUSY;
actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
DMADESC_LENGTH_SHIFT; if (d == iudma->end_bd)
d = iudma->bd_ring; else
d++;
}
/* set up IRQs, UBUS burst size, and BD base for this channel */
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
ENETDMAC_IRMASK_REG, ch_idx);
usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
/** * iudma_init - One-time initialization of all IUDMA channels. * @udc: Reference to the device controller. * * Enable DMA, flush channels, and enable global IUDMA IRQs.
*/ staticint iudma_init(struct bcm63xx_udc *udc)
{ int i, rc;
/**
 * bcm63xx_select_phy_mode - Select between USB device and host mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device, false for host.
 *
 * This should probably be reworked to use the drivers/usb/otg
 * infrastructure.
 *
 * By default, the AFE/pullups are disabled in device mode, until
 * bcm63xx_select_pullup() is called.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
		val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
			       GPIO_PINMUX_OTHR_6328_USB_HOST;
		bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
	}

	/*
	 * In device mode, take the port away from the host block (HOSTB)
	 * and disable the output drivers until the pullup is requested.
	 */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_device) {
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);

	/* Route the port's data lines to the device controller. */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
	if (is_device)
		val |= USBH_PRIV_SWAP_USBD_MASK;
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
}
/**
 * bcm63xx_select_pullup - Enable/disable the pullup on D+
 * @udc: Reference to the device controller.
 * @is_on: true to enable the pullup, false to disable.
 *
 * If the pullup is active, the host will sense a FS/HS device connected to
 * the port.  If the pullup is inactive, the host will think the USB
 * device has been disconnected.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	/* NODRIV low = drivers (and thus the D+ pullup) enabled */
	val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
}
/**
 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
 * @udc: Reference to the device controller.
 *
 * This just masks the IUDMA IRQs and releases the clocks.  It is assumed
 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	/* Clocks must be running while we touch the IUDMA registers. */
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
/** * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures. * @udc: Reference to the device controller.
*/ staticint bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{ int i, rc = 0;
u32 val;
udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
GFP_KERNEL); if (!udc->ep0_ctrl_buf) return -ENOMEM;
INIT_LIST_HEAD(&udc->gadget.ep_list); for (i = 0; i < BCM63XX_NUM_EP; i++) { struct bcm63xx_ep *bep = &udc->bep[i];
if (udc->gadget.max_speed == USB_SPEED_HIGH)
val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT); else
val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
usbd_writel(udc, val, USBD_STRAPS_REG);
bcm63xx_set_ctrl_irqs(udc, false);
usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
rc = iudma_init(udc);
set_clocks(udc, false); if (rc)
bcm63xx_uninit_udc_hw(udc);
return 0;
}
/*********************************************************************** * Standard EP gadget operations
***********************************************************************/
/** * bcm63xx_ep_enable - Enable one endpoint. * @ep: Endpoint to enable. * @desc: Contains max packet, direction, etc. * * Most of the endpoint parameters are fixed in this controller, so there * isn't much for this function to do.
*/ staticint bcm63xx_ep_enable(struct usb_ep *ep, conststruct usb_endpoint_descriptor *desc)
{ struct bcm63xx_ep *bep = our_ep(ep); struct bcm63xx_udc *udc = bep->udc; struct iudma_ch *iudma = bep->iudma; unsignedlong flags;
if (!ep || !desc || ep->name == bcm63xx_ep0name) return -EINVAL;
if (!udc->driver) return -ESHUTDOWN;
spin_lock_irqsave(&udc->lock, flags); if (iudma->enabled) {
spin_unlock_irqrestore(&udc->lock, flags); return -EINVAL;
}
/**
 * bcm63xx_udc_free_request - Free a request.
 * @ep: Endpoint associated with the request.
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	struct bcm63xx_req *breq = our_req(req);

	kfree(breq);
}
/** * bcm63xx_udc_queue - Queue up a new request. * @ep: Endpoint associated with the request. * @req: Request to add. * @mem_flags: Unused. * * If the queue is empty, start this request immediately. Otherwise, add * it to the list. * * ep0 replies are sent through this function from the gadget driver, but * they are treated differently because they need to be handled by the ep0 * state machine. (Sometimes they are replies to control requests that * were spoofed by this driver, and so they shouldn't be transmitted at all.)
*/ staticint bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t mem_flags)
{ struct bcm63xx_ep *bep = our_ep(ep); struct bcm63xx_udc *udc = bep->udc; struct bcm63xx_req *breq = our_req(req); unsignedlong flags; int rc = 0;
if (unlikely(!req || !req->complete || !req->buf || !ep)) return -EINVAL;
/** * bcm63xx_udc_dequeue - Remove a pending request from the queue. * @ep: Endpoint associated with the request. * @req: Request to remove. * * If the request is not at the head of the queue, this is easy - just nuke * it. If the request is at the head of the queue, we'll need to stop the * DMA transaction and then queue up the successor.
*/ staticint bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{ struct bcm63xx_ep *bep = our_ep(ep); struct bcm63xx_udc *udc = bep->udc; struct bcm63xx_req *breq = our_req(req), *cur; unsignedlong flags; int rc = 0;
/** * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware. * @ep: Endpoint to halt. * @value: Zero to clear halt; nonzero to set halt. * * See comments in bcm63xx_update_wedge().
*/ staticint bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
{ struct bcm63xx_ep *bep = our_ep(ep); struct bcm63xx_udc *udc = bep->udc; unsignedlong flags;
/** * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request. * @udc: Reference to the device controller. * * Many standard requests are handled automatically in the hardware, but * we still need to pass them to the gadget driver so that it can * reconfigure the interfaces/endpoints if necessary. * * Unfortunately we are not able to send a STALL response if the host * requests an invalid configuration. If this happens, we'll have to be * content with printing a warning.
*/ staticint bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{ struct usb_ctrlrequest ctrl; int rc;
/**
 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
 * @udc: Reference to the device controller.
 * @req: USB gadget layer representation of the request.
 * @status: Status to return to the gadget driver.
 *
 * The gadget completion callback is invoked with the UDC lock dropped,
 * per the usual gadget-driver locking convention.
 */
static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
	struct usb_request *req, int status)
{
	req->status = status;
	if (status)
		req->actual = 0;
	if (req->complete) {
		spin_unlock_irq(&udc->lock);
		req->complete(&udc->bep[0].ep, req);
		spin_lock_irq(&udc->lock);
	}
}
/** * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to * reset/shutdown. * @udc: Reference to the device controller. * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
*/ staticvoid bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
{ struct usb_request *req = udc->ep0_reply;
/** * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request. * @udc: Reference to the device controller. * @ch_idx: IUDMA channel number. * @length: Number of bytes to TX/RX. * * Used for simple transfers performed by the ep0 worker. This will always * use ep0_ctrl_req / ep0_ctrl_buf.
*/ staticvoid bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx, int length)
{ struct usb_request *req = &udc->ep0_ctrl_req.req;
/** * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it. * @udc: Reference to the device controller. * * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready * for the next packet. Anything else means the transaction requires multiple * stages of handling.
*/ staticenum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{ int rc; struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
/* * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't * ALWAYS deliver these 100% of the time, so if we happen to see one, * just throw it away.
*/ if (rc == 0) return EP0_REQUEUE;
/** * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle. * @udc: Reference to the device controller. * * In state EP0_IDLE, the RX descriptor is either pending, or has been * filled with a SETUP packet from the host. This function handles new * SETUP packets, control IRQ events (which can generate fake SETUP packets), * and reset/shutdown events. * * Returns 0 if work was done; -EAGAIN if nothing to do.
*/ staticint bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{ if (udc->ep0_req_reset) {
udc->ep0_req_reset = 0;
} elseif (udc->ep0_req_set_cfg) {
udc->ep0_req_set_cfg = 0; if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
} elseif (udc->ep0_req_set_iface) {
udc->ep0_req_set_iface = 0; if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
} elseif (udc->ep0_req_completed) {
udc->ep0state = bcm63xx_ep0_do_setup(udc); return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
} elseif (udc->ep0_req_shutdown) {
udc->ep0_req_shutdown = 0;
udc->ep0_req_completed = 0;
udc->ep0_request = NULL;
iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
usb_gadget_unmap_request(&udc->gadget,
&udc->ep0_ctrl_req.req, 0);
/* bcm63xx_udc_pullup() is waiting for this */
mb();
udc->ep0state = EP0_SHUTDOWN;
} elseif (udc->ep0_reply) { /* * This could happen if a USB RESET shows up during an ep0 * transaction (especially if a laggy driver like gadgetfs * is in use).
*/
dev_warn(udc->dev, "nuking unexpected reply\n");
bcm63xx_ep0_nuke_reply(udc, 0);
} else { return -EAGAIN;
}
return 0;
}
/** * bcm63xx_ep0_one_round - Handle the current ep0 state. * @udc: Reference to the device controller. * * Returns 0 if work was done; -EAGAIN if nothing to do.
*/ staticint bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
{ enum bcm63xx_ep0_state ep0state = udc->ep0state; bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
switch (udc->ep0state) { case EP0_REQUEUE: /* set up descriptor to receive SETUP packet */
bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
BCM63XX_MAX_CTRL_PKT);
ep0state = EP0_IDLE; break; case EP0_IDLE: return bcm63xx_ep0_do_idle(udc); case EP0_IN_DATA_PHASE_SETUP: /* * Normal case: TX request is in ep0_reply (queued by the * callback), or will be queued shortly. When it's here, * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE. * * Shutdown case: Stop waiting for the reply. Just * REQUEUE->IDLE. The gadget driver is NOT expected to * queue anything else now.
*/ if (udc->ep0_reply) {
bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
udc->ep0_reply);
ep0state = EP0_IN_DATA_PHASE_COMPLETE;
} elseif (shutdown) {
ep0state = EP0_REQUEUE;
} break; case EP0_IN_DATA_PHASE_COMPLETE: { /* * Normal case: TX packet (ep0_reply) is in flight; wait for * it to finish, then go back to REQUEUE->IDLE. * * Shutdown case: Reset the TX channel, send -ESHUTDOWN * completion to the gadget driver, then REQUEUE->IDLE.
*/ if (udc->ep0_req_completed) {
udc->ep0_reply = NULL;
bcm63xx_ep0_read_complete(udc); /* * the "ack" sometimes gets eaten (see * bcm63xx_ep0_do_idle)
*/
ep0state = EP0_REQUEUE;
} elseif (shutdown) {
iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
bcm63xx_ep0_nuke_reply(udc, 1);
ep0state = EP0_REQUEUE;
} break;
} case EP0_OUT_DATA_PHASE_SETUP: /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */ if (udc->ep0_reply) {
bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
udc->ep0_reply);
ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
} elseif (shutdown) {
ep0state = EP0_REQUEUE;
} break; case EP0_OUT_DATA_PHASE_COMPLETE: { /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */ if (udc->ep0_req_completed) {
udc->ep0_reply = NULL;
bcm63xx_ep0_read_complete(udc);
/* send 0-byte ack to host */
bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
ep0state = EP0_OUT_STATUS_PHASE;
} elseif (shutdown) {
iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
bcm63xx_ep0_nuke_reply(udc, 0);
ep0state = EP0_REQUEUE;
} break;
} case EP0_OUT_STATUS_PHASE: /* * Normal case: 0-byte OUT ack packet is in flight; wait * for it to finish, then go back to REQUEUE->IDLE. * * Shutdown case: just cancel the transmission. Don't bother * calling the completion, because it originated from this * function anyway. Then go back to REQUEUE->IDLE.
*/ if (udc->ep0_req_completed) {
bcm63xx_ep0_read_complete(udc);
ep0state = EP0_REQUEUE;
} elseif (shutdown) {
iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
udc->ep0_request = NULL;
ep0state = EP0_REQUEUE;
} break; case EP0_IN_FAKE_STATUS_PHASE: { /* * Normal case: we spoofed a SETUP packet and are now * waiting for the gadget driver to send a 0-byte reply. * This doesn't actually get sent to the HW because the * HW has already sent its own reply. Once we get the * response, return to IDLE. * * Shutdown case: return to IDLE immediately. * * Note that the ep0 RX descriptor has remained queued * (and possibly unfilled) during this entire transaction. * The HW datapath (IUDMA) never even sees SET_CONFIGURATION * or SET_INTERFACE transactions.
*/ struct usb_request *r = udc->ep0_reply;
if (!r) { if (shutdown)
ep0state = EP0_IDLE; break;
}
/**
 * bcm63xx_ep0_process - ep0 worker thread / state machine.
 * @w: Workqueue struct.
 *
 * bcm63xx_ep0_process is triggered any time an event occurs on ep0.  It
 * is used to synchronize ep0 events and ensure that both HW and SW events
 * occur in a well-defined order.  When the ep0 IUDMA queues are idle, it may
 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
 * by the USBD hardware.
 *
 * The worker function will continue iterating around the state machine
 * until there is nothing left to do.  Usually "nothing left to do" means
 * that we're waiting for a new event from the hardware.
 */
static void bcm63xx_ep0_process(struct work_struct *w)
{
	struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

	spin_lock_irq(&udc->lock);
	while (bcm63xx_ep0_one_round(udc) == 0)
		;
	spin_unlock_irq(&udc->lock);
}
/*********************************************************************** * Standard UDC gadget operations
***********************************************************************/
/** * bcm63xx_udc_get_frame - Read current SOF frame number from the HW. * @gadget: USB device.
*/ staticint bcm63xx_udc_get_frame(struct usb_gadget *gadget)
{ struct bcm63xx_udc *udc = gadget_to_udc(gadget);
/** * bcm63xx_udc_stop - Shut down the controller. * @gadget: USB device. * @driver: Driver for USB device.
*/ staticint bcm63xx_udc_stop(struct usb_gadget *gadget)
{ struct bcm63xx_udc *udc = gadget_to_udc(gadget); unsignedlong flags;
spin_lock_irqsave(&udc->lock, flags);
udc->driver = NULL;
/* * If we switch the PHY too abruptly after dropping D+, the host * will often complain: * * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
*/
msleep(100);
/** * bcm63xx_update_cfg_iface - Read current configuration/interface settings. * @udc: Reference to the device controller. * * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages. * The driver never sees the raw control packets coming in on the ep0 * IUDMA channel, but at least we get an interrupt event to tell us that * new values are waiting in the USBD_STATUS register.
*/ staticvoid bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
{
u32 reg = usbd_readl(udc, USBD_STATUS_REG);
/** * bcm63xx_update_link_speed - Check to see if the link speed has changed. * @udc: Reference to the device controller. * * The link speed update coincides with a SETUP IRQ. Returns 1 if the * speed has changed, so that the caller can update the endpoint settings.
*/ staticint bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
{
u32 reg = usbd_readl(udc, USBD_STATUS_REG); enum usb_device_speed oldspeed = udc->gadget.speed;
switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) { case BCM63XX_SPD_HIGH:
udc->gadget.speed = USB_SPEED_HIGH; break; case BCM63XX_SPD_FULL:
udc->gadget.speed = USB_SPEED_FULL; break; default: /* this should never happen */
udc->gadget.speed = USB_SPEED_UNKNOWN;
dev_err(udc->dev, "received SETUP packet with invalid link speed\n"); return 0;
}
/** * bcm63xx_update_wedge - Iterate through wedged endpoints. * @udc: Reference to the device controller. * @new_status: true to "refresh" wedge status; false to clear it. * * On a SETUP interrupt, we need to manually "refresh" the wedge status * because the controller hardware is designed to automatically clear * stalls in response to a CLEAR_FEATURE request from the host. * * On a RESET interrupt, we do want to restore all wedged endpoints.
*/ staticvoid bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
{ int i;
/** * bcm63xx_udc_data_isr - ISR for data path events (IUDMA). * @irq: IRQ number (unused). * @dev_id: Reference to the IUDMA channel that generated the interrupt. * * For the two ep0 channels, we have special handling that triggers the * ep0 worker thread. For normal bulk/intr channels, either queue up * the next buffer descriptor for the transaction (incomplete transaction), * or invoke the completion callback (complete transactions).
*/ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{ struct iudma_ch *iudma = dev_id; struct bcm63xx_udc *udc = iudma->udc; struct bcm63xx_ep *bep; struct usb_request *req = NULL; struct bcm63xx_req *breq = NULL; int rc; bool is_done = false;
/* * bcm63xx_usbd_dbg_show - Show USBD controller state. * @s: seq_file to which the information will be written. * @p: Unused. * * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
*/ staticint bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
{ struct bcm63xx_udc *udc = s->private;
/* * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors. * @s: seq_file to which the information will be written. * @p: Unused. * * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
*/ staticint bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
{ struct bcm63xx_udc *udc = s->private; int ch_idx, i;
u32 sram2, sram3;
/** * bcm63xx_udc_probe - Initialize a new instance of the UDC. * @pdev: Platform device struct from the bcm63xx BSP code. * * Note that platform data is required, because pd.port_no varies from chip * to chip and is used to switch the correct USB port to device mode.
*/ staticint bcm63xx_udc_probe(struct platform_device *pdev)
{ struct device *dev = &pdev->dev; struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev); struct bcm63xx_udc *udc; int rc = -ENOMEM, i, irq;
udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL); if (!udc) return -ENOMEM;
/*
 * NOTE(review): trailing non-code text removed here — it was German
 * website-disclaimer boilerplate ("information compiled to the best of
 * our knowledge; no guarantee of completeness/correctness"), an artifact
 * of how this file was extracted, not part of the driver source.
 */