/** * dwc2_enable_common_interrupts() - Initializes the commmon interrupts, * used in both device and host modes * * @hsotg: Programming view of the DWC_otg controller
*/ staticvoid dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
{
u32 intmsk;
/* Clear any pending OTG Interrupts */
dwc2_writel(hsotg, 0xffffffff, GOTGINT);
/* Clear any pending interrupts */
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
if (!hsotg->params.host_dma)
intmsk |= GINTSTS_RXFLVL; if (!hsotg->params.external_id_pin_ctl)
intmsk |= GINTSTS_CONIDSTSCHNG;
switch (hsotg->hw_params.op_mode) { case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: if (hsotg->params.otg_caps.hnp_support &&
hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_HNPCAP;
fallthrough;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: if (hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_SRPCAP; break;
case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: default: break;
}
dwc2_writel(hsotg, usbcfg, GUSBCFG);
}
/* Enable the external vbus supply regulator, when one was provided. */
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
	return hsotg->vbus_supply ? regulator_enable(hsotg->vbus_supply) : 0;
}
/* Disable the external vbus supply regulator, when one was provided. */
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
{
	if (hsotg->vbus_supply)
		return regulator_disable(hsotg->vbus_supply);

	/*
	 * No regulator present: nothing to do, report success.
	 * NOTE(review): this return and the closing brace were missing in
	 * this copy (function was truncated mid-body).
	 */
	return 0;
}
/*
 * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
 * For system that have a total fifo depth that is smaller than the default
 * RX + TX fifo size.
 *
 * @hsotg: Programming view of DWC_otg controller
 */
static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
{
	struct dwc2_core_params *params = &hsotg->params;
	struct dwc2_hw_params *hw = &hsotg->hw_params;
	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;

	/*
	 * NOTE(review): rxfsiz, nptxfsiz, ptxfsiz and total_fifo_size are
	 * read below without any visible assignment — the initialization
	 * from hw/params appears to be missing from this copy. Confirm
	 * against the upstream driver before building.
	 */

	/*
	 * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
	 * allocation with support for high bandwidth endpoints. Synopsys
	 * defines MPS(Max Packet size) for a periodic EP=1024, and for
	 * non-periodic as 512.
	 */
	if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
		/*
		 * For Buffer DMA mode/Scatter Gather DMA mode
		 * 2 * ((Largest Packet size / 4) + 1 + 1) + n
		 * with n = number of host channel.
		 * 2 * ((1024/4) + 2) = 516
		 */
		rxfsiz = 516 + hw->host_channels;

		/*
		 * min non-periodic tx fifo depth
		 * 2 * (largest non-periodic USB packet used / 4)
		 * 2 * (512/4) = 256
		 */
		nptxfsiz = 256;

		/*
		 * If the summation of RX, NPTX and PTX fifo sizes is still
		 * bigger than the total_fifo_size, then we have a problem.
		 *
		 * We won't be able to allocate as many endpoints. Right now,
		 * we're just printing an error message, but ideally this FIFO
		 * allocation algorithm would be improved in the future.
		 *
		 * FIXME improve this FIFO allocation algorithm.
		 */
		if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
			dev_err(hsotg->dev, "invalid fifo sizes\n");
	}

	if (hsotg->params.en_multiple_tx_fifo &&
	    hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) {
		/*
		 * This feature was implemented in 2.91a version
		 * Global DFIFOCFG calculation for Host mode -
		 * include RxFIFO, NPTXFIFO and HPTXFIFO
		 */
		/*
		 * NOTE(review): 'dfifocfg' has no visible declaration in
		 * this copy (expected 'u32 dfifocfg;') — confirm upstream.
		 */
		dfifocfg = dwc2_readl(hsotg, GDFIFOCFG);
		dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
		/* EPInfoBase points just past the combined host FIFO region */
		dfifocfg |= (params->host_rx_fifo_size +
			     params->host_nperio_tx_fifo_size +
			     params->host_perio_tx_fifo_size) <<
			    GDFIFOCFG_EPINFOBASE_SHIFT &
			    GDFIFOCFG_EPINFOBASE_MASK;
		dwc2_writel(hsotg, dfifocfg, GDFIFOCFG);
	}
}
/** * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for * the HFIR register according to PHY type and speed * * @hsotg: Programming view of DWC_otg controller * * NOTE: The caller can modify the value of the HFIR register only after the * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) * has been set
*/
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
u32 hprt0; int clock = 60; /* default value */
if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED) /* High speed case */ return 125 * clock - 1;
/* FS/LS case */ return 1000 * clock - 1;
}
/**
 * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
 * buffer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @dest: Destination buffer for the packet
 * @bytes: Number of bytes to copy to the destination
 */
void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
{
	u32 *out = (u32 *)dest;
	int words_left = (bytes + 3) / 4;

	/*
	 * Todo: Account for the case where dest is not dword aligned. This
	 * requires reading data from the FIFO into a u32 temp buffer, then
	 * moving it into the data buffer.
	 */
	while (words_left-- > 0)
		*out++ = dwc2_readl(hsotg, HCFIFO(0));
}
/**
 * dwc2_dump_channel_info() - Prints the state of a host channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Pointer to the channel to dump
 *
 * Must be called with interrupt disabled and spinlock held
 *
 * NOTE: This function will be removed once the peripheral controller code
 * is integrated and the driver is stable
 */
static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
#ifdef VERBOSE_DEBUG
	/*
	 * NOTE(review): this body appears fused with a different function
	 * (it enables channel interrupts rather than dumping state): the
	 * locals below are never used, the '#ifdef VERBOSE_DEBUG' is never
	 * closed in this copy, and 'intmsk' is used without a declaration.
	 * Confirm against the upstream driver.
	 */
	int num_channels = hsotg->params.host_channels;
	struct dwc2_qh *qh;
	u32 hcchar;
	u32 hcsplt;
	u32 hctsiz;
	u32 hc_dma;
	int i;

	/* Choose DMA vs slave-mode channel interrupt enables */
	if (hsotg->params.host_dma) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_hc_enable_dma_ints(hsotg, chan);
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA disabled\n");
		dwc2_hc_enable_slave_ints(hsotg, chan);
	}

	/* Enable the top level host channel interrupt */
	intmsk = dwc2_readl(hsotg, HAINTMSK);
	intmsk |= 1 << chan->hc_num;
	dwc2_writel(hsotg, intmsk, HAINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);

	/* Make sure host channel interrupts are enabled */
	intmsk = dwc2_readl(hsotg, GINTMSK);
	intmsk |= GINTSTS_HCHINT;
	dwc2_writel(hsotg, intmsk, GINTMSK);
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
}
/**
 * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
 * a specific endpoint
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The HCCHARn register is set up with the characteristics specified in chan.
 * Host channel interrupts that may need to be serviced while this transfer is
 * in progress are enabled.
 */
static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
	u8 hc_num = chan->hc_num;
	u32 hcintmsk;
	u32 hcchar;
	u32 hcsplt = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Clear old interrupt conditions for this host channel */
	hcintmsk = 0xffffffff;
	/* Writing reserved bits of HCINT is not allowed; mask them off */
	hcintmsk &= ~HCINTMSK_RESERVED14_31;
	dwc2_writel(hsotg, hcintmsk, HCINT(hc_num));

	/* Enable channel interrupts required for this transfer */
	dwc2_hc_enable_ints(hsotg, chan);

	/*
	 * Program the HCCHARn register with the endpoint characteristics for
	 * the current transfer
	 */
	hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
	hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
	if (chan->ep_is_in)
		hcchar |= HCCHAR_EPDIR;
	if (chan->speed == USB_SPEED_LOW)
		hcchar |= HCCHAR_LSPDDEV;
	hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
	hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
	dwc2_writel(hsotg, hcchar, HCCHAR(hc_num));
	if (dbg_hc(chan)) {
		dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
			 hc_num, hcchar);

		dev_vdbg(hsotg->dev, "%s: Channel %d\n",
			 __func__, hc_num);
		dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
			 chan->dev_addr);
		dev_vdbg(hsotg->dev, " Ep Num: %d\n",
			 chan->ep_num);
		dev_vdbg(hsotg->dev, " Is In: %d\n",
			 chan->ep_is_in);
		dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
			 chan->speed == USB_SPEED_LOW);
		dev_vdbg(hsotg->dev, " Ep Type: %d\n",
			 chan->ep_type);
		dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
			 chan->max_packet);
	}

	/* Program the HCSPLT register for SPLITs */
	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "Programming HC %d with split --> %s\n",
				 hc_num,
				 chan->complete_split ? "CSPLIT" : "SSPLIT");
		if (chan->complete_split)
			hcsplt |= HCSPLT_COMPSPLT;
		hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
			  HCSPLT_XACTPOS_MASK;
		hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
			  HCSPLT_HUBADDR_MASK;
		hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
			  HCSPLT_PRTADDR_MASK;
		if (dbg_hc(chan)) {
			dev_vdbg(hsotg->dev, " comp split %d\n",
				 chan->complete_split);
			dev_vdbg(hsotg->dev, " xact pos %d\n",
				 chan->xact_pos);
			dev_vdbg(hsotg->dev, " hub addr %d\n",
				 chan->hub_addr);
			dev_vdbg(hsotg->dev, " hub port %d\n",
				 chan->hub_port);
			dev_vdbg(hsotg->dev, " is_in %d\n",
				 chan->ep_is_in);
			dev_vdbg(hsotg->dev, " Max Pkt %d\n",
				 chan->max_packet);
			dev_vdbg(hsotg->dev, " xferlen %d\n",
				 chan->xfer_len);
		}
	}

	/* hcsplt stays 0 for non-split transfers, disabling split handling */
	dwc2_writel(hsotg, hcsplt, HCSPLT(hc_num));
}
/**
 * dwc2_hc_halt() - Attempts to halt a host channel
 *
 * @hsotg: Controller register interface
 * @chan: Host channel to halt
 * @halt_status: Reason for halting the channel
 *
 * This function should only be called in Slave mode or to abort a transfer in
 * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
 * controller halts the channel when the transfer is complete or a condition
 * occurs that requires application intervention.
 *
 * In slave mode, checks for a free request queue entry, then sets the Channel
 * Enable and Channel Disable bits of the Host Channel Characteristics
 * register of the specified channel to intiate the halt. If there is no free
 * request queue entry, sets only the Channel Disable bit of the HCCHARn
 * register to flush requests for this channel. In the latter case, sets a
 * flag to indicate that the host channel needs to be halted when a request
 * queue slot is open.
 *
 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
 * HCCHARn register. The controller ensures there is space in the request
 * queue before submitting the halt request.
 *
 * Some time may elapse before the core flushes any posted requests for this
 * host channel and halts. The Channel Halted interrupt handler completes the
 * deactivation of the host channel.
 *
 * NOTE(review): this copy of the function is truncated — the final write of
 * 'hcchar' back to HCCHARn and the halt bookkeeping that upstream performs
 * after the slave/DMA branches are missing, and the function body is never
 * closed before the next definition. Confirm against the upstream driver.
 */
void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		  enum dwc2_halt_status halt_status)
{
	u32 nptxsts, hptxsts, hcchar;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/*
	 * In buffer DMA or external DMA mode channel can't be halted
	 * for non-split periodic channels. At the end of the next
	 * uframe/frame (in the worst case), the core generates a channel
	 * halted and disables the channel automatically.
	 */
	if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
	    hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
		if (!chan->do_split &&
		    (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
		     chan->ep_type == USB_ENDPOINT_XFER_INT)) {
			dev_err(hsotg->dev, "%s() Channel can't be halted\n",
				__func__);
			return;
		}
	}

	if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
	    halt_status == DWC2_HC_XFER_AHB_ERR) {
		/*
		 * Disable all channel interrupts except Ch Halted. The QTD
		 * and QH state associated with this transfer has been cleared
		 * (in the case of URB_DEQUEUE), so the channel needs to be
		 * shut down carefully to prevent crashes.
		 */
		u32 hcintmsk = HCINTMSK_CHHLTD;

		/*
		 * NOTE(review): upstream writes 'hcintmsk' to
		 * HCINTMSK(chan->hc_num) here before clearing HCINT; that
		 * write appears to be missing from this copy — confirm.
		 */

		/*
		 * Make sure no other interrupts besides halt are currently
		 * pending. Handling another interrupt could cause a crash due
		 * to the QTD and QH state.
		 */
		dwc2_writel(hsotg, ~hcintmsk, HCINT(chan->hc_num));

		/*
		 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
		 * even if the channel was already halted for some other
		 * reason
		 */
		chan->halt_status = halt_status;

		hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
		if (!(hcchar & HCCHAR_CHENA)) {
			/*
			 * The channel is either already halted or it hasn't
			 * started yet. In DMA mode, the transfer may halt if
			 * it finishes normally or a condition occurs that
			 * requires driver intervention. Don't want to halt
			 * the channel again. In either Slave or DMA mode,
			 * it's possible that the transfer has been assigned
			 * to a channel, but not started yet when an URB is
			 * dequeued. Don't want to halt a channel that hasn't
			 * started yet.
			 */
			return;
		}
	}
	if (chan->halt_pending) {
		/*
		 * A halt has already been issued for this channel. This might
		 * happen when a transfer is aborted by a higher level in
		 * the stack.
		 */
		dev_vdbg(hsotg->dev, "*** %s: Channel %d, chan->halt_pending already set ***\n",
			 __func__, chan->hc_num);
		return;
	}

	hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));

	/* No need to set the bit in DDMA for disabling the channel */
	/* TODO check it everywhere channel is disabled */
	if (!hsotg->params.dma_desc_enable) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
		hcchar |= HCCHAR_CHENA;
	} else {
		if (dbg_hc(chan))
			dev_dbg(hsotg->dev, "desc DMA enabled\n");
	}
	/* Setting CHDIS requests the core to halt the channel */
	hcchar |= HCCHAR_CHDIS;

	if (!hsotg->params.host_dma) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA not enabled\n");
		hcchar |= HCCHAR_CHENA;

		/* Check for space in the request queue to issue the halt */
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			nptxsts = dwc2_readl(hsotg, GNPTXSTS);
			if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
				dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		} else {
			if (dbg_perio())
				dev_vdbg(hsotg->dev, "isoc/intr\n");
			hptxsts = dwc2_readl(hsotg, HPTXSTS);
			if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
			    hsotg->queuing_high_bandwidth) {
				if (dbg_perio())
					dev_vdbg(hsotg->dev, "Disabling channel\n");
				hcchar &= ~HCCHAR_CHENA;
			}
		}
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
	}
/** * dwc2_hc_cleanup() - Clears the transfer state for a host channel * * @hsotg: Programming view of DWC_otg controller * @chan: Identifies the host channel to clean up * * This function is normally called after a transfer is done and the host * channel is being released
*/ void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
{
u32 hcintmsk;
/**
 * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
 * which frame a periodic transfer should occur
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Identifies the host channel to set up and its properties
 * @hcchar: Current value of the HCCHAR register for the specified host channel
 *
 * This function has no effect on non-periodic transfers
 *
 * NOTE(review): this copy is truncated — upstream continues after the
 * frame-miss branch (handling the even/odd decision and setting the
 * HCCHAR_ODDFRM bit via *hcchar) and closes the function; that tail is
 * missing here. Confirm against the upstream driver.
 */
static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, u32 *hcchar)
{
	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
	    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		int host_speed;
		int xfer_ns;
		int xfer_us;
		int bytes_in_fifo;
		u16 fifo_space;
		u16 frame_number;
		u16 wire_frame;

		/*
		 * Try to figure out if we're an even or odd frame. If we set
		 * even and the current frame number is even the transfer
		 * will happen immediately. Similar if both are odd. If one is
		 * even and the other is odd then the transfer will happen when
		 * the frame number ticks.
		 *
		 * There's a bit of a balancing act to get this right.
		 * Sometimes we may want to send data in the current frame (AK
		 * right away). We might want to do this if the frame number
		 * _just_ ticked, but we might also want to do this in order
		 * to continue a split transaction that happened late in a
		 * microframe (so we didn't know to queue the next transfer
		 * until the frame number had ticked). The problem is that we
		 * need a lot of knowledge to know if there's actually still
		 * time to send things or if it would be better to wait until
		 * the next frame.
		 *
		 * We can look at how much time is left in the current frame
		 * and make a guess about whether we'll have time to transfer.
		 * We'll do that.
		 */

		/* Get speed host is running at */
		host_speed = (chan->speed != USB_SPEED_HIGH &&
			      !chan->do_split) ? chan->speed : USB_SPEED_HIGH;

		/* See how many bytes are in the periodic FIFO right now */
		fifo_space = (dwc2_readl(hsotg, HPTXSTS) &
			      TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
		bytes_in_fifo = sizeof(u32) *
				(hsotg->params.host_perio_tx_fifo_size -
				 fifo_space);

		/*
		 * Roughly estimate bus time for everything in the periodic
		 * queue + our new transfer. This is "rough" because we're
		 * using a function that makes takes into account IN/OUT
		 * and INT/ISO and we're just slamming in one value for all
		 * transfers. This should be an over-estimate and that should
		 * be OK, but we can probably tighten it.
		 */
		xfer_ns = usb_calc_bus_time(host_speed, false, false,
					    chan->xfer_len + bytes_in_fifo);
		xfer_us = NS_TO_US(xfer_ns);

		/* See what frame number we'll be at by the time we finish */
		frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);

		/* This is when we were scheduled to be on the wire */
		wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);

		/*
		 * If we'd finish _after_ the frame we're scheduled in then
		 * it's hopeless. Just schedule right away and hope for the
		 * best. Note that it _might_ be wise to call back into the
		 * scheduler to pick a better frame, but this is better than
		 * nothing.
		 */
		if (dwc2_frame_num_gt(frame_number, wire_frame)) {
			dwc2_sch_vdbg(hsotg, "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
				      chan->qh, wire_frame, frame_number,
				      dwc2_frame_num_dec(frame_number,
							 wire_frame));
			wire_frame = frame_number;

			/*
			 * We picked a different frame number; communicate this
			 * back to the scheduler so it doesn't try to schedule
			 * another in the same frame.
			 *
			 * Remember that next_active_frame is 1 before the wire
			 * frame.
			 */
			chan->qh->next_active_frame =
				dwc2_frame_num_dec(frame_number, 1);
		}
staticvoid dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
{ /* Set up the initial PID for the transfer */ if (chan->speed == USB_SPEED_HIGH) { if (chan->ep_is_in) { if (chan->multi_count == 1)
chan->data_pid_start = DWC2_HC_PID_DATA0; elseif (chan->multi_count == 2)
chan->data_pid_start = DWC2_HC_PID_DATA1; else
chan->data_pid_start = DWC2_HC_PID_DATA2;
} else { if (chan->multi_count == 1)
chan->data_pid_start = DWC2_HC_PID_DATA0; else
chan->data_pid_start = DWC2_HC_PID_MDATA;
}
} else {
chan->data_pid_start = DWC2_HC_PID_DATA0;
}
}
/**
 * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
 * the Host Channel
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. For a channel associated
 * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
 * associated with a periodic EP, the periodic Tx FIFO is written.
 *
 * Upon return the xfer_buf and xfer_count fields in chan are incremented by
 * the number of bytes written to the Tx FIFO.
 *
 * NOTE(review): this copy is truncated — only the local declarations and the
 * debug trace remain; the code that computes the byte/dword counts and copies
 * the data into the FIFO is missing, and the function is never closed.
 * Confirm against the upstream driver.
 */
static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 i;
	u32 remaining_count;
	u32 byte_count;
	u32 dword_count;
	u32 *data_buf = (u32 *)chan->xfer_buf;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);
/**
 * dwc2_hc_do_ping() - Starts a PING transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * This function should only be called in Slave mode. The Do Ping bit is set in
 * the HCTSIZ register, then the channel is enabled.
 *
 * NOTE(review): this copy is truncated — only the declarations and the debug
 * trace remain; the HCTSIZ/HCCHAR programming is missing and the function is
 * never closed. Confirm against the upstream driver.
 */
static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
			    struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);
/**
 * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
 * channel and starts the transfer
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel. The xfer_len value
 *        may be reduced to accommodate the max widths of the XferSize and
 *        PktCnt fields in the HCTSIZn register. The multi_count value may be
 *        changed to reflect the final xfer_len value.
 *
 * This function may be called in either Slave mode or DMA mode. In Slave mode,
 * the caller must ensure that there is sufficient space in the request queue
 * and Tx Data FIFO.
 *
 * For an OUT transfer in Slave mode, it loads a data packet into the
 * appropriate FIFO. If necessary, additional data packets are loaded in the
 * Host ISR.
 *
 * For an IN transfer in Slave mode, a data packet is requested. The data
 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
 * additional data packets are requested in the Host ISR.
 *
 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
 * register along with a packet count of 1 and the channel is enabled. This
 * causes a single PING transaction to occur. Other fields in HCTSIZ are
 * simply set to 0 since no data transfer occurs in this case.
 *
 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
 * all the information required to perform the subsequent data transfer. In
 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
 * controller performs the entire PING protocol, then starts the data
 * transfer.
 *
 * NOTE(review): this copy appears to be missing a large middle section —
 * upstream programs HCTSIZ (XferSize/PktCnt/PID), sets up HCDMA for DMA
 * mode, reads HCCHAR back and programs MultiCnt before enabling the channel.
 * Here 'hcchar' is used uninitialized, 'ec_mc' is computed but never used,
 * and the brace balance of the split/non-split branches looks off. Confirm
 * against the upstream driver before building.
 */
static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
				   struct dwc2_host_chan *chan)
{
	u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
	u16 max_hc_pkt_count = hsotg->params.max_packet_count;
	u32 hcchar;
	u32 hctsiz = 0;
	u16 num_packets;
	u32 ec_mc;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (chan->do_ping) {
		if (!hsotg->params.host_dma) {
			if (dbg_hc(chan))
				dev_vdbg(hsotg->dev, "ping, no DMA\n");
			/* In slave mode a PING is a standalone transaction */
			dwc2_hc_do_ping(hsotg, chan);
			chan->xfer_started = 1;
			return;
		}

		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "ping, DMA\n");

		/* In DMA mode the core runs the PING protocol itself */
		hctsiz |= TSIZ_DOPNG;
	}

	if (chan->do_split) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "split\n");
		num_packets = 1;

		if (chan->complete_split && !chan->ep_is_in)
			/*
			 * For CSPLIT OUT Transfer, set the size to 0 so the
			 * core doesn't expect any data written to the FIFO
			 */
			chan->xfer_len = 0;
		else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
			chan->xfer_len = chan->max_packet;
		else if (!chan->ep_is_in && chan->xfer_len > 188)
			chan->xfer_len = 188;

		/* For split set ec_mc for immediate retries */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			ec_mc = 3;
		else
			ec_mc = 1;
	} else {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "no split\n");
		/*
		 * Ensure that the transfer length and packet count will fit
		 * in the widths allocated for them in the HCTSIZn register
		 */
		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * Make sure the transfer size is no larger than one
			 * (micro)frame's worth of data. (A check was done
			 * when the periodic transfer was accepted to ensure
			 * that a (micro)frame's worth of data can be
			 * programmed into a channel.)
			 */
			u32 max_periodic_len =
				chan->multi_count * chan->max_packet;

			if (chan->xfer_len > max_periodic_len)
				chan->xfer_len = max_periodic_len;
		} else if (chan->xfer_len > max_hc_xfer_size) {
			/*
			 * Make sure that xfer_len is a multiple of max packet
			 * size
			 */
			chan->xfer_len =
				max_hc_xfer_size - chan->max_packet + 1;
		}

		if (chan->xfer_len > 0) {
			num_packets = (chan->xfer_len + chan->max_packet - 1) /
					chan->max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				chan->xfer_len = num_packets * chan->max_packet;
			} else if (chan->ep_is_in) {
				/*
				 * Always program an integral # of max packets
				 * for IN transfers.
				 * Note: This assumes that the input buffer is
				 * aligned and sized accordingly.
				 */
				chan->xfer_len = num_packets * chan->max_packet;
			}
		} else {
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;
		}

		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Make sure that the multi_count field matches the
			 * actual transfer length
			 */
			chan->multi_count = num_packets;

		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
			dwc2_set_pid_isoc(chan);

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;

	if (!hsotg->params.host_dma &&
	    !chan->ep_is_in && chan->xfer_len > 0)
		/* Load OUT packet into the appropriate Tx FIFO */
		dwc2_hc_write_packet(hsotg, chan);
}
/**
 * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
 * host channel and starts the transfer in Descriptor DMA mode
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
 * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
 * with micro-frame bitmap.
 *
 * Initializes HCDMA register with descriptor list address and CTD value then
 * starts the transfer via enabling the channel.
 *
 * NOTE(review): this copy appears truncated — upstream writes 'hctsiz' to
 * HCTSIZn, programs NTD/SCHED_INFO and HCDMA, and reads HCCHAR back before
 * the enable below; here 'hcchar' is used uninitialized. Confirm against the
 * upstream driver.
 */
void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan)
{
	u32 hcchar;
	u32 hctsiz = 0;

	if (chan->do_ping)
		hctsiz |= TSIZ_DOPNG;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
		dwc2_set_pid_isoc(chan);

	/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
	hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
		  TSIZ_SC_MC_PID_MASK;

	/* Set host channel enable after all other setup is complete */
	hcchar |= HCCHAR_CHENA;
	hcchar &= ~HCCHAR_CHDIS;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
			 (hcchar & HCCHAR_MULTICNT_MASK) >>
			 HCCHAR_MULTICNT_SHIFT);

	dwc2_writel(hsotg, hcchar, HCCHAR(chan->hc_num));
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
			 chan->hc_num);

	chan->xfer_started = 1;
	chan->requests++;
}
/**
 * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
 * a previous call to dwc2_hc_start_transfer()
 *
 * @hsotg: Programming view of DWC_otg controller
 * @chan: Information needed to initialize the host channel
 *
 * The caller must ensure there is sufficient space in the request queue and Tx
 * Data FIFO. This function should only be called in Slave mode. In DMA mode,
 * the controller acts autonomously to complete transfers programmed to a host
 * channel.
 *
 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
 * if there is any data remaining to be queued. For an IN transfer, another
 * data packet is always requested. For the SETUP phase of a control transfer,
 * this function does nothing.
 *
 * Return: 1 if a new request is queued, 0 if no more requests are required
 * for this transfer
 *
 * NOTE(review): this copy is truncated inside the IN-transfer branch — the
 * re-enable of the channel, the OUT handling and the returns are missing,
 * and the function is never closed. Confirm against the upstream driver.
 */
static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
				     struct dwc2_host_chan *chan)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
			 chan->hc_num);

	if (chan->do_split)
		/* SPLITs always queue just once per channel */
		return 0;

	if (chan->data_pid_start == DWC2_HC_PID_SETUP)
		/* SETUPs are queued only once since they can't be NAK'd */
		return 0;

	if (chan->ep_is_in) {
		/*
		 * Always queue another request for other IN transfers. If
		 * back-to-back INs are issued and NAKs are received for both,
		 * the driver may still be processing the first NAK when the
		 * second NAK is received. When the interrupt handler clears
		 * the NAK interrupt for the first NAK, the second NAK will
		 * not be seen. So we can't depend on the NAK interrupt
		 * handler to requeue a NAK'd request. Instead, IN requests
		 * are issued each time this function is called. When the
		 * transfer completes, the extra requests for the channel will
		 * be flushed.
		 */
		u32 hcchar = dwc2_readl(hsotg, HCCHAR(chan->hc_num));
/*
 * Processes all the URBs in a single list of QHs. Completes them with
 * -ETIMEDOUT and frees the QTD.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
				      struct list_head *qh_list)
{
	struct dwc2_qh *qh, *qh_tmp;
	struct dwc2_qtd *qtd, *qtd_tmp;

	/*
	 * NOTE(review): upstream wraps the code below in an outer
	 * list_for_each_entry_safe() walk of @qh_list; that loop is missing
	 * from this copy, so 'qh' is used uninitialized, and the function is
	 * never closed. Confirm against the upstream driver.
	 */

	/* Free each QTD in the QH's QTD list */
	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
				 qtd_list_entry)
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

	/* Detach the QH from its channel so the channel isn't reused stale */
	if (qh->channel && qh->channel->qh == qh)
		qh->channel->qh = NULL;
/*
 * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
 * and periodic schedules. The QTD associated with each URB is removed from
 * the schedule and freed. This function may be called when a disconnect is
 * detected or when the HCD is being stopped.
 *
 * Must be called with interrupt disabled and spinlock held
 */
static void dwc2_kill_all_urbs(struct dwc2_hsotg *hsotg)
{
	/* Every schedule list, in the same order they were killed before */
	struct list_head *qh_lists[] = {
		&hsotg->non_periodic_sched_inactive,
		&hsotg->non_periodic_sched_waiting,
		&hsotg->non_periodic_sched_active,
		&hsotg->periodic_sched_inactive,
		&hsotg->periodic_sched_ready,
		&hsotg->periodic_sched_assigned,
		&hsotg->periodic_sched_queued,
	};
	unsigned int i;

	for (i = 0; i < sizeof(qh_lists) / sizeof(qh_lists[0]); i++)
		dwc2_kill_urbs_in_qh_list(hsotg, qh_lists[i]);
}
/**
 * dwc2_hcd_start() - Starts the HCD when switching to Host mode
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * NOTE(review): this copy is truncated — upstream continues after the HNP
 * reset branch (queueing the delayed start work) and closes the function;
 * that tail is missing here. Confirm against the upstream driver.
 */
void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;

	if (hsotg->op_state == OTG_STATE_B_HOST) {
		/*
		 * Reset the port. During a HNP mode switch the reset
		 * needs to occur within 1ms and have a duration of at
		 * least 50ms.
		 */
		hprt0 = dwc2_read_hprt0(hsotg);
		hprt0 |= HPRT0_RST;
		dwc2_writel(hsotg, hprt0, HPRT0);
	}
/* Must be called with interrupt disabled and spinlock held */
static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
{
	int num_channels = hsotg->params.host_channels;
	struct dwc2_host_chan *channel;
	u32 hcchar;
	int i;

	if (!hsotg->params.host_dma) {
		/* Flush out any channel requests in slave mode */
		for (i = 0; i < num_channels; i++) {
			channel = hsotg->hc_ptr_array[i];
			/*
			 * Channels still linked on a list are skipped —
			 * presumably those are free/unassigned; confirm
			 * against the free_hc_list handling below.
			 */
			if (!list_empty(&channel->hc_list_entry))
				continue;
			hcchar = dwc2_readl(hsotg, HCCHAR(i));
			if (hcchar & HCCHAR_CHENA) {
				/* Request a flush: disable, force OUT dir */
				hcchar &= ~(HCCHAR_CHENA | HCCHAR_EPDIR);
				hcchar |= HCCHAR_CHDIS;
				dwc2_writel(hsotg, hcchar, HCCHAR(i));
			}
		}
	}

	for (i = 0; i < num_channels; i++) {
		channel = hsotg->hc_ptr_array[i];
		if (!list_empty(&channel->hc_list_entry))
			continue;
		hcchar = dwc2_readl(hsotg, HCCHAR(i));
		if (hcchar & HCCHAR_CHENA) {
			/* Halt the channel */
			hcchar |= HCCHAR_CHDIS;
			dwc2_writel(hsotg, hcchar, HCCHAR(i));
		}

		dwc2_hc_cleanup(hsotg, channel);
		list_add_tail(&channel->hc_list_entry, &hsotg->free_hc_list);
		/*
		 * Added for Descriptor DMA to prevent channel double cleanup in
		 * release_channel_ddma(), which is called from ep_disable when
		 * device disconnects
		 */
		channel->qh = NULL;
	}
	/* All channels have been freed, mark them available */
	if (hsotg->params.uframe_sched) {
		hsotg->available_host_channels =
			hsotg->params.host_channels;
	} else {
		hsotg->non_periodic_channels = 0;
		hsotg->periodic_channels = 0;
	}
}
/**
 * dwc2_hcd_connect() - Handles connect of the HCD
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_connect(struct dwc2_hsotg *hsotg)
{
	if (hsotg->lx_state != DWC2_L0)
		usb_hcd_resume_root_hub(hsotg->priv);

	/*
	 * NOTE(review): this copy of the function was truncated after the
	 * resume call; the hub status flags and closing brace below were
	 * restored from the upstream driver — confirm against the exact
	 * kernel version in use.
	 */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 1;
}
/**
 * dwc2_hcd_disconnect() - Handles disconnect of the HCD
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 * @force: If true, we won't try to reconnect even if we see device connected.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force)
{
	u32 intr;
	u32 hprt0;

	/* Set status flags for the hub driver */
	hsotg->flags.b.port_connect_status_change = 1;
	hsotg->flags.b.port_connect_status = 0;

	/*
	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
	 * interrupt mask and status bits and disabling subsequent host
	 * channel interrupts.
	 */
	intr = dwc2_readl(hsotg, GINTMSK);
	intr &= ~(GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT);
	dwc2_writel(hsotg, intr, GINTMSK);
	intr = GINTSTS_NPTXFEMP | GINTSTS_PTXFEMP | GINTSTS_HCHINT;
	dwc2_writel(hsotg, intr, GINTSTS);

	/*
	 * Turn off the vbus power only if the core has transitioned to device
	 * mode. If still in host mode, need to keep power on to detect a
	 * reconnection.
	 */
	if (dwc2_is_device_mode(hsotg)) {
		if (hsotg->op_state != OTG_STATE_A_SUSPEND) {
			dev_dbg(hsotg->dev, "Disconnect: PortPower off\n");
			dwc2_writel(hsotg, 0, HPRT0);
		}

		dwc2_disable_host_interrupts(hsotg);
	}

	/* Respond with an error status to all URBs in the schedule */
	dwc2_kill_all_urbs(hsotg);

	if (dwc2_is_host_mode(hsotg))
		/* Clean up any host channels that were in use */
		dwc2_hcd_cleanup_channels(hsotg);

	dwc2_host_disconnect(hsotg);

	/*
	 * Add an extra check here to see if we're actually connected but
	 * we don't have a detection interrupt pending. This can happen if:
	 *   1. hardware sees connect
	 *   2. hardware sees disconnect
	 *   3. hardware sees connect
	 *   4. dwc2_port_intr() - clears connect interrupt
	 *   5. dwc2_handle_common_intr() - calls here
	 *
	 * Without the extra check here we will end calling disconnect
	 * and won't get any future interrupts to handle the connect.
	 */
	if (!force) {
		hprt0 = dwc2_readl(hsotg, HPRT0);
		if (!(hprt0 & HPRT0_CONNDET) && (hprt0 & HPRT0_CONNSTS))
			dwc2_hcd_connect(hsotg);
	}
}
if (hsotg->lx_state == DWC2_L1)
hsotg->flags.b.port_l1_change = 1;
}
/**
 * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
 *
 * @hsotg: Pointer to struct dwc2_hsotg
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD STOP\n");

	/*
	 * The root hub should be disconnected before this function is called.
	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
	 * and the QH lists (via ..._hcd_endpoint_disable).
	 */

	/* Turn off all host-specific interrupts */
	dwc2_disable_host_interrupts(hsotg);

	/* Turn off the vbus power */
	dev_dbg(hsotg->dev, "PortPower off\n");
	dwc2_writel(hsotg, 0, HPRT0);
}
/* Caller must hold driver lock */ staticint dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb, struct dwc2_qh *qh, struct dwc2_qtd *qtd)
{
/*
 * NOTE(review): this function body appears truncated/garbled by
 * extraction: "staticint" is missing a space, `tr_type` is used below
 * without any visible declaration, `intr_mask`/`retval`/`dev_speed`
 * are declared but never used here, and the braces are unbalanced.
 * Restore from the upstream dwc2 host driver before building.
 */
u32 intr_mask; int retval; int dev_speed;
/* Fail fast if the root port is no longer connected */
if (!hsotg->flags.b.port_connect_status) { /* No longer connected */
dev_err(hsotg->dev, "Not connected\n"); return -ENODEV;
}
/* Bulk transfers are deferred until URB_GIVEBACK_ASAP is set */
if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
!(qtd->urb->flags & URB_GIVEBACK_ASAP)) /* * Do not schedule SG transactions until qtd has * URB_GIVEBACK_ASAP set
*/ return 0;
/* Kick the transaction scheduler if it found anything to run */
tr_type = dwc2_hcd_select_transactions(hsotg); if (tr_type != DWC2_TRANSACTION_NONE)
dwc2_hcd_queue_transactions(hsotg, tr_type);
}
return 0;
}
/* Must be called with interrupt disabled and spinlock held */ staticint dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb)
{ struct dwc2_qh *qh; struct dwc2_qtd *urb_qtd;
/*
 * NOTE(review): this function appears cut off by extraction --
 * "staticint" is missing a space and the body ends abruptly after the
 * `in_process` declaration below, before the QTD/QH teardown and the
 * function's closing brace.  Restore from the upstream driver.
 */
/* A URB without a QTD was never queued; nothing to dequeue */
urb_qtd = urb->qtd; if (!urb_qtd) {
dev_dbg(hsotg->dev, "## Urb QTD is NULL ##\n"); return -EINVAL;
}
qh = urb_qtd->qh; if (!qh) {
dev_dbg(hsotg->dev, "## Urb QTD QH is NULL ##\n"); return -EINVAL;
}
urb->priv = NULL;
if (urb_qtd->in_process && qh->channel) {
/* Log the channel state before tearing it down */
dwc2_dump_channel_info(hsotg, qh->channel);
/* The QTD is in process (it has been assigned to a channel) */ if (hsotg->flags.b.port_connect_status) /* * If still connected (i.e. in host mode), halt the * channel so it can be used for other transfers. If * no longer connected, the host registers can't be * written to halt the channel since the core is in * device mode.
*/
dwc2_hc_halt(hsotg, qh->channel,
DWC2_HC_XFER_URB_DEQUEUE);
}
/* * Free the QTD and clean up the associated QH. Leave the QH in the * schedule if it has any remaining QTDs.
*/ if (!hsotg->params.dma_desc_enable) {
u8 in_process = urb_qtd->in_process;
/* Must NOT be called with interrupt disabled or spinlock held */ staticint dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg, struct usb_host_endpoint *ep, int retry)
{ struct dwc2_qtd *qtd, *qtd_tmp; struct dwc2_qh *qh; unsignedlong flags; int rc;
/*
 * NOTE(review): truncated by extraction -- "staticint"/"unsignedlong"
 * are missing spaces, `qh` is used below without being initialized
 * (the ep->hcpriv lookup and the spinlocked retry loop that normally
 * precede this are missing), `flags`/`rc`/`retry` are unused here,
 * and the function has no closing brace.  Restore from upstream.
 */
/* Free each QTD in the QH's QTD list */
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
/* Detach the QH from the endpoint and from its channel, if any */
ep->hcpriv = NULL;
if (qh->channel && qh->channel->qh == qh)
qh->channel->qh = NULL;
/* Must be called with interrupt disabled and spinlock held */
static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
				   struct usb_host_endpoint *ep)
{
	struct dwc2_qh *qh = ep->hcpriv;

	/* No QH means the endpoint was never scheduled on this HCD */
	if (!qh)
		return -EINVAL;

	/*
	 * After an endpoint reset the next transfer must start with
	 * DATA0, so rewind the tracked data toggle.
	 */
	qh->data_toggle = DWC2_HC_PID_DATA0;

	return 0;
}
/**
 * dwc2_core_init() - Initializes the DWC_otg controller registers and
 * prepares the core for device mode or host mode operation
 *
 * @hsotg: Programming view of the DWC_otg controller
 * @initial_setup: If true then this is the first init for this instance.
 */
int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
{
	u32 usbcfg, otgctl;
	int ret;

	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	usbcfg = dwc2_readl(hsotg, GUSBCFG);

	/* Set ULPI External VBUS bit if needed */
	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
	if (hsotg->params.phy_ulpi_ext_vbus)
		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;

	/* Set external TS Dline pulsing bit if needed */
	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
	if (hsotg->params.ts_dline)
		usbcfg |= GUSBCFG_TERMSELDLPULSE;

	dwc2_writel(hsotg, usbcfg, GUSBCFG);

	/*
	 * Reset the Controller
	 *
	 * Only needed on a re-init: for the first init, earlier code has
	 * already reset the core (it had to, in order to detect the
	 * various hardware parameters).
	 */
	if (!initial_setup) {
		ret = dwc2_core_reset(hsotg, false);
		if (ret) {
			dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
				__func__);
			return ret;
		}
	}

	/*
	 * This needs to happen in FS mode before any other programming occurs
	 */
	ret = dwc2_phy_init(hsotg, initial_setup);
	if (ret)
		return ret;

	/* Program the GAHBCFG Register */
	ret = dwc2_gahbcfg_init(hsotg);
	if (ret)
		return ret;

	/* Program the GUSBCFG register */
	dwc2_gusbcfg_init(hsotg);

	/* Program the GOTGCTL register */
	otgctl = dwc2_readl(hsotg, GOTGCTL);
	otgctl &= ~GOTGCTL_OTGVER;
	dwc2_writel(hsotg, otgctl, GOTGCTL);

	/* Clear the SRP success bit for FS-I2c */
	hsotg->srp_success = 0;

	/* Enable common interrupts */
	dwc2_enable_common_interrupts(hsotg);

	/*
	 * Do device or host initialization based on mode during PCD and
	 * HCD initialization
	 */
	if (dwc2_is_host_mode(hsotg)) {
		dev_dbg(hsotg->dev, "Host Mode\n");
		hsotg->op_state = OTG_STATE_A_HOST;
	} else {
		dev_dbg(hsotg->dev, "Device Mode\n");
		hsotg->op_state = OTG_STATE_B_PERIPHERAL;
	}

	return 0;
}
/** * dwc2_core_host_init() - Initializes the DWC_otg controller registers for * Host mode * * @hsotg: Programming view of DWC_otg controller * * This function flushes the Tx and Rx FIFOs and flushes any entries in the * request queues. Host channels are reset to ensure that they are ready for * performing transfers.
*/ staticvoid dwc2_core_host_init(struct dwc2_hsotg *hsotg)
{
u32 hcfg, hfir, otgctl, usbcfg;
dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
/* Set HS/FS Timeout Calibration to 7 (max available value). * The number of PHY clocks that the application programs in * this field is added to the high/full speed interpacket timeout * duration in the core to account for any additional delays * introduced by the PHY. This can be required, because the delay * introduced by the PHY in generating the linestate condition * can vary from one PHY to another.
*/
usbcfg = dwc2_readl(hsotg, GUSBCFG);
usbcfg |= GUSBCFG_TOUTCAL(7);
dwc2_writel(hsotg, usbcfg, GUSBCFG);
/* Restart the Phy Clock */
dwc2_writel(hsotg, 0, PCGCTL);
/* * This bit allows dynamic reloading of the HFIR register during * runtime. This bit needs to be programmed during initial configuration * and its value must not be changed during runtime.
*/ if (hsotg->params.reload_ctl) {
hfir = dwc2_readl(hsotg, HFIR);
hfir |= HFIR_RLDCTRL;
dwc2_writel(hsotg, hfir, HFIR);
}
if (hsotg->params.dma_desc_enable) {
u32 op_mode = hsotg->hw_params.op_mode;
if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
!hsotg->hw_params.dma_desc_enable ||
op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
dev_err(hsotg->dev, "Hardware does not support descriptor DMA mode -\n");
dev_err(hsotg->dev, "falling back to buffer DMA mode.\n");
hsotg->params.dma_desc_enable = false;
} else {
hcfg = dwc2_readl(hsotg, HCFG);
hcfg |= HCFG_DESCDMA;
dwc2_writel(hsotg, hcfg, HCFG);
}
}
/* Configure data FIFO sizes */
dwc2_config_fifos(hsotg);
/* TODO - check this */ /* Clear Host Set HNP Enable in the OTG Control Register */
otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(hsotg, otgctl, GOTGCTL);
/* Make sure the FIFOs are flushed */
dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
dwc2_flush_rx_fifo(hsotg);
/* Clear Host Set HNP Enable in the OTG Control Register */
otgctl = dwc2_readl(hsotg, GOTGCTL);
otgctl &= ~GOTGCTL_HSTSETHNPEN;
dwc2_writel(hsotg, otgctl, GOTGCTL);
if (!hsotg->params.dma_desc_enable) { int num_channels, i;
u32 hcchar;
/* Flush out any leftover queued requests */
num_channels = hsotg->params.host_channels; for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i)); if (hcchar & HCCHAR_CHENA) {
hcchar &= ~HCCHAR_CHENA;
hcchar |= HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
}
}
/* Halt all channels to put them into a known state */ for (i = 0; i < num_channels; i++) {
hcchar = dwc2_readl(hsotg, HCCHAR(i)); if (hcchar & HCCHAR_CHENA) {
hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
hcchar &= ~HCCHAR_EPDIR;
dwc2_writel(hsotg, hcchar, HCCHAR(i));
dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
__func__, i);
if (dwc2_hsotg_wait_bit_clear(hsotg, HCCHAR(i),
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.