/* * Activates/Deactivates FrameList entries for the channel based on endpoint * servicing period
*/ staticvoid dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, int enable)
{ struct dwc2_host_chan *chan;
u16 i, j, inc;
if (!hsotg) {
pr_err("hsotg = %p\n", hsotg); return;
}
if (!qh->channel) {
dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel); return;
}
if (!hsotg->frame_list) {
dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
hsotg->frame_list); return;
}
chan = qh->channel;
inc = dwc2_frame_incr_val(qh); if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
i = dwc2_frame_list_idx(qh->next_active_frame); else
i = 0;
/* * Sync frame list since controller will access it if periodic * channel is currently enabled.
*/
dma_sync_single_for_device(hsotg->dev,
hsotg->frame_list_dma,
hsotg->frame_list_sz,
DMA_TO_DEVICE);
if (!enable) return;
chan->schinfo = 0; if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
j = 1; /* TODO - check this */
inc = (8 + qh->host_interval - 1) / qh->host_interval; for (i = 0; i < inc; i++) {
chan->schinfo |= j;
j = j << qh->host_interval;
}
} else {
chan->schinfo = 0xff;
}
}
if (dwc2_qh_is_non_per(qh)) { if (hsotg->params.uframe_sched)
hsotg->available_host_channels++; else
hsotg->non_periodic_channels--;
} else {
dwc2_update_frame_list(hsotg, qh, 0);
hsotg->available_host_channels++;
}
/* * The condition is added to prevent double cleanup try in case of * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
*/ if (chan->qh) { if (!list_empty(&chan->hc_list_entry))
list_del(&chan->hc_list_entry);
dwc2_hc_cleanup(hsotg, chan);
list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
chan->qh = NULL;
}
qh->channel = NULL;
qh->ntd = 0;
if (qh->desc_list)
memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
dwc2_max_desc_num(qh));
}
/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @qh:        The QH to init
 * @mem_flags: Indicates the type of memory allocation
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                          gfp_t mem_flags)
{
        int retval;

        /* Split transactions cannot be driven by the descriptor engine */
        if (qh->do_split) {
                dev_err(hsotg->dev,
                        "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
                retval = -EINVAL;
                goto err0;
        }

        retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
        if (retval)
                goto err0;

        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
            qh->ep_type == USB_ENDPOINT_XFER_INT) {
                if (!hsotg->frame_list) {
                        retval = dwc2_frame_list_alloc(hsotg, mem_flags);
                        if (retval)
                                goto err1;
                        /* Enable periodic schedule on first periodic QH */
                        dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
                }
        }

        /*
         * NOTE(review): the success return and the error-unwind labels
         * targeted by the gotos above were missing from the corrupted
         * source. Restored; confirm against the pristine file.
         */
        qh->ntd = 0;
        return 0;

err1:
        dwc2_desc_list_free(hsotg, qh);
err0:
        return retval;
}
/** * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related * members * * @hsotg: The HCD state structure for the DWC OTG controller * @qh: The QH to free * * Frees descriptor list memory associated with the QH. If QH is periodic and * the last, frees FrameList memory and disables periodic scheduling.
*/ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{ unsignedlong flags;
dwc2_desc_list_free(hsotg, qh);
/* * Channel still assigned due to some reasons. * Seen on Isoc URB dequeue. Channel halted but no subsequent * ChHalted interrupt to release the channel. Afterwards * when it comes here from endpoint disable routine * channel remains assigned.
*/
spin_lock_irqsave(&hsotg->lock, flags); if (qh->channel)
dwc2_release_channel_ddma(hsotg, qh);
spin_unlock_irqrestore(&hsotg->lock, flags);
/* * next_active_frame is always frame number (not uFrame) both in FS * and HS!
*/
/* * skip_frames is used to limit activated descriptors number * to avoid the situation when HC services the last activated * descriptor firstly. * Example for FS: * Current frame is 1, scheduled frame is 3. Since HC always fetches * the descriptor corresponding to curr_frame+1, the descriptor * corresponding to frame 2 will be fetched. If the number of * descriptors is max=64 (or greather) the list will be fully programmed * with Active descriptors and it is possible case (rare) that the * latest descriptor(considering rollback) corresponding to frame 2 will * be serviced first. HS case is more probable because, in fact, up to * 11 uframes (16 in the code) may be skipped.
*/ if (qh->dev_speed == USB_SPEED_HIGH) { /* * Consider uframe counter also, to start xfer asap. If half of * the frame elapsed skip 2 frames otherwise just 1 frame. * Starting descriptor index must be 8-aligned, so if the * current frame is near to complete the next one is skipped as * well.
*/ if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
*skip_frames = 2 * 8;
frame = dwc2_frame_num_inc(hsotg->frame_number,
*skip_frames);
} else {
*skip_frames = 1 * 8;
frame = dwc2_frame_num_inc(hsotg->frame_number,
*skip_frames);
}
frame = dwc2_full_frame_num(frame);
} else { /* * Two frames are skipped for FS - the current and the next. * But for descriptor programming, 1 frame (descriptor) is * enough, see example above.
*/
*skip_frames = 1;
frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
}
return frame;
}
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qh *qh)
{
        u16 frame, fr_idx, fr_idx_tmp, skip_frames;

        /*
         * With current ISOC processing algorithm the channel is being released
         * when no more QTDs in the list (qh->ntd == 0). Thus this function is
         * called only when qh->ntd == 0 and qh->channel == 0.
         *
         * So qh->channel != NULL branch is not used and just not removed from
         * the source file. It is required for another possible approach which
         * is, do not disable and release the channel when ISOC session
         * completed, just move QH to inactive schedule until new QTD arrives.
         * On new QTD, the QH moved back to 'ready' schedule, starting frame and
         * therefore starting desc_index are recalculated. In this case channel
         * is released only on ep_disable.
         */

        /*
         * Calculate starting descriptor index. For INTERRUPT endpoint it is
         * always 0.
         */
        if (qh->channel) {
                frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
                /*
                 * Calculate initial descriptor index based on FrameList current
                 * bitmap and servicing period
                 */
                fr_idx_tmp = dwc2_frame_list_idx(frame);
                fr_idx = (FRLISTEN_64_SIZE +
                          dwc2_frame_list_idx(qh->next_active_frame) -
                          fr_idx_tmp) % dwc2_frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
        } else {
                qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
                                                                 &skip_frames);
                fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
        }

        /*
         * NOTE(review): the tail below — recording the first/last descriptor
         * index and returning skip_frames — was missing from the corrupted
         * source (fr_idx was computed but never used and the function had no
         * return). Restored; confirm against the pristine file.
         */
        qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}
/*
 * NOTE(review): corrupted extraction region. The lines below appear to be
 * tails/middles of several descriptor helpers (descriptor fill for isoc,
 * the isoc descriptor-list init loop, and the IN-transfer length adjustment
 * of the non-isoc descriptor fill) whose function headers are missing, so
 * identifiers such as dma_desc, qtd, cur_idx, next_idx, inc, idx, n_desc,
 * ntd_max, len and chan are undeclared here. Code left byte-identical;
 * restore this region from the pristine file. Also note preprocessor
 * directives and statements fused onto single lines by the extraction.
 */
/* Set active bit */
dma_desc->status |= HOST_DMA_A;
qh->ntd++;
qtd->isoc_frame_index_last++;
#ifdef ISOC_URB_GIVEBACK_ASAP /* Set IOC for each descriptor corresponding to last frame of URB */ if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
dma_desc->status |= HOST_DMA_IOC; #endif
/* NOTE(review): below is likely the isoc init loop's rollover guard */
/* * Ensure current frame number didn't overstep last scheduled * descriptor. If it happens, the only way to recover is to move * qh->td_last to current frame number + 1. * So that next isoc descriptor will be scheduled on frame number + 1 * and not on a past frame.
*/ if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) { if (inc < 32) {
dev_vdbg(hsotg->dev, "current frame number overstep last descriptor\n");
qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
qh->dev_speed);
idx = qh->td_last;
}
}
#ifdef ISOC_URB_GIVEBACK_ASAP /* Set IOC for last descriptor if descriptor list is full */ if (qh->ntd == ntd_max) {
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
qh->desc_list[idx].status |= HOST_DMA_IOC;
dma_sync_single_for_device(hsotg->dev,
qh->desc_list_dma + (idx * sizeof(struct dwc2_dma_desc)), sizeof(struct dwc2_dma_desc),
DMA_TO_DEVICE);
} #else /* * Set IOC bit only for one descriptor. Always try to be ahead of HW * processing, i.e. on IOC generation driver activates next descriptor * but core continues to process descriptors following the one with IOC * set.
*/
if (n_desc > DESCNUM_THRESHOLD) /* * Move IOC "up". Required even if there is only one QTD * in the list, because QTDs might continue to be queued, * but during the activation it was only one queued. * Actually more than one QTD might be in the list if this * function called from XferCompletion - QTDs was queued during * HW processing of the previous descriptor chunk.
*/
idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
qh->dev_speed); else /* * Set the IOC for the latest descriptor if either number of * descriptors is not greater than threshold or no more new * descriptors activated
*/
idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
/* NOTE(review): below is likely the short-IN-packet length fixup */
/* * Last (or only) descriptor of IN transfer with actual size less * than MaxPacket
*/ if (len > chan->xfer_len) {
chan->xfer_len = 0;
} else {
chan->xfer_dma += len;
chan->xfer_len -= len;
}
}
/* * Start with chan->xfer_dma initialized in assign_and_init_hc(), then * if SG transfer consists of multiple URBs, this pointer is re-assigned * to the buffer of the currently processed QTD. For non-SG request * there is always one QTD active.
*/
/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so remains assigned to the endpoint(QH) until session is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        /* Channel is already assigned */
        struct dwc2_host_chan *chan = qh->channel;
        u16 skip_frames = 0;

        switch (chan->ep_type) {
        case USB_ENDPOINT_XFER_CONTROL:
        case USB_ENDPOINT_XFER_BULK:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_INT:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_update_frame_list(hsotg, qh, 1);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                /* Recalculate starting index only at session start */
                if (!qh->ntd)
                        skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
                dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

                if (!chan->xfer_started) {
                        dwc2_update_frame_list(hsotg, qh, 1);

                        /*
                         * Always set to max, instead of actual size. Otherwise
                         * ntd will be changed with channel being enabled. Not
                         * recommended.
                         */
                        chan->ntd = dwc2_max_desc_num(qh);

                        /* Enable channel only once for ISOC */
                        dwc2_hc_start_transfer_ddma(hsotg, chan);
                }

                /*
                 * NOTE(review): the break/default and the closing of the
                 * switch and function were missing from the corrupted source.
                 * Restored; confirm against the pristine file.
                 */
                break;
        default:
                break;
        }
}
/*
 * NOTE(review): corrupted extraction region. This appears to be the interior
 * of the per-descriptor isochronous completion helper — its function header
 * is missing, so dma_desc, remain, idx, usb_urb, frame_desc and rc are
 * undeclared here. Code left byte-identical; restore from the pristine file.
 */
if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) { /* * XactError, or unable to complete all the transactions * in the scheduled micro-frame/frame, both indicated by * HOST_DMA_STS_PKTERR
*/
qtd->urb->error_count++;
frame_desc->actual_length = qh->n_bytes[idx] - remain;
frame_desc->status = -EPROTO;
} else { /* Success */
frame_desc->actual_length = qh->n_bytes[idx] - remain;
frame_desc->status = 0;
}
if (++qtd->isoc_frame_index == usb_urb->number_of_packets) { /* * urb->status is not used for isoc transfers here. The * individual frame_desc status are used instead.
*/
dwc2_host_complete(hsotg, qtd, 0);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
/* * This check is necessary because urb_dequeue can be called * from urb complete callback (sound driver for example). All * pending URBs are dequeued there, so no need for further * processing.
*/ if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) return -1;
rc = DWC2_CMPL_DONE;
}
qh->ntd--;
/* Stop if IOC requested descriptor reached */ if (dma_desc->status & HOST_DMA_IOC)
rc = DWC2_CMPL_STOP;
/*
 * NOTE(review): corrupted extraction region. This appears to contain pieces
 * of the isochronous error-completion path, the per-descriptor non-isoc
 * processing (control phase advance), and the tail of the non-isoc
 * completion scan — the surrounding function headers are missing, so qtd,
 * qtd_tmp, idx, urb, xfer_done, desc_num, qtd_desc_count, i and chnum are
 * undeclared here. Code left byte-identical; restore from the pristine file.
 */
if (halt_status == DWC2_HC_XFER_AHB_ERR ||
halt_status == DWC2_HC_XFER_BABBLE_ERR) { /* * Channel is halted in these error cases, considered as serious * issues. * Complete all URBs marking all frames as failed, irrespective * whether some of the descriptors (frames) succeeded or not. * Pass error code to completion routine as well, to update * urb->status, some of class drivers might use it to stop * queing transfer requests.
*/ int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
-EIO : -EOVERFLOW;
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) { if (!qtd->in_process) break;
/* * Ensure idx corresponds to descriptor where first urb of this * qtd was added. In fact, during isoc desc init, dwc2 may skip * an index if current frame number is already over this index.
*/ if (idx != qtd->isoc_td_first) {
dev_vdbg(hsotg->dev, "try to complete %d instead of %d\n",
idx, qtd->isoc_td_first);
idx = qtd->isoc_td_first;
}
/* NOTE(review): below is likely the control-phase state machine */
if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) { switch (qtd->control_phase) { case DWC2_CONTROL_SETUP: if (urb->length > 0)
qtd->control_phase = DWC2_CONTROL_DATA; else
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev, " Control setup transaction done\n"); break; case DWC2_CONTROL_DATA: if (*xfer_done) {
qtd->control_phase = DWC2_CONTROL_STATUS;
dev_vdbg(hsotg->dev, " Control data transfer done\n");
/* NOTE(review): 'elseif' below is garbled; should read '} else if (' */
} elseif (desc_num + 1 == qtd->n_desc) { /* * Last descriptor for Control data stage which * is not completed yet
*/
dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
qtd);
} break; default: break;
}
}
for (i = 0; i < qtd_desc_count; i++) { if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
&xfer_done)) {
qtd = NULL; goto stop_scan;
}
desc_num++;
}
}
stop_scan: if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) { /* * Resetting the data toggle for bulk and interrupt endpoints * in case of stall. See handle_hc_stall_intr().
*/ if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0; else
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
}
if (halt_status == DWC2_HC_XFER_COMPLETE) { if (chan->hcint & HCINTMSK_NYET) { /* * Got a NYET on the last transaction of the transfer. * It means that the endpoint should be in the PING * state at the beginning of the next transfer.
*/
qh->ping_state = 1;
}
}
}
/** * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's * status and calls completion routine for the URB if it's done. Called from * interrupt handlers. * * @hsotg: The HCD state structure for the DWC OTG controller * @chan: Host channel the transfer is completed on * @chnum: Index of Host channel registers * @halt_status: Reason the channel is being halted or just XferComplete * for isochronous transfers * * Releases the channel to be used by other transfers. * In case of Isochronous endpoint the channel is not halted until the end of * the session, i.e. QTD list is empty. * If periodic channel released the FrameList is updated accordingly. * Calls transaction selection routines to activate pending transfers.
*/ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan, int chnum, enum dwc2_halt_status halt_status)
{ struct dwc2_qh *qh = chan->qh; int continue_isoc_xfer = 0; enum dwc2_transaction_type tr_type;
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
/* Release the channel if halted or session completed */ if (halt_status != DWC2_HC_XFER_COMPLETE ||
list_empty(&qh->qtd_list)) { struct dwc2_qtd *qtd, *qtd_tmp;
/* * Kill all remainings QTDs since channel has been * halted.
*/
list_for_each_entry_safe(qtd, qtd_tmp,
&qh->qtd_list,
qtd_list_entry) {
dwc2_host_complete(hsotg, qtd,
-ECONNRESET);
dwc2_hcd_qtd_unlink_and_free(hsotg,
qtd, qh);
}
/* Halt the channel if session completed */ if (halt_status == DWC2_HC_XFER_COMPLETE)
dwc2_hc_halt(hsotg, chan, halt_status);
dwc2_release_channel_ddma(hsotg, qh);
dwc2_hcd_qh_unlink(hsotg, qh);
} else { /* Keep in assigned schedule to continue transfer */
list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_assigned); /* * If channel has been halted during giveback of urb * then prevent any new scheduling.
*/ if (!chan->halt_status)
continue_isoc_xfer = 1;
} /* * Todo: Consider the case when period exceeds FrameList size. * Frame Rollover interrupt should be used.
*/
} else { /* * Scan descriptor list to complete the URB(s), then release * the channel
*/
dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
halt_status);
dwc2_release_channel_ddma(hsotg, qh);
dwc2_hcd_qh_unlink(hsotg, qh);
if (!list_empty(&qh->qtd_list)) { /* * Add back to inactive non-periodic schedule on normal * completion
*/
dwc2_hcd_qh_add(hsotg, qh);
}
}
/*
 * NOTE(review): the text below is web-scraping residue — a duplicated German
 * website disclaimer plus stray '¤' / '0.9' artifacts — and is not part of
 * the driver source. Fenced in a comment so it cannot be parsed as code; it
 * should be deleted entirely when this file is restored from its origin.
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 *  Richtigkeit, noch Qualität der bereitgestellten Informationen
 *  zugesichert.
 *  Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *  experimentell."
 */