if (!bd_list->bd_table_array) {
dev_dbg(bdc->dev, "%s already freed\n", ep->name); return;
} for (index = 0; index < num_tabs; index++) { /* * check if the bd_table struct is allocated ? * if yes, then check if bd memory has been allocated, then * free the dma_pool and also the bd_table struct memory
*/
bd_table = bd_list->bd_table_array[index];
dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index); if (!bd_table) {
dev_dbg(bdc->dev, "bd_table not allocated\n"); continue;
} if (!bd_table->start_bd) {
dev_dbg(bdc->dev, "bd dma pool not allocated\n"); continue;
}
dev_dbg(bdc->dev, "Free dma pool start_bd:%p dma:%llx\n",
bd_table->start_bd,
(unsignedlonglong)bd_table->dma);
dma_pool_free(bdc->bd_table_pool,
bd_table->start_bd,
bd_table->dma); /* Free the bd_table structure */
kfree(bd_table);
} /* Free the bd table array */
kfree(ep->bd_list.bd_table_array);
}
/*
 * Chain the tables, by inserting a chain bd at the end of prev_table,
 * pointing to next_table.
 */
static inline void chain_table(struct bd_table *prev_table,
			       struct bd_table *next_table,
			       u32 bd_p_tab)
{
	/*
	 * Program the last bd of prev_table as a chain bd. A chain bd
	 * carries the full 64-bit dma address of the next table plus the
	 * chain marker; programming only the low address word (as the
	 * truncated original did) leaves the upper address bits and the
	 * MARK_CHAIN_BD control word unset, so the HW would not follow the
	 * chain. Layout matches the chain bd written in the dequeue path.
	 */
	prev_table->start_bd[bd_p_tab - 1].offset[0] =
				cpu_to_le32(lower_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab - 1].offset[1] =
				cpu_to_le32(upper_32_bits(next_table->dma));

	prev_table->start_bd[bd_p_tab - 1].offset[2] = 0x0;

	prev_table->start_bd[bd_p_tab - 1].offset[3] =
				cpu_to_le32(MARK_CHAIN_BD);
}
/* Allocate the bdl for ep, during config ep */ staticint ep_bd_list_alloc(struct bdc_ep *ep)
{ struct bd_table *prev_table = NULL; int index, num_tabs, bd_p_tab; struct bdc *bdc = ep->bdc; struct bd_table *bd_table;
dma_addr_t dma;
if (usb_endpoint_xfer_isoc(ep->desc))
num_tabs = NUM_TABLES_ISOCH; else
num_tabs = NUM_TABLES;
bd_p_tab = NUM_BDS_PER_TABLE; /* if there is only 1 table in bd list then loop chain to self */
dev_dbg(bdc->dev, "%s ep:%p num_tabs:%d\n",
__func__, ep, num_tabs);
/* Allocate memory for table array */
ep->bd_list.bd_table_array = kcalloc(num_tabs, sizeof(struct bd_table *),
GFP_ATOMIC); if (!ep->bd_list.bd_table_array) return -ENOMEM;
/* Allocate memory for each table */ for (index = 0; index < num_tabs; index++) { /* Allocate memory for bd_table structure */
bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC); if (!bd_table) goto fail;
/*
 * Number of transfer bd's currently available on this ep's bd list.
 * Chain bds occupy slots but cannot carry data, so they are excluded.
 */
static int bd_available_ep(struct bdc_ep *ep)
{
	struct bd_list *bd_list = &ep->bd_list;
	struct bdc *bdc = ep->bdc;
	int free_hi = 0, free_lo = 0;
	int chain_hi = 0, chain_lo = 0;
	int avail = 0;

	/* Empty ring: every bd is free except one chain bd per table */
	if (bd_list->eqp_bdi == bd_list->hwd_bdi)
		return bd_list->max_bdi - bd_list->num_tabs;

	if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
		/*
		 * Free region wraps around: eqp..max plus 0..hwd, minus the
		 * chain bds embedded in each stretch.
		 */
		free_hi = bd_list->max_bdi - bd_list->eqp_bdi;
		free_lo = bd_list->hwd_bdi;
		chain_hi = free_hi / bd_list->num_bds_table;
		chain_lo = free_lo / bd_list->num_bds_table;
		dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
			 chain_hi, chain_lo);
		avail = free_hi + free_lo - chain_hi - chain_lo;
	} else {
		/* Contiguous free region from eqp up to hwd */
		free_hi = bd_list->hwd_bdi - bd_list->eqp_bdi;
		if (free_hi <= bd_list->num_bds_table) {
			/* short gap: at most one chain bd can sit inside it */
			if (bdi_to_tbi(ep, bd_list->hwd_bdi) !=
			    bdi_to_tbi(ep, bd_list->eqp_bdi))
				avail = free_hi - 1;
		} else {
			chain_hi = free_hi / bd_list->num_bds_table;
			avail = free_hi - chain_hi;
		}
	}
	/*
	 * Keep one bd in reserve so a full ring can be distinguished from an
	 * empty one.
	 */
	avail--;
	dev_vdbg(bdc->dev, "available_bd:%d\n", avail);

	return avail;
}
/* Ring the transfer doorbell after bds have been queued on the bdl */
void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[epnum];

	dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);

	/*
	 * There is no HW register telling us whether the ep is running;
	 * the software STOP flag is all we have, so clear it here before
	 * notifying.
	 */
	if (unlikely(ep->flags & BDC_EP_STOP))
		ep->flags &= ~BDC_EP_STOP;

	bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
}
/* returns the bd corresponding to bdi */ staticstruct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
{ int tbi = bdi_to_tbi(ep, bdi); int local_bdi = 0;
for (bdnum = 0; bdnum < num_bds; bdnum++) {
dword2 = dword3 = 0; /* First bd */ if (!bdnum) {
dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
dword2 |= BD_LTF; /* format of first bd for ep0 is different than other */ if (ep->ep_num == 1) {
ret = setup_first_bd_ep0(bdc, req, &dword3); if (ret) return ret;
}
} if (!req->ep->dir)
dword3 |= BD_ISP;
if (req_len > BD_MAX_BUFF_SIZE) {
dword2 |= BD_MAX_BUFF_SIZE;
req_len -= BD_MAX_BUFF_SIZE;
} else { /* this should be the last bd */
dword2 |= req_len;
dword3 |= BD_IOC;
dword3 |= BD_EOT;
} /* Currently only 1 INT target is supported */
dword2 |= BD_INTR_TARGET(0);
bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi); if (unlikely(!bd)) {
dev_err(bdc->dev, "Err bd pointing to wrong addr\n"); return -EINVAL;
} /* write bd */
bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
bd->offset[2] = cpu_to_le32(dword2);
bd->offset[3] = cpu_to_le32(dword3); /* advance eqp pointer */
ep_bdlist_eqp_adv(ep); /* advance the buff pointer */
buf_add += BD_MAX_BUFF_SIZE;
dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
(unsignedlonglong)buf_add, req_len, bd,
ep->bd_list.eqp_bdi);
bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
bd->offset[3] = cpu_to_le32(BD_SBF);
} /* clear the STOP BD fetch bit from the first bd of this xfr */
bd = bdi_to_bd(ep, bd_xfr->start_bdi);
bd->offset[3] &= cpu_to_le32(~BD_SBF); /* the new eqp will be next hw dqp */
bd_xfr->num_bds = num_bds;
bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi; /* everything is written correctly before notifying the HW */
wmb();
return 0;
}
/* Queue the xfr */ staticint bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
{ int num_bds, bd_available; struct bdc_ep *ep; int ret;
/* Disable the endpoint */ int bdc_ep_disable(struct bdc_ep *ep)
{ struct bdc_req *req; struct bdc *bdc; int ret;
ret = 0;
bdc = ep->bdc;
dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num); /* Stop the endpoint */
ret = bdc_stop_ep(bdc, ep->ep_num);
/* * Intentionally don't check the ret value of stop, it can fail in * disconnect scenarios, continue with dconfig
*/ /* de-queue any pending requests */ while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct bdc_req,
queue);
bdc_req_complete(ep, req, -ESHUTDOWN);
} /* deconfigure the endpoint */
ret = bdc_dconfig_ep(bdc, ep); if (ret)
dev_warn(bdc->dev, "dconfig fail but continue with memory free");
ep->flags = 0; /* ep0 memory is not freed, but reused on next connect sr */ if (ep->ep_num == 1) return 0;
ret = ep_bd_list_alloc(ep); if (ret) {
dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret); return -ENOMEM;
}
bdc_dbg_bd_list(bdc, ep); /* only for ep0: config ep is called for ep0 from connect event */ if (ep->ep_num == 1) return ret;
/* Issue a configure endpoint command */
ret = bdc_config_ep(bdc, ep); if (ret) return ret;
if (bdc->delayed_status) {
bdc->delayed_status = false; /* if status stage was delayed? */ if (bdc->ep0_state == WAIT_FOR_STATUS_START) { /* Queue a status stage BD */
ep0_queue_status_stage(bdc);
bdc->ep0_state = WAIT_FOR_STATUS_XMIT; return 0;
}
} else { /* * if delayed status is false and 0 length transfer is requested * i.e. for status stage of some setup request, then just * return from here the status stage is queued independently
*/ if (req->usb_req.length == 0) return 0;
}
ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir); if (ret) {
dev_err(bdc->dev, "dma mapping failed %s\n", ep->name); return ret;
}
dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
__func__, ep->name, start_bdi, end_bdi);
dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
ep, (void *)ep->usb_ep.desc); /* if still connected, stop the ep to see where the HW is ? */ if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
ret = bdc_stop_ep(bdc, ep->ep_num); /* if there is an issue, then no need to go further */ if (ret) return 0;
} else return 0;
/* * After endpoint is stopped, there can be 3 cases, the request * is processed, pending or in the middle of processing
*/
/* we have the dma addr of next bd that will be fetched by hardware */
curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64); if (curr_hw_dqpi < 0) return curr_hw_dqpi;
/* * curr_hw_dqpi points to actual dqp of HW and HW owns bd's from * curr_hw_dqbdi..eqp_bdi.
*/
/* Check if start_bdi and end_bdi are in range of HW owned BD's */ if (curr_hw_dqpi > eqp_bdi) { /* there is a wrap from last to 0 */ if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
start_pending = true;
end_pending = true;
} elseif (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
end_pending = true;
}
} else { if (start_bdi >= curr_hw_dqpi) {
start_pending = true;
end_pending = true;
} elseif (end_bdi >= curr_hw_dqpi) {
end_pending = true;
}
}
dev_dbg(bdc->dev, "start_pending:%d end_pending:%d speed:%d\n",
start_pending, end_pending, bdc->gadget.speed);
/* If both start till end are processes, we cannot deq req */ if (!start_pending && !end_pending) return -EINVAL;
/* * if ep_dequeue is called after disconnect then just return * success from here
*/ if (bdc->gadget.speed == USB_SPEED_UNKNOWN) return 0;
tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
table = ep->bd_list.bd_table_array[tbi];
next_bd_dma = table->dma + sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
tbi * ep->bd_list.num_bds_table);
/* * Due to HW limitation we need to bypadd chain bd's and issue ep_bla, * incase if start is pending this is the first request in the list * then issue ep_bla instead of marking as chain bd
*/ if (start_pending && !first_remove) { /* * Mark the start bd as Chain bd, and point the chain * bd to next_bd_dma
*/
bd_start = bdi_to_bd(ep, start_bdi);
bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
bd_start->offset[2] = 0x0;
bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
bdc_dbg_bd_list(bdc, ep);
} elseif (end_pending) { /* * The transfer is stopped in the middle, move the * HW deq pointer to next_bd_dma
*/
ret = bdc_ep_bla(bdc, ep, next_bd_dma); if (ret) {
dev_err(bdc->dev, "error in ep_bla:%d\n", ret); return ret;
}
}
return 0;
}
/* Halt/Clear the ep based on value */ staticint ep_set_halt(struct bdc_ep *ep, u32 value)
/*
 * NOTE(review): this block appears to be two functions fused by a bad
 * merge/extraction. The signature is ep_set_halt() (halt or clear an
 * endpoint based on 'value'), but the body below programs a USB2 test mode
 * into the BDC_USPPM2 register — i.e. the body of a bdc_set_test_mode()-style
 * helper. As written it cannot compile or run correctly: 'usb2_pm' is never
 * declared, 'bdc' is read while uninitialized (never assigned from ep->bdc),
 * and 'ep', 'value' and 'ret' are unused. Restore the two original functions
 * rather than patching this in place.
 */
{ struct bdc *bdc; int ret;
/* NOTE(review): 'usb2_pm' has no declaration; 'bdc' is uninitialized here */
usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
/* clear the port test control field before installing the new mode */
usb2_pm &= ~BDC_PTC_MASK;
dev_dbg(bdc->dev, "%s\n", __func__); switch (bdc->test_mode) { case USB_TEST_J: case USB_TEST_K: case USB_TEST_SE0_NAK: case USB_TEST_PACKET: case USB_TEST_FORCE_ENABLE:
/* presumably bits 31:28 of USPPM2 select the test mode -- TODO confirm */
usb2_pm |= bdc->test_mode << 28; break; default: return -EINVAL;
}
dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
return 0;
}
/* * Helper function to handle Transfer status report with status as either * success or short
 */ staticvoid handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep, struct bdc_sr *sreport)
/*
 * NOTE(review): several statements of this function appear to have been lost
 * in this copy: 'sr_status' and 'bd_xfr' are read below but never assigned
 * here, and 'short_bd', 'actual_length' and 'length_short' are declared but
 * never used. The assignments (sr_status from sreport->offset[3], bd_xfr from
 * the completing request) and the non-short completion path must be restored
 * before this block can work; the closing brace of the XSF_SHORT branch also
 * doubles as the visible end of the function, so the tail is truncated.
 */
{ int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds; struct bd_list *bd_list = &ep->bd_list; int actual_length, length_short; struct bd_transfer *bd_xfr; struct bdc_bd *short_bd; struct bdc_req *req;
u64 deq_ptr_64 = 0; int status = 0; int sr_status;
u32 tmp_32;
dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
/* drop this report if the previous short-transfer handling told us to */
bdc_dbg_srr(bdc, 0); /* do not process this sr if ignore flag is set */ if (ep->ignore_next_sr) {
ep->ignore_next_sr = false; return;
}
/* a transfer status report with nothing queued is unexpected */
if (unlikely(list_empty(&ep->queue))) {
dev_warn(bdc->dev, "xfr srr with no BD's queued\n"); return;
}
/* the oldest queued request is the one this report completes */
req = list_entry(ep->queue.next, struct bdc_req,
queue);
/* * If sr_status is short and this transfer has more than 1 bd then it needs * special handling; this is only applicable for bulk and ctrl
 */ if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) { /* * This is a multi bd xfr, lets see which bd * caused the short transfer and how many bytes have been * transferred so far.
 */
/* reassemble the 64-bit HW dequeue pointer from the two report words */
tmp_32 = le32_to_cpu(sreport->offset[0]);
deq_ptr_64 = tmp_32;
tmp_32 = le32_to_cpu(sreport->offset[1]);
deq_ptr_64 |= ((u64)tmp_32 << 32);
short_bdi = bd_add_to_bdi(ep, deq_ptr_64); if (unlikely(short_bdi < 0))
dev_warn(bdc->dev, "bd doesn't exist?\n");
start_bdi = bd_xfr->start_bdi; /* * We know the start_bdi and short_bdi, how many xfr * bds in between
 */ if (start_bdi <= short_bdi) {
max_len_bds = short_bdi - start_bdi; if (max_len_bds <= bd_list->num_bds_table) { if (!(bdi_to_tbi(ep, start_bdi) ==
bdi_to_tbi(ep, short_bdi)))
/* one chain bd sits between the two tables */
max_len_bds--;
} else {
chain_bds = max_len_bds/bd_list->num_bds_table;
max_len_bds -= chain_bds;
}
} else { /* there is a wrap in the ring within a xfr */
chain_bds = (bd_list->max_bdi - start_bdi)/
bd_list->num_bds_table;
chain_bds += short_bdi/bd_list->num_bds_table;
max_len_bds = bd_list->max_bdi - start_bdi;
max_len_bds += short_bdi;
max_len_bds -= chain_bds;
} /* max_len_bds is the number of full length bds */
end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi); if (!(end_bdi == short_bdi))
/* HW will still report the rest of this xfr; skip that report */
ep->ignore_next_sr = true;
/* Update the dequeue pointer */
ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi; if (req->usb_req.actual < req->usb_req.length) {
dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num); if (req->usb_req.short_not_ok)
/* gadget asked for an exact length; report the short xfr as error */
status = -EREMOTEIO;
}
bdc_req_complete(ep, bd_xfr->req, status);
}
/* EP0 setup related packet handlers */
/* * Setup packet received, just store the packet and process on next DS or SS * started SR
*/ void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
{ struct usb_ctrlrequest *setup_pkt;
u32 len;
dev_dbg(bdc->dev, "%s ep0_state:%s\n",
__func__, ep0_state_string[bdc->ep0_state]); /* Store received setup packet */
setup_pkt = &bdc->setup_pkt;
memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
len = le16_to_cpu(setup_pkt->wLength); if (!len)
bdc->ep0_state = WAIT_FOR_STATUS_START; else
bdc->ep0_state = WAIT_FOR_DATA_START;
case USB_RECIP_INTERFACE:
dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n"); if (bdc->gadget.speed == USB_SPEED_SUPER) { /* * This should come from func for Func remote wkup * usb_status |=1;
*/ if (bdc->devstatus & REMOTE_WAKE_ENABLE)
usb_status |= REMOTE_WAKE_ENABLE;
} else {
usb_status = 0;
}
ep = bdc->bdc_ep_array[epnum]; if (!ep) {
dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?"); return -EINVAL;
} if (ep->flags & BDC_EP_STALL)
usb_status |= 1 << USB_ENDPOINT_HALT;
break; default:
dev_err(bdc->dev, "Unknown recipient for get_status\n"); return -EINVAL;
} /* prepare a data stage for GET_STATUS */
dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
*(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
bdc->ep0_req.usb_req.length = 2;
bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
ep0_queue_data_stage(bdc);
/* * Queue a 0 byte bd only if wLength is more than the length and length is * a multiple of MaxPacket then queue 0 byte BD
*/ staticint ep0_queue_zlp(struct bdc *bdc)
{ int ret;
/*
 * Dispatch an ep0 control request: standard requests the UDC can answer are
 * handled here, everything else is delegated to the gadget driver's setup()
 * callback (with the controller lock dropped around the call).
 */
static int handle_control_request(struct bdc *bdc)
{
	enum usb_device_state state = bdc->gadget.state;
	struct usb_ctrlrequest *setup_pkt = &bdc->setup_pkt;
	int delegate_setup = 0;
	int ret = 0;
	int config = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);

	if ((setup_pkt->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
		/* class/vendor requests always go to the gadget driver */
		delegate_setup = 1;
	} else {
		switch (setup_pkt->bRequest) {
		case USB_REQ_SET_ADDRESS:
			dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
			ret = ep0_set_address(bdc, setup_pkt);
			bdc->devstatus &= DEVSTATUS_CLEAR;
			break;
		case USB_REQ_SET_CONFIGURATION:
			dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
			if (state == USB_STATE_ADDRESS) {
				usb_gadget_set_state(&bdc->gadget,
						     USB_STATE_CONFIGURED);
			} else if (state == USB_STATE_CONFIGURED) {
				/*
				 * USB2 spec sec 9.4.7, if wValue is 0 then dev
				 * is moved to addressed state
				 */
				config = le16_to_cpu(setup_pkt->wValue);
				if (!config)
					usb_gadget_set_state(&bdc->gadget,
							     USB_STATE_ADDRESS);
			}
			/* the gadget driver still has to see this request */
			delegate_setup = 1;
			break;
		case USB_REQ_SET_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 1);
			break;
		case USB_REQ_CLEAR_FEATURE:
			dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
			ret = ep0_handle_feature(bdc, setup_pkt, 0);
			break;
		case USB_REQ_GET_STATUS:
			dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
			ret = ep0_handle_status(bdc, setup_pkt);
			break;
		case USB_REQ_SET_SEL:
			dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
			ret = ep0_set_sel(bdc, setup_pkt);
			break;
		case USB_REQ_SET_ISOCH_DELAY:
			dev_warn(bdc->dev,
				 "USB_REQ_SET_ISOCH_DELAY not handled\n");
			ret = 0;
			break;
		default:
			delegate_setup = 1;
		}
	}

	if (delegate_setup) {
		/* must not hold the controller lock across the callback */
		spin_unlock(&bdc->lock);
		ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
		spin_lock(&bdc->lock);
	}

	return ret;
}
/* EP0: Data stage started */
void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct bdc_ep *ep = bdc->bdc_ep_array[1];
	int ret = 0;

	dev_dbg(bdc->dev, "%s\n", __func__);

	/* If ep0 was stalled, clear the stall before anything else */
	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if (bdc->ep0_state != WAIT_FOR_DATA_START)
		dev_warn(bdc->dev, "Data stage not expected ep0_state:%s\n",
			 ep0_state_string[bdc->ep0_state]);

	ret = handle_control_request(bdc);
	if (ret == USB_GADGET_DELAYED_STATUS) {
		/*
		 * ep0 stays in WAIT_FOR_DATA_START until the function driver
		 * queues a request on ep0
		 */
		bdc->delayed_status = true;
		return;
	}
	if (!ret) {
		bdc->ep0_state = WAIT_FOR_DATA_XMIT;
		dev_dbg(bdc->dev, "ep0_state:%s",
			ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* EP0: status stage started */
void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
{
	struct usb_ctrlrequest *setup_pkt;
	struct bdc_ep *ep;
	int ret = 0;

	dev_dbg(bdc->dev, "%s ep0_state:%s",
		__func__, ep0_state_string[bdc->ep0_state]);
	ep = bdc->bdc_ep_array[1];

	/* once the status stage has started, a queued ZLP is moot */
	if (bdc->zlp_needed)
		bdc->zlp_needed = false;

	if (ep->flags & BDC_EP_STALL) {
		ret = ep_set_halt(ep, 0);
		if (ret)
			goto err;
	}

	if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
	    (bdc->ep0_state != WAIT_FOR_DATA_XMIT))
		dev_err(bdc->dev, "Status stage recv but ep0_state:%s\n",
			ep0_state_string[bdc->ep0_state]);

	/* data stage still in flight? queue status on its transmit event */
	if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
		bdc->ep0_state = STATUS_PENDING;
		dev_dbg(bdc->dev, "status started but data not transmitted yet\n");
		return;
	}
	setup_pkt = &bdc->setup_pkt;

	/*
	 * For a 2-stage setup only now process the request; for a 3-stage
	 * setup the data stage already handled it.
	 */
	if (!le16_to_cpu(setup_pkt->wLength)) {
		ret = handle_control_request(bdc);
		if (ret == USB_GADGET_DELAYED_STATUS) {
			/* ep0_state will remain WAIT_FOR_STATUS_START */
			bdc->delayed_status = true;
			return;
		}
	}
	if (!ret) {
		/* Queue a status stage BD */
		ep0_queue_status_stage(bdc);
		bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
		dev_dbg(bdc->dev, "ep0_state:%s",
			ep0_state_string[bdc->ep0_state]);
		return;
	}
err:
	ep0_stall(bdc);
}
/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */ staticvoid ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
{
dev_dbg(bdc->dev, "%s\n", __func__); switch (bdc->ep0_state) { case WAIT_FOR_DATA_XMIT:
bdc->ep0_state = WAIT_FOR_STATUS_START; break; case WAIT_FOR_STATUS_XMIT:
bdc->ep0_state = WAIT_FOR_SETUP; if (bdc->test_mode) { int ret;
dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
ret = bdc_set_test_mode(bdc); if (ret < 0) {
dev_err(bdc->dev, "Err in setting Test mode\n"); return;
}
bdc->test_mode = 0;
} break; case STATUS_PENDING:
bdc_xsf_ep0_status_start(bdc, sreport); break;
ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
ep = bdc->bdc_ep_array[ep_num]; if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
dev_err(bdc->dev, "xsf for ep not enabled\n"); return;
} /* * check if this transfer is after link went from U3->U0 due * to remote wakeup
*/ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
__func__);
}
sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
__func__, sr_status, ep->name);
switch (sr_status) { case XSF_SUCC: case XSF_SHORT:
handle_xsr_succ_status(bdc, ep, sreport); if (ep_num == 1)
ep0_xsf_complete(bdc, sreport); break;
case XSF_SETUP_RECV: case XSF_DATA_START: case XSF_STATUS_START: if (ep_num != 1) {
dev_err(bdc->dev, "ep0 related packets on non ep0 endpoint"); return;
}
bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport); break;
case XSF_BABB: if (ep_num == 1) {
dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
bdc->zlp_needed); /* * If the last completed transfer had wLength >Data Len, * and Len is multiple of MaxPacket,then queue ZLP
*/ if (bdc->zlp_needed) { /* queue 0 length bd */
ep0_queue_zlp(bdc); return;
}
}
dev_warn(bdc->dev, "Babble on ep not handled\n"); break; default:
dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status); break;
}
}
if (!_ep) {
pr_debug("bdc: invalid parameters\n"); return -EINVAL;
}
ep = to_bdc_ep(_ep);
bdc = ep->bdc;
/* Upper layer will not call this for ep0, but do a sanity check */ if (ep == bdc->bdc_ep_array[1]) {
dev_warn(bdc->dev, "%s called for ep0\n", __func__); return -EINVAL;
}
dev_dbg(bdc->dev, "%s() ep:%s ep->flags:%08x\n",
__func__, ep->name, ep->flags);
if (!(ep->flags & BDC_EP_ENABLED)) { if (bdc->gadget.speed != USB_SPEED_UNKNOWN)
dev_warn(bdc->dev, "%s is already disabled\n",
ep->name); return 0;
}
spin_lock_irqsave(&bdc->lock, flags);
ret = bdc_ep_disable(ep);
spin_unlock_irqrestore(&bdc->lock, flags);
/*
 * NOTE(review): the following trailing text is extraction residue from a
 * web page, not part of the driver source, and should be removed. English
 * translation of the German original: "The information on this website was
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement are
 * still experimental."
 */