// SPDX-License-Identifier: GPL-2.0+ /* * USB Gadget driver for LPC32xx * * Authors: * Kevin Wells <kevin.wells@nxp.com> * Mike James * Roland Stigge <stigge@antcom.de> * * Copyright (C) 2006 Philips Semiconductors * Copyright (C) 2009 NXP Semiconductors * Copyright (C) 2012 Roland Stigge * * Note: This driver is based on original work done by Mike James for * the LPC3180.
*/
/* EP0 states */ #define WAIT_FOR_SETUP 0 /* Wait for setup packet */ #define DATA_IN 1 /* Expect dev->host transfer */ #define DATA_OUT 2 /* Expect host->dev transfer */
/* DD (DMA Descriptor) structure, requires word alignment, this is already
 * defined in the LPC32XX USB device header file, but this version is slightly
 * modified to tag some work data with each DMA descriptor. */
struct lpc32xx_usbd_dd_gad {
	u32 dd_next_phy;	/* Physical address of next DD in the chain */
	u32 dd_setup;		/* DD setup/control word */
	u32 dd_buffer_addr;	/* Physical address of the transfer buffer */
	u32 dd_status;		/* Status written back by the DMA engine */
	u32 dd_iso_ps_mem_addr;	/* ISO packet size memory address */
	u32 this_dma;		/* Driver tag: DMA address of this DD itself */
	u32 iso_status[6];	/* ISO status words; 5 spare */
	u32 dd_next_v;		/* Driver tag: virtual address of next DD */
};
/* Set full speed and SE0 mode */
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_MODE_CONTROL_1, (MC1_SPEED_REG | MC1_DAT_SE0));
/* * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
*/
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
(ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
value = MC2_BI_DI; if (udc->atx != STOTG04)
value |= MC2_SPD_SUSP_CTRL;
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_MODE_CONTROL_2, value);
/* Driver VBUS_DRV high or low depending on board setup */ if (udc->board->vbus_drv_pol != 0)
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV); else
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
OTG1_VBUS_DRV);
/* Bi-directional mode with suspend control * Enable both pulldowns for now - the pullup will be enable when VBUS
* is detected */
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
(ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0);
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_OTG_CONTROL_1,
(0 | OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n", vendor);
dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n", product);
dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x14));
}
/* Enables or disables the USB device pullup via the ISP1301 transceiver */
static void isp1301_pullup_set(struct lpc32xx_udc *udc)
{
	if (udc->pullup)
		/* Enable pullup for bus signalling */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1, OTG1_DP_PULLUP);
	else
		/* Disable pullup for bus signalling (write to the clear
		 * address removes the D+ pullup) */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR,
			OTG1_DP_PULLUP);
}
/* Request a pullup state change; either applies it immediately (block != 0)
 * or defers the slow I2C access to the pullup workqueue job */
static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
				  int block)
{
	/* Nothing to do if the requested state is already current */
	if (en_pullup == udc->pullup)
		return;

	udc->pullup = en_pullup;

	if (block)
		isp1301_pullup_set(udc);
	else
		/* Defer slow I2C pull up setting to process context */
		schedule_work(&udc->pullup_job);
}
#ifdef CONFIG_PM
/* Powers up or down the ISP1301 transceiver */
static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
{
	/* The STOTG04 variant has no "global power down" register */
	if (udc->atx == STOTG04)
		return;

	if (enable != 0)
		/* Power up ISP1301 by clearing the global power-down bit;
		 * this ISP1301 will automatically wakeup when VBUS is
		 * detected */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR,
			MC2_GLOBAL_PWR_DN);
	else
		/* Power down ISP1301 */
		i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
			ISP1301_I2C_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
}
/* * * USB protocol engine command/data read/write helper functions *
*/ /* Issues a single command to the USB device state machine */ staticvoid udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
{
u32 pass = 0; int to;
/* EP may lock on CLRI if this read isn't done */
u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
(void) tmp;
while (pass == 0) {
writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
/* Issues 2 commands (or command and data) to the USB device state machine */
static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
					   u32 data)
{
	/* The protocol engine takes the command first, then the data word */
	udc_protocol_cmd_w(udc, cmd);
	udc_protocol_cmd_w(udc, data);
}
/* Issues a single command to the USB device state machine and reads
 * response data */
static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
{
	int to = 1000;

	/* Write a command and read data from the protocol engine */
	writel((USBD_CDFULL | USBD_CCEMPTY),
	       USBD_DEVINTCLR(udc->udp_baseaddr));

	/* Write command code (was missing: without this, cmd was unused and
	 * CDFULL could never assert for this request) */
	udc_protocol_cmd_w(udc, cmd);

	/* Busy-wait until the engine signals response data is available */
	while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
	       && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev,
			"Protocol engine didn't receive response (CDFULL)\n");

	return readl(USBD_CMDDATA(udc->udp_baseaddr));
}
/*
 *
 * USB device interrupt mask support functions
 *
 */
/* Enable one or more USB device interrupts */
static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
{
	/* Keep a software copy of the enabled set, mirror it to hardware */
	udc->enabled_devints |= devmask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}
/* Disable one or more USB device interrupts */
static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
{
	/* Drop the bits from the software copy, mirror it to hardware */
	udc->enabled_devints &= ~mask;
	writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
}
/* Clear (acknowledge) one or more pending USB device interrupts */
static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
{
	writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
}
/*
 *
 * Endpoint interrupt disable/enable functions
 *
 */
/* Enable the interrupt for a hardware endpoint */
static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Software copy of the enabled EP interrupt mask, pushed to HW */
	udc->enabled_hwepints |= (1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}
/* Disable the interrupt for a hardware endpoint */
static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	/* Software copy of the enabled EP interrupt mask, pushed to HW */
	udc->enabled_hwepints &= ~(1 << hwep);
	writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
}
/* Clear (acknowledge) a pending hardware endpoint interrupt */
static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
}
/* Enable DMA for the HW channel */
static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
}
/* Disable DMA for the HW channel */
static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
{
	writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
}
/* * * Endpoint realize/unrealize functions *
*/ /* Before an endpoint can be used, it needs to be realized * in the USB protocol engine - this realizes the endpoint.
* The interrupt (FIFO or DMA) is not enabled with this function */ staticvoid udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
u32 maxpacket)
{ int to = 1000;
/* Wait until endpoint is realized in hardware */ while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
USBD_EP_RLZED)) && (to > 0))
to--; if (!to)
dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
/* * * USB setup and shutdown functions *
*/ /* Enables or disables most of the USB system clocks when low power mode is * needed. Clocks are typically started on a connection event, and disabled
* when a cable is disconnected */ staticvoid udc_clk_set(struct lpc32xx_udc *udc, int enable)
{ if (enable != 0) { if (udc->clocked) return;
udc->clocked = 1;
clk_prepare_enable(udc->usb_slv_clk);
} else { if (!udc->clocked) return;
/* Set/reset USB device address */
static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
{
	/* The new address is normally latched at the end of the status
	 * phase; calling this function twice forces an immediate latch */
	udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
				DAT_WR_BYTE(DEV_EN | addr));
}
/* Set up an IN request for DMA transfer - installs the request's DMA
 * Descriptor into the UDCA and then enables DMA for that EP */
static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 ch = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[ch] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, ch);

	/* No trailing ZLP is needed when the last packet is not of
	 * MAXP size */
	if (req->req.length % ep->ep.maxpacket)
		req->send_zlp = 0;

	return 0;
}
/* Set up an OUT request for DMA transfer - installs the request's DMA
 * Descriptor into the UDCA and then enables DMA for that EP */
static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	struct lpc32xx_request *req;
	u32 ch = ep->hwep_num;

	ep->req_pending = 1;

	/* There will always be a request waiting here */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);

	/* Place the DD Descriptor into the UDCA */
	udc->udca_v_base[ch] = req->dd_desc_ptr->this_dma;

	/* Enable DMA and interrupt for the HW EP */
	udc_ep_dma_enable(udc, ch);

	return 0;
}
/* EP interrupts on high priority, FRAME interrupt on low priority */
writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
/* Clear any pending device interrupts */
writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
/* Setup UDCA - not yet used (DMA) */
writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
/* Only enable EP0 in and out for now, EP0 only works in FIFO mode */ for (i = 0; i <= 1; i++) {
udc_realize_hwep(udc, i, ep->ep.maxpacket);
uda_enable_hwepint(udc, i);
udc_select_hwep(udc, i);
udc_clrstall_hwep(udc, i);
udc_clr_buffer_hwep(udc, i);
}
/* Set device address to 0 - called twice to force a latch in the USB
engine without the need of a setup packet status closure */
udc_set_address(udc, 0);
udc_set_address(udc, 0);
/*
 *
 * USB device board specific events handled via callbacks
 *
 */
/* Connection change event - notify board function of change */
static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* The connection-change board callback is optional */
	if (udc->board->conn_chgb)
		udc->board->conn_chgb(conn);
}
/* Suspend/resume event - notify board function of change */
static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
{
	/* The suspend/resume board callback is optional */
	if (udc->board->susp_chgb)
		udc->board->susp_chgb(conn);

	/* conn != 0 means resumed, conn == 0 means suspended */
	udc->suspended = conn ? 0 : 1;
}
/* Remote wakeup enable/disable - notify board function of change */
static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
{
	/* Pass the current REMOTE_WAKEUP feature bit to the optional
	 * board callback */
	if (udc->board->rmwk_chgb)
		udc->board->rmwk_chgb(udc->dev_status &
				      (1 << USB_DEVICE_REMOTE_WAKEUP));
}
/* Reads data from FIFO, adjusts for alignment and data size */
static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{
	int n, i, bl;
	u16 *p16;
	u32 *p32, tmp, cbytes;

	/* Use optimal data transfer method based on source address and size */
	switch (((uintptr_t) data) & 0x3) {
	case 0: /* 32-bit aligned */
		p32 = (u32 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit aligned data first */
		for (n = 0; n < cbytes; n += 4)
			*p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;

	case 1: /* 8-bit aligned */
	case 3:
		/* Each byte has to be handled independently */
		for (n = 0; n < bytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));

			bl = bytes - n;
			if (bl > 4)
				bl = 4;

			for (i = 0; i < bl; i++)
				data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
		}
		break;

	case 2: /* 16-bit aligned */
		/* Was missing the case label and these initializations,
		 * leaving the code below unreachable and p16/cbytes
		 * uninitialized */
		p16 = (u16 *) data;
		cbytes = (bytes & ~0x3);

		/* Copy 32-bit sized objects first with 16-bit alignment */
		for (n = 0; n < cbytes; n += 4) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			*p16++ = (u16)(tmp & 0xFFFF);
			*p16++ = (u16)((tmp >> 16) & 0xFFFF);
		}

		/* Handle any remaining bytes */
		bl = bytes - cbytes;
		if (bl) {
			tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
			for (n = 0; n < bl; n++)
				data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
		}
		break;
	}
}
/* Read data from the FIFO for an endpoint. This function is for endpoints
 * (such as EP0) that don't use DMA. This function should only be called if a
 * packet is known to be ready to read for the endpoint. Note that the
 * endpoint must be selected in the protocol engine prior to this call. */
static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
			 u32 bytes)
{
	u32 tmpv;
	int to = 1000;
	u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;

	/* Setup read of endpoint */
	writel(hwrep, USBD_CTRL(udc->udp_baseaddr));

	/* Wait until packet is ready */
	while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
		 PKT_RDY) == 0) && (to > 0))
		to--;
	if (!to)
		dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");

	/* Mask out count, clamp to the caller's buffer size */
	tmp = tmpv & PKT_LNGTH_MASK;
	if (bytes < tmp)
		tmp = bytes;

	/* Copy the payload out of the FIFO (was missing: data/bytes were
	 * otherwise unused and the packet contents were discarded) */
	if ((tmp > 0) && (data != NULL))
		udc_pop_fifo(udc, (u8 *) data, tmp);

	/* Clear the buffer */
	udc_clr_buffer_hwep(udc, hwep);

	return tmp;
}
/* Stuffs data into the FIFO, adjusts for alignment and data size */ staticvoid udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
{ int n, i, bl;
u16 *p16;
u32 *p32, tmp, cbytes;
/* Use optimal data transfer method based on source address and size */ switch (((uintptr_t) data) & 0x3) { case 0: /* 32-bit aligned */
p32 = (u32 *) data;
cbytes = (bytes & ~0x3);
/* Copy 32-bit aligned data first */ for (n = 0; n < cbytes; n += 4)
writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
/* Handle any remaining bytes */
bl = bytes - cbytes; if (bl) {
tmp = 0; for (n = 0; n < bl; n++)
tmp |= data[cbytes + n] << (n * 8);
case 1: /* 8-bit aligned */ case 3: /* Each byte has to be handled independently */ for (n = 0; n < bytes; n += 4) {
bl = bytes - n; if (bl > 4)
bl = 4;
tmp = 0; for (i = 0; i < bl; i++)
tmp |= data[n + i] << (i * 8);
/* Write data to the FIFO for an endpoint. This function is for endpoints (such * as EP0) that don't use DMA. Note that the endpoint must be selected in the
* protocol engine prior to this call. */ staticvoid udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
u32 bytes)
{
u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
if ((bytes > 0) && (data == NULL)) return;
/* Setup write of endpoint */
writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
/* Need at least 1 byte to trigger TX */ if (bytes == 0)
writel(0, USBD_TXDATA(udc->udp_baseaddr)); else
udc_stuff_fifo(udc, (u8 *) data, bytes);
/* USB device reset - resets USB to a default state with just EP0
 * enabled */
static void uda_usb_reset(struct lpc32xx_udc *udc)
{
	u32 i;

	/* Re-init device controller and EP0 */
	udc_enable(udc);
	udc->gadget.speed = USB_SPEED_FULL;

	/* No transfers can remain pending after a bus reset */
	for (i = 1; i < NUM_ENDPOINTS; i++)
		udc->ep[i].req_pending = 0;
}
/* Send a ZLP (zero-length packet) on EP0 IN */
static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
{
	udc_write_hwep(udc, EP_IN, NULL, 0);
}
/* Get current frame number */ static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
{
u16 flo, fhi;
/* Set the device as configured - enables all endpoints */
static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
}
/* Set the device as unconfigured - disables all endpoints */
static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
}
/*
 * Activate or kill host pullup
 * Can be called with or without lock
 */
static void pullup(struct lpc32xx_udc *udc, int is_on)
{
	if (!udc->clocked)
		return;

	/* Force the pullup off unless the gadget is enabled and VBUS
	 * is present */
	if (!udc->enabled || !udc->vbus)
		is_on = 0;

	/* Only touch the transceiver on an actual state change */
	if (is_on != udc->pullup)
		isp1301_pullup_enable(udc, is_on, 0);
}
/* Must be called without lock */ staticint lpc32xx_ep_disable(struct usb_ep *_ep)
{ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); struct lpc32xx_udc *udc = ep->udc; unsignedlong flags;
case USB_ENDPOINT_XFER_ISOC: break;
}
spin_lock_irqsave(&udc->lock, flags);
/* Initialize endpoint to match the selected descriptor */
ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
ep->ep.maxpacket = maxpacket;
/* Map hardware endpoint from base and direction */ if (ep->is_in) /* IN endpoints are offset 1 from the OUT endpoint */
ep->hwep_num = ep->hwep_num_base + EP_IN; else
ep->hwep_num = ep->hwep_num_base;
/* Realize the endpoint, interrupt is enabled later when
* buffers are queued, IN EPs will NAK until buffers are ready */
udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
udc_clr_buffer_hwep(udc, ep->hwep_num);
uda_disable_hwepint(udc, ep->hwep_num);
udc_clrstall_hwep(udc, ep->hwep_num);
/* Clear all DMA statuses for this EP */
udc_ep_dma_disable(udc, ep->hwep_num);
writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
spin_unlock_irqrestore(&udc->lock, flags);
atomic_inc(&udc->enabled_ep_cnt); return 0;
}
/* * Allocate a USB request list * Can be called with or without lock
*/ staticstruct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
gfp_t gfp_flags)
{ struct lpc32xx_request *req;
req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags); if (!req) return NULL;
INIT_LIST_HEAD(&req->queue); return &req->req;
}
/* * De-allocate a USB request list * Can be called with or without lock
*/ staticvoid lpc32xx_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
{ struct lpc32xx_request *req;
/* Kickstart empty queues */ if (list_empty(&ep->queue)) {
list_add_tail(&req->queue, &ep->queue);
if (ep->hwep_num_base == 0) { /* Handle expected data direction */ if (ep->is_in) { /* IN packet to host */
udc->ep0state = DATA_IN;
status = udc_ep0_in_req(udc);
} else { /* OUT packet from host */
udc->ep0state = DATA_OUT;
status = udc_ep0_out_req(udc);
}
} elseif (ep->is_in) { /* IN packet to host and kick off transfer */ if (!ep->req_pending)
udc_ep_in_req_dma(udc, ep);
} else /* OUT packet from host and kick off list */ if (!ep->req_pending)
udc_ep_out_req_dma(udc, ep);
} else
list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore(&udc->lock, flags);
return (status < 0) ? status : 0;
}
/* Dequeue and complete a request with -ECONNRESET.
 * Must be called without lock */
static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct lpc32xx_ep *ep;
	struct lpc32xx_request *req = NULL, *iter;
	unsigned long flags;

	ep = container_of(_ep, struct lpc32xx_ep, ep);
	/* EP0 (hwep_num_base == 0) cannot be dequeued through this path */
	if (!_ep || ep->hwep_num_base == 0)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(iter, &ep->queue, queue) {
		if (&iter->req != _req)
			continue;
		req = iter;
		break;
	}
	if (!req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	/* Complete the request back to the gadget driver as aborted */
	done(ep, req, -ECONNRESET);

	spin_unlock_irqrestore(&ep->udc->lock, flags);

	return 0;
}
/* Must be called without lock */ staticint lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{ struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep); struct lpc32xx_udc *udc; unsignedlong flags;
if ((!ep) || (ep->hwep_num <= 1)) return -EINVAL;
/* Don't halt an IN EP */ if (ep->is_in) return -EAGAIN;
/* Send a ZLP on a non-0 IN EP */ staticvoid udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{ /* Clear EP status */
udc_clearep_getsts(udc, ep->hwep_num);
/*
 * Handle EP completion for ZLP
 * This function will only be called when a delayed ZLP needs to be sent out
 * after a DMA transfer has filled both buffers.
 */
static void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
	u32 epstatus;
	struct lpc32xx_request *req;

	/* EP0 is handled elsewhere */
	if (ep->hwep_num <= 0)
		return;

	uda_clear_hwepint(udc, ep->hwep_num);

	/* If this interrupt isn't enabled, return now */
	if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
		return;

	/* Get endpoint status */
	epstatus = udc_clearep_getsts(udc, ep->hwep_num);

	/*
	 * This should never happen, but protect against writing to the
	 * buffer when full.
	 */
	if (epstatus & EP_SEL_F)
		return;

	if (ep->is_in) {
		/* Send the deferred ZLP, then no further EP interrupts
		 * are needed for this transfer */
		udc_send_in_zlp(udc, ep);
		uda_disable_hwepint(udc, ep->hwep_num);
	} else
		return;

	/* If there isn't a request waiting, something went wrong */
	req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
	done(ep, req, 0);

	/* Start another request if ready */
	if (!list_empty(&ep->queue)) {
		if (ep->is_in)
			udc_ep_in_req_dma(udc, ep);
		else
			udc_ep_out_req_dma(udc, ep);
	} else
		ep->req_pending = 0;
}
/* DMA end of transfer completion */ staticvoid udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
{
u32 status; struct lpc32xx_request *req; struct lpc32xx_usbd_dd_gad *dd;
/* DMA descriptor should always be retired for this call */ if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
ep_warn(ep, "DMA descriptor did not retire\n");
/* The error could have occurred on a packet of a multipacket * transfer, so recovering the transfer is not possible. Close
* the request with an error */
done(ep, req, -ECONNABORTED); return;
}
/* Handle the current DD's status */
status = dd->dd_status; switch (status & DD_STATUS_STS_MASK) { case DD_STATUS_STS_NS: /* DD not serviced? This shouldn't happen! */
ep->req_pending = 0;
ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
status);
done(ep, req, -ECONNABORTED); return;
case DD_STATUS_STS_BS: /* Interrupt only fires on EOT - This shouldn't happen! */
ep->req_pending = 0;
ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
status);
done(ep, req, -ECONNABORTED); return;
case DD_STATUS_STS_NC: case DD_STATUS_STS_DUR: /* Really just a short packet, not an underrun */ /* This is a good status and what we expect */ break;
default: /* Data overrun, system error, or unknown */
ep->req_pending = 0;
ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
status);
done(ep, req, -ECONNABORTED); return;
}
/* ISO endpoints are handled differently */ if (ep->eptype == EP_ISO_TYPE) { if (ep->is_in)
req->req.actual = req->req.length; else
req->req.actual = dd->iso_status[0] & 0xFFFF;
} else
req->req.actual += DD_STATUS_CURDMACNT(status);
/* Send a ZLP if necessary. This will be done for non-int
* packets which have a size that is a divisor of MAXP */ if (req->send_zlp) { /* * If at least 1 buffer is available, send the ZLP now. * Otherwise, the ZLP send needs to be deferred until a * buffer is available.
*/ if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
udc_clearep_getsts(udc, ep->hwep_num);
uda_enable_hwepint(udc, ep->hwep_num);
udc_clearep_getsts(udc, ep->hwep_num);
/* Let the EP interrupt handle the ZLP */ return;
} else
udc_send_in_zlp(udc, ep);
}
/* Transfer request is complete */
done(ep, req, 0);
/* Start another request if ready */
udc_clearep_getsts(udc, ep->hwep_num); if (!list_empty((&ep->queue))) { if (ep->is_in)
udc_ep_in_req_dma(udc, ep); else
udc_ep_out_req_dma(udc, ep);
} else
ep->req_pending = 0;
switch (reqtype & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: break; /* Not supported */
case USB_RECIP_DEVICE:
ep0buff = udc->gadget.is_selfpowered; if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP); break;
case USB_RECIP_ENDPOINT:
tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
ep = &udc->ep[tmp]; if ((tmp == 0) || (tmp >= NUM_ENDPOINTS)) return -EOPNOTSUPP;
if (wIndex & USB_DIR_IN) { if (!ep->is_in) return -EOPNOTSUPP; /* Something's wrong */
} elseif (ep->is_in) return -EOPNOTSUPP; /* Not an IN endpoint */
/* Get status of the endpoint */
udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
tmp = wIndex & USB_ENDPOINT_NUMBER_MASK; if ((wValue != USB_ENDPOINT_HALT) ||
(tmp >= NUM_ENDPOINTS)) break;
/* Find hardware endpoint from logical endpoint */
ep = &udc->ep[tmp];
tmp = ep->hwep_num; if (tmp == 0) break;
if (req == USB_REQ_SET_FEATURE)
udc_stall_hwep(udc, tmp); elseif (!ep->wedge)
udc_clrstall_hwep(udc, tmp);
goto zlp_send;
default: break;
} break;
case USB_REQ_SET_ADDRESS: if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
udc_set_address(udc, wValue); goto zlp_send;
} break;
case USB_REQ_GET_STATUS:
udc_get_status(udc, reqtype, wIndex); return;
default: break; /* Let GadgetFS handle the descriptor instead */
}
if (likely(udc->driver)) { /* device-2-host (IN) or no data setup command, process
* immediately */
spin_unlock(&udc->lock);
i = udc->driver->setup(&udc->gadget, &ctrlpkt);
spin_lock(&udc->lock); if (req == USB_REQ_SET_CONFIGURATION) { /* Configuration is set after endpoints are realized */ if (wValue) { /* Set configuration */
udc_set_device_configured(udc);
/* Is a buffer available? */ if (!(epstatus & EP_SEL_F)) { /* Handle based on current state */ if (udc->ep0state == DATA_IN)
udc_ep0_in_req(udc); else { /* Unknown state for EP0 oe end of DATA IN phase */
nuke(ep0, -ECONNABORTED);
udc->ep0state = WAIT_FOR_SETUP;
}
}
}
/* OUT endpoint 0 transfer - dispatches on the EP0 state machine */
static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
{
	struct lpc32xx_ep *ep0 = &udc->ep[0];
	u32 epstatus;

	/* Clear EP interrupt */
	epstatus = udc_clearep_getsts(udc, EP_OUT);

	/* A NAK may occur if a packet couldn't be received yet */
	if (epstatus & EP_SEL_EPN)
		return;
	/* Setup packet incoming? Abort any in-progress transfer first */
	if (epstatus & EP_SEL_STP) {
		nuke(ep0, 0);
		udc->ep0state = WAIT_FOR_SETUP;
	}

	/* Data available? */
	if (epstatus & EP_SEL_F)
		/* Handle based on current state */
		switch (udc->ep0state) {
		case WAIT_FOR_SETUP:
			udc_handle_ep0_setup(udc);
			break;

		case DATA_OUT:
			udc_ep0_out_req(udc);
			break;

		default:
			/* Unknown state for EP0 - abort and resync */
			nuke(ep0, -ECONNABORTED);
			udc->ep0state = WAIT_FOR_SETUP;
		}
}
/* Must be called without lock */ staticint lpc32xx_get_frame(struct usb_gadget *gadget)
{ int frame; unsignedlong flags; struct lpc32xx_udc *udc = to_udc(gadget);
/*
 * vbus is here! turn everything on that's ready
 * Must be called without lock
 */
static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
{
	unsigned long flags;
	struct lpc32xx_udc *udc = to_udc(gadget);

	spin_lock_irqsave(&udc->lock, flags);

	/* Doesn't need lock */
	if (udc->driver) {
		udc_clk_set(udc, 1);
		udc_enable(udc);
		pullup(udc, is_active);
	} else {
		stop_activity(udc);
		pullup(udc, 0);

		/* Lock is dropped here because wait_event_interruptible
		 * may sleep; it is retaken before touching the clocks */
		spin_unlock_irqrestore(&udc->lock, flags);
		/*
		 * Wait for all the endpoints to disable,
		 * before disabling clocks. Don't wait if
		 * endpoints are not enabled.
		 */
		if (atomic_read(&udc->enabled_ep_cnt))
			wait_event_interruptible(udc->ep_disable_wait_queue,
				(atomic_read(&udc->enabled_ep_cnt) == 0));

		spin_lock_irqsave(&udc->lock, flags);

		udc_clk_set(udc, 0);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* Can be called with or without lock */ staticint lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
{ struct lpc32xx_udc *udc = to_udc(gadget);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.