static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
		       struct td_node *node);

/**
 * hw_ep_bit: calculates the bit number
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns bit number
 */
static inline int hw_ep_bit(int num, int dir)
{
	/* TX endpoints live in the upper 16 bits of the endpoint registers */
	return (dir == TX) ? num + 16 : num;
}
/*
 * ep_to_bit: map an endpoint index to its register bit position.
 * When the controller implements fewer than 16 endpoints per direction,
 * TX endpoints are still located at bit 16 and up, so indices in the TX
 * half must be padded by the unused gap.
 */
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
{
	int half = ci->hw_ep_max / 2;

	if (n >= half)
		n += 16 - half;

	return n;
}
/**
 * hw_device_state: enables/disables interrupts (execute without interruption)
 * @ci: the controller
 * @dma: 0 => disable, !0 => enable and set dma engine
 *
 * This function returns an error code
 */
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
	if (!dma) {
		/* mask all device interrupts */
		hw_write(ci, OP_USBINTR, ~0, 0);
		return 0;
	}

	hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
	/* interrupt, error, port change, reset, sleep/suspend */
	hw_write(ci, OP_USBINTR, ~0,
		 USBi_UI | USBi_UEI | USBi_PCI | USBi_URI);
	return 0;
}
/**
 * hw_ep_flush: flush endpoint fifo (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 *
 * This function returns an error code
 */
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
	int bit = hw_ep_bit(num, dir);

	do {
		/* flush any pending transfer */
		hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(bit));
		while (hw_read(ci, OP_ENDPTFLUSH, BIT(bit)))
			cpu_relax();
	} while (hw_read(ci, OP_ENDPTSTAT, BIT(bit)));

	return 0;
}
/** * hw_ep_disable: disables endpoint (execute without interruption) * @ci: the controller * @num: endpoint number * @dir: endpoint direction * * This function returns an error code
*/ staticint hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
hw_write(ci, OP_ENDPTCTRL + num,
(dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0); return 0;
}
/* NOTE(review): hw_ep_enable below is TRUNCATED by extraction — its RX branch
 * and final hw_write/return are missing; hw_ep_get_halt is likewise cut off
 * before its return statement. Code kept verbatim; restore from upstream. */
/** * hw_ep_enable: enables endpoint (execute without interruption) * @ci: the controller * @num: endpoint number * @dir: endpoint direction * @type: endpoint type * * This function returns an error code
 */ staticint hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
u32 mask, data;
if (dir == TX) {
mask = ENDPTCTRL_TXT; /* type */
data = type << __ffs(mask);
mask |= ENDPTCTRL_TXS; /* unstall */
mask |= ENDPTCTRL_TXR; /* reset data toggle */
data |= ENDPTCTRL_TXR;
mask |= ENDPTCTRL_TXE; /* enable */
data |= ENDPTCTRL_TXE;
} else {
mask = ENDPTCTRL_RXT; /* type */
data = type << __ffs(mask);
/* NOTE(review): fragment boundary — RX setup and register write lost here */
/** * hw_ep_get_halt: return endpoint halt status * @ci: the controller * @num: endpoint number * @dir: endpoint direction * * This function returns 1 if endpoint halted
 */ staticint hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
/**
 * hw_ep_prime: primes endpoint (execute without interruption)
 * @ci: the controller
 * @num: endpoint number
 * @dir: endpoint direction
 * @is_ctrl: true if control endpoint
 *
 * This function returns an error code
 */
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
	int bit = hw_ep_bit(num, dir);

	/* Synchronize before ep prime */
	wmb();

	/* a pending SETUP packet supersedes a control-OUT prime */
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	hw_write(ci, OP_ENDPTPRIME, ~0, BIT(bit));

	while (hw_read(ci, OP_ENDPTPRIME, BIT(bit)))
		cpu_relax();

	/* re-check: a SETUP may have arrived while the prime completed */
	if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
		return -EAGAIN;

	/* status should be tested according to the manual but it doesn't work */
	return 0;
}
/** * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute * without interruption) * @ci: the controller * @num: endpoint number * @dir: endpoint direction * @value: true => stall, false => unstall * * This function returns an error code
*/ staticint hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{ if (value != 0 && value != 1) return -EINVAL;
/* data toggle - reserved for EP0 but it's in ESS */
hw_write(ci, reg, mask_xs|mask_xr,
value ? mask_xs : mask_xr);
} while (value != hw_ep_get_halt(ci, num, dir));
return 0;
}
/**
 * hw_port_is_high_speed: test if port is high speed
 * @ci: the controller
 *
 * This function returns true if high speed port
 */
static int hw_port_is_high_speed(struct ci_hdrc *ci)
{
	/* LPM-capable banks report the speed in DEVLC instead of PORTSC */
	if (ci->hw_bank.lpm)
		return hw_read(ci, OP_DEVLC, DEVLC_PSPD);

	return hw_read(ci, OP_PORTSC, PORTSC_HSP);
}
/**
 * hw_test_and_clear_complete: test & clear complete status (execute without
 * interruption)
 * @ci: the controller
 * @n: endpoint number
 *
 * This function returns complete status
 */
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
{
	int bit = ep_to_bit(ci, n);

	return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(bit));
}
/**
 * hw_test_and_clear_intr_active: test & clear active interrupts (execute
 * without interruption)
 * @ci: the controller
 *
 * This function returns active interrupts
 */
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
{
	u32 active = hw_read_intr_status(ci) & hw_read_intr_enable(ci);

	/* USBSTS is write-one-to-clear: acknowledge what we report */
	hw_write(ci, OP_USBSTS, ~0, active);
	return active;
}
/**
 * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
 * interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
{
	/* drop the setup tripwire bit, reporting its previous state */
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}
/**
 * hw_test_and_set_setup_guard: test & set setup guard (execute without
 * interruption)
 * @ci: the controller
 *
 * This function returns guard value
 */
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
{
	/* raise the setup tripwire bit, reporting its previous state */
	return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}
/**
 * hw_usb_set_address: configures USB address (execute without interruption)
 * @ci: the controller
 * @value: new USB address
 *
 * This function explicitly sets the address, without the "USBADRA" (advance)
 * feature, which is not supported by older versions of the controller.
 */
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
	u32 addr = (u32)value << __ffs(DEVICEADDR_USBADR);

	hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR, addr);
}
/* NOTE(review): hw_usb_reset below is TRUNCATED by extraction — after the
 * flush its body abruptly continues with a fragment that appears to belong
 * to the dTD page setup of _hardware_enqueue (length/node/temp/hwreq are
 * undeclared here). Code kept verbatim; restore from upstream. */
/** * hw_usb_reset: restart device after a bus reset (execute without * interruption) * @ci: the controller * * This function returns an error code
 */ staticint hw_usb_reset(struct ci_hdrc *ci)
{
hw_usb_set_address(ci, 0);
/* ESS flushes only at end?!? */
hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
/* NOTE(review): fragment boundary — foreign code from another function */
if (length) {
node->ptr->page[0] = cpu_to_le32(temp); for (i = 1; i < TD_PAGE_COUNT; i++) {
u32 page = temp + i * CI_HDRC_PAGE_SIZE;
page &= ~TD_RESERVED_MASK;
node->ptr->page[i] = cpu_to_le32(page);
}
}
hwreq->req.actual += length;
if (!list_empty(&hwreq->tds)) { /* get the last entry */
lastnode = list_entry(hwreq->tds.prev, struct td_node, td);
lastnode->ptr->next = cpu_to_le32(node->dma);
}
/*
 * Verify if the scatterlist is valid by iterating each sg entry.
 * Return invalid sg entry index which is less than num_sgs.
 */
static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
				    struct usb_request *req)
{
	struct scatterlist *s = req->sg;
	int i;

	if (req->num_sgs == 1)
		return 1;

	dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
		/* Only small sg (generally last sg) may be bounced. If
		 * that happens, we can't ensure the addr is page-aligned
		 * after dma map.
		 */
		if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
			break;

		/* Make sure each sg start address (except first sg) is
		 * page-aligned and end address (except last sg) is also
		 * page-aligned.
		 */
		if (i == 0) {
			if (!IS_ALIGNED(s->offset + s->length,
					CI_HDRC_PAGE_SIZE))
				break;
			continue;
		}

		if (s->offset)
			break;
		if (!sg_is_last(s) &&
		    !IS_ALIGNED(s->length, CI_HDRC_PAGE_SIZE))
			break;
	}

	return i;
}
/* NOTE(review): sglist_do_bounce below is TRUNCATED by extraction — after
 * sg_alloc_table its body abruptly continues with fragments that appear to
 * belong to _ep_queue/_hardware_enqueue (ci, hwep, bounced_size, firstnode,
 * remaining_length, is_isoc are undeclared here). Code kept verbatim;
 * restore from upstream. */
staticint sglist_do_bounce(struct ci_hw_req *hwreq, int index, bool copy, unsignedint *bounced)
{ void *buf; int i, ret, nents, num_sgs; unsignedint rest, rounded; struct scatterlist *sg, *src, *dst;
nents = index + 1;
ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL); if (ret) return ret;
/* NOTE(review): fragment boundary — foreign code from another function */
if (hwreq->req.num_sgs && hwreq->req.length &&
ci->has_short_pkt_limit) {
ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
&hwreq->req); if (ret < hwreq->req.num_sgs) {
ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
&bounced_size); if (ret) return ret;
}
}
ret = usb_gadget_map_request_by_dev(ci->dev->parent,
&hwreq->req, hwep->dir); if (ret) return ret;
if (hwreq->sgt.sgl) { /* We've mapped a bigger buffer, now recover the actual size */
sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
}
if (hwreq->req.num_mapped_sgs)
ret = prepare_td_for_sg(hwep, hwreq); else
ret = prepare_td_for_non_sg(hwep, hwreq);
hwreq->req.actual = 0; if (!list_empty(&hwep->qh.queue)) { struct ci_hw_req *hwreqprev; int n = hw_ep_bit(hwep->num, hwep->dir); int tmp_stat; struct td_node *prevlastnode;
u32 next = firstnode->dma & TD_ADDR_MASK;
if (ci->rev == CI_REVISION_22) { if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
reprime_dtd(ci, hwep, prevlastnode);
}
if (hw_read(ci, OP_ENDPTPRIME, BIT(n))) goto done; do {
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW) && tmp_stat);
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0); if (tmp_stat) goto done;
/* OP_ENDPTSTAT will be clear by HW when the endpoint met * err. This dTD don't push to dQH if current dTD point is * not the last one in previous request.
 */ if (hwep->qh.ptr->curr != cpu_to_le32(prevlastnode->dma)) goto done;
}
if (remaining_length && !is_isoc) { if (hwep->dir == TX) {
hwreq->req.status = -EPROTO; break;
}
} /* * As the hardware could still address the freed td * which will run the udc unusable, the cleanup of the * td has to be delayed by one.
 */ if (hwep->pending_td)
free_pending_td(hwep);
/****************************************************************************** * ISR block
 *****************************************************************************/ /** * isr_reset_handler: USB reset interrupt handler * @ci: UDC device * * This function resets USB engine after a bus reset occurred
 */ staticvoid isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{ int retval;
u32 intr;
spin_unlock(&ci->lock); if (ci->gadget.speed != USB_SPEED_UNKNOWN)
usb_gadget_udc_reset(&ci->gadget, ci->driver);
retval = _gadget_stop_activity(&ci->gadget); if (retval) goto done;
/* NOTE(review): isr_reset_handler is TRUNCATED here by extraction; what
 * follows appears to be a fragment of _ep_queue (hwep, req, hwreq are
 * undeclared in this scope). Code kept verbatim; restore from upstream. */
if (hwep->type == USB_ENDPOINT_XFER_CONTROL) { if (req->length)
hwep = (ci->ep0_dir == RX) ?
ci->ep0out : ci->ep0in; if (!list_empty(&hwep->qh.queue)) {
_ep_nuke(hwep);
dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
_usb_addr(hwep));
}
}
if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
dev_err(hwep->ci->dev, "request length too big for isochronous\n"); return -EMSGSIZE;
}
if (ci->has_short_pkt_limit &&
hwreq->req.length > CI_MAX_REQ_SIZE) {
dev_err(hwep->ci->dev, "request length too big (max 16KB)\n"); return -EMSGSIZE;
}
/* first nuke then test link, e.g. previous status has not sent */ if (!list_empty(&hwreq->queue)) {
dev_err(hwep->ci->dev, "request already in queue\n"); return -EBUSY;
}
/**
 * isr_setup_status_complete: setup_status request complete function
 * @ep: endpoint
 * @req: request handled
 *
 * Caller must release lock. Put the port in test mode if test mode
 * feature is selected.
 */
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ci_hdrc *ci = req->context;
	unsigned long flags;

	/* nothing to do for a failed/cancelled status request */
	if (req->status < 0)
		return;

	if (ci->setaddr) {
		hw_usb_set_address(ci, ci->address);
		ci->setaddr = false;
		if (ci->address)
			usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
	}

	spin_lock_irqsave(&ci->lock, flags);
	if (ci->test_mode)
		hw_port_test_set(ci, ci->test_mode);
	spin_unlock_irqrestore(&ci->lock, flags);
}
/* NOTE(review): isr_setup_status_phase below is TRUNCATED by extraction —
 * the body ends after the NULL-status guard. Code kept verbatim. */
/** * isr_setup_status_phase: queues the status phase of a setup transation * @ci: ci struct * * This function returns an error code
 */ staticint isr_setup_status_phase(struct ci_hdrc *ci)
{ struct ci_hw_ep *hwep;
/* * Unexpected USB controller behavior, caused by bad signal integrity * or ground reference problems, can lead to isr_setup_status_phase * being called with ci->status equal to NULL. * If this situation occurs, you should review your USB hardware design.
 */ if (WARN_ON_ONCE(!ci->status)) return -EPIPE;
/*
 * otg_a_alt_hnp_support: respond to the A_ALT_HNP_SUPPORT feature request
 * by warning the user and completing the control transfer's status phase.
 */
static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
{
	dev_warn(&ci->gadget.dev,
		"connect the device to an alternate port if you want HNP\n");
	return isr_setup_status_phase(ci);
}
/* NOTE(review): the following span is a collection of FRAGMENTS from several
 * functions (looks like ep_enable's QH capability setup, ep_alloc_request,
 * ci_udc_pullup, endpoint-list init, and a udc stop path) spliced together
 * by extraction. Variables such as hwep, cap, retval, i, j, flags are
 * undeclared in this scope. Code kept verbatim; restore from upstream. */
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
cap |= QH_ZLT;
cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; /* * For ISO-TX, we set mult at QH as the largest value, and use * MultO at TD as real mult value.
 */ if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
cap |= 3 << __ffs(QH_MULT);
if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
retval = -EINVAL;
}
/* * Enable endpoints in the HW other than ep0 as ep0 * is always enabled
 */ if (hwep->num)
retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
hwep->type);
/* NOTE(review): fragment boundary — ep_alloc_request (truncated) */
/* * ep_alloc_request: allocate a request object to use with this endpoint * * Check usb_ep_alloc_request() at "usb_gadget.h" for details
 */ staticstruct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{ struct ci_hw_req *hwreq;
/* NOTE(review): fragment boundary — ci_udc_pullup (truncated) */
/* Change Data+ pullup status * this func is used by usb_gadget_connect/disconnect
 */ staticint ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{ struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
/* * Data+ pullup controlled by OTG state machine in OTG fsm mode; * and don't touch Data+ in host mode for dual role config.
 */ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST) return 0;
/* NOTE(review): fragment boundary — endpoint list initialization */
/* * for ep0: maxP defined in desc, for other * eps, maxP is set by epautoconfig() called * by gadget layer
 */
usb_ep_set_maxpacket_limit(&hwep->ep, (unsignedshort)~0);
/* * set up shorthands for ep0 out and in endpoints, * don't add to gadget's ep_list
 */ if (i == 0) { if (j == RX)
ci->ep0out = hwep; else
ci->ep0in = hwep;
/* NOTE(review): fragment boundary — udc stop/vbus teardown path */
if (ci->vbus_active) {
hw_device_state(ci, 0);
spin_unlock_irqrestore(&ci->lock, flags); if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&ci->gadget);
spin_lock_irqsave(&ci->lock, flags);
pm_runtime_put(ci->dev);
}
spin_unlock_irqrestore(&ci->lock, flags);
ci_udc_stop_for_otg_fsm(ci); return 0;
}
/****************************************************************************** * BUS block
 *****************************************************************************/ /* * udc_irq: ci interrupt handler * * This function returns IRQ_HANDLED if the IRQ has been handled * It locks access to registers
 */ static irqreturn_t udc_irq(struct ci_hdrc *ci)
{
irqreturn_t retval;
u32 intr;
if (ci == NULL) return IRQ_HANDLED;
spin_lock(&ci->lock);
/* shared-register configs: bail out if the controller is not in device mode */
if (ci->platdata->flags & CI_HDRC_REGS_SHARED) { if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
USBMODE_CM_DC) {
spin_unlock(&ci->lock); return IRQ_NONE;
}
}
intr = hw_test_and_clear_intr_active(ci);
if (intr) { /* order defines priority - do NOT change it */ if (USBi_URI & intr)
isr_reset_handler(ci);
if (USBi_PCI & intr) {
ci->gadget.speed = hw_port_is_high_speed(ci) ?
USB_SPEED_HIGH : USB_SPEED_FULL; if (ci->usb_phy)
usb_phy_set_event(ci->usb_phy,
USB_EVENT_ENUMERATED); if (ci->suspended) { if (ci->driver->resume) {
spin_unlock(&ci->lock);
ci->driver->resume(&ci->gadget);
spin_lock(&ci->lock);
}
ci->suspended = 0;
usb_gadget_set_state(&ci->gadget,
ci->resume_state);
}
}
if ((USBi_UI | USBi_UEI) & intr)
isr_tr_complete_handler(ci);
/* NOTE(review): udc_irq is TRUNCATED here by extraction — its remaining
 * interrupt handling, unlock and return are missing; what follows is the
 * start of ci_hdrc_gadget_destroy, itself truncated. Restore upstream. */
/* * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC * * No interrupts active, the IRQ has been released
 */ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{ if (!ci->roles[CI_ROLE_GADGET]) return;
/*
 * udc_id_switch_for_device: prepare the controller for the device role.
 * Selects the device pinctrl state (when provided by platform data) and,
 * on OTG-capable hardware, clears and enables the B-session-valid irq.
 */
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
	if (ci->platdata->pins_device)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_device);

	if (ci->is_otg)
		/* Clear and enable BSV irq */
		hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
			       OTGSC_BSVIS | OTGSC_BSVIE);

	return 0;
}
/*
 * udc_id_switch_for_host: leave the device role.
 * Disables the BSV irq (host doesn't care about B_SESSION_VALID),
 * marks vbus inactive and restores the default pinctrl state.
 */
static void udc_id_switch_for_host(struct ci_hdrc *ci)
{
	/*
	 * host doesn't care B_SESSION_VALID event
	 * so clear and disable BSV irq
	 */
	if (ci->is_otg)
		hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);

	ci->vbus_active = 0;

	if (ci->platdata->pins_device && ci->platdata->pins_default)
		pinctrl_select_state(ci->platdata->pctl,
				     ci->platdata->pins_default);
}
#ifdef CONFIG_PM_SLEEP
/*
 * udc_suspend: system-sleep hook for the gadget role.
 * Leaves a non-zero marker in OP_ENDPTLISTADDR so resume can detect a
 * power-lost controller, and soft-disconnects unless remote wakeup keeps
 * the link alive.
 */
static void udc_suspend(struct ci_hdrc *ci)
{
	/*
	 * Set OP_ENDPTLISTADDR to be non-zero for
	 * checking if controller resume from power lost
	 * in non-host mode.
	 */
	if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0)
		hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0);

	if (ci->gadget.connected &&
	    (!ci->suspended || !device_may_wakeup(ci->dev)))
		usb_gadget_disconnect(&ci->gadget);
}
/* NOTE(review): tail fragment of udc_resume — its function header and the
 * start of its body were lost by extraction. Code kept verbatim. */
/* Restore value 0 if it was set for power lost check */ if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0xFFFFFFFF)
hw_write(ci, OP_ENDPTLISTADDR, ~0, 0);
} #endif
/* NOTE(review): ci_hdrc_gadget_init below is TRUNCATED by extraction — the
 * body ends after the rdrv allocation. Code kept verbatim. */
/** * ci_hdrc_gadget_init - initialize device related bits * @ci: the controller * * This function initializes the gadget, if the device is "device capable".
 */ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{ struct ci_role_driver *rdrv; int ret;
/* bail out when the hardware has no device capability */
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) return -ENXIO;
rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL); if (!rdrv) return -ENOMEM;
[Translator's note: the following trailing text is German website boilerplate unrelated to the driver code.]
The information on this web page has been carefully compiled to the best of our knowledge. However, no guarantee is given as to the completeness, correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.