/*
 * NOTE(review): this region appears to be a garbled extraction. The body of
 * cdnsp_segment_alloc() below is truncated — there is no allocation of
 * seg->trbs and the local 'dma' is never assigned before it is stored —
 * and from the while-loop onward the text looks like the tail of a
 * different function that frees a circular segment list starting at
 * 'first', which is not declared in this scope. Recover the original
 * source before attempting to build; do not "fix" this copy in place.
 */
staticvoid cdnsp_free_stream_info(struct cdnsp_device *pdev, struct cdnsp_ep *pep); /* * Allocates a generic ring segment from the ring pool, sets the dma address, * initializes the segment to zero, and sets the private next pointer to NULL. * * "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/ staticstruct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev, unsignedint cycle_state, unsignedint max_packet,
gfp_t flags)
{ struct cdnsp_segment *seg;
dma_addr_t dma; int i;
seg = kzalloc(sizeof(*seg), flags); if (!seg) return NULL;
/* Optional bounce buffer for short-packet handling; freed via the (missing) free_dma path. */
if (max_packet) {
seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); if (!seg->bounce_buf) goto free_dma;
}
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */ if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
}
/* NOTE(review): 'dma' is read uninitialized here — the dma_pool_zalloc() call that produces it is missing from this copy. */
seg->dma = dma;
seg->next = NULL;
/* NOTE(review): the lines below belong to a segment-list free routine, not to cdnsp_segment_alloc(). */
while (seg != first) { struct cdnsp_segment *next = seg->next;
cdnsp_segment_free(pdev, seg);
seg = next;
}
cdnsp_segment_free(pdev, first);
}
/*
 * Chain segment @prev to segment @next.
 *
 * For every ring type except the event ring, the last TRB of @prev is
 * rewritten as a Link TRB whose segment pointer holds the DMA address of
 * @next. Any additional Link TRB flags (End TRB, Toggle Cycle, no snoop)
 * are left to the caller.
 */
static void cdnsp_link_segments(struct cdnsp_device *pdev,
				struct cdnsp_segment *prev,
				struct cdnsp_segment *next,
				enum cdnsp_ring_type type)
{
	struct cdnsp_link_trb *link;
	u32 control;

	if (!prev || !next)
		return;

	prev->next = next;

	/* Event rings are consumed without Link TRBs. */
	if (type == TYPE_EVENT)
		return;

	link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
	link->segment_ptr = cpu_to_le64(next->dma);

	/* Retag the segment's final TRB as a Link TRB. */
	control = le32_to_cpu(link->control);
	control &= ~TRB_TYPE_BITMASK;
	control |= TRB_TYPE(TRB_LINK);
	link->control = cpu_to_le32(control);
}
/*
 * NOTE(review): garbled region. A truncated cdnsp_link_rings() header is
 * followed by the radix-tree design comment and the signature of
 * cdnsp_insert_segment_mapping(), but the body that follows iterates
 * 'first_seg'/'last_seg'/'failed_seg' — names not declared here — so it
 * looks like the body of a stream-segment-mapping update helper. The
 * trailing cycle_state/num_trbs_free assignments resemble the tail of a
 * ring-info initialization function. Recover the original file.
 */
/* * Link the ring to the new segments. * Set Toggle Cycle for the new ring if needed.
*/ staticvoid cdnsp_link_rings(struct cdnsp_device *pdev, struct cdnsp_ring *ring, struct cdnsp_segment *first, struct cdnsp_segment *last, unsignedint num_segs)
{ struct cdnsp_segment *next;
/* * We need a radix tree for mapping physical addresses of TRBs to which stream * ID they belong to. We need to do this because the device controller won't * tell us which stream ring the TRB came from. We could store the stream ID * in an event data TRB, but that doesn't help us for the cancellation case, * since the endpoint may stop before it reaches that event data TRB. * * The radix tree maps the upper portion of the TRB DMA address to a ring * segment that has the same upper portion of DMA addresses. For example, * say I have segments of size 1KB, that are always 1KB aligned. A segment may * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the * key to the stream ID is 0x43244. I can use the DMA address of the TRB to * pass the radix tree a key to get the right stream ID: * * 0x10c90fff >> 10 = 0x43243 * 0x10c912c0 >> 10 = 0x43244 * 0x10c91400 >> 10 = 0x43245 * * Obviously, only those TRBs with DMA addresses that are within the segment * will make the radix tree return the stream ID for that ring. * * Caveats for the radix tree: * * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit * extended systems (where the DMA address can be bigger than 32-bits), * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/ staticint cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map, struct cdnsp_ring *ring, struct cdnsp_segment *seg,
gfp_t mem_flags)
{ unsignedlong key; int ret;
/* NOTE(review): 'first_seg'/'last_seg' are not parameters of this function — fused body from another routine. */
seg = first_seg; do {
ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
mem_flags); if (ret) goto remove_streams; if (seg == last_seg) return 0;
seg = seg->next;
} while (seg != first_seg);
return 0;
remove_streams:
/* Roll back: remove every mapping inserted so far, up to and including the failed segment. */
failed_seg = seg;
seg = first_seg; do {
cdnsp_remove_segment_mapping(trb_address_map, seg); if (seg == failed_seg) return ret;
seg = seg->next;
} while (seg != first_seg);
/* * The ring is initialized to 0. The producer must write 1 to the cycle * bit to handover ownership of the TRB, so PCS = 1. The consumer must * compare CCS to the cycle bit to check ownership, so CCS = 1. * * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring.
*/
ring->cycle_state = 1;
/* * Each segment has a link TRB, and leave an extra TRB for SW * accounting purpose
*/
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
/*
 * NOTE(review): cdnsp_alloc_segments_for_ring() is truncated in this copy —
 * the loop that links each new segment to the previous one, the assignment
 * of *last, and the function's success return are missing after the
 * allocation-failure branch below.
 */
/* Allocate segments and link them for a ring. */ staticint cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev, struct cdnsp_segment **first, struct cdnsp_segment **last, unsignedint num_segs, unsignedint cycle_state, enum cdnsp_ring_type type, unsignedint max_packet,
gfp_t flags)
{ struct cdnsp_segment *prev;
/* Allocate first segment. */
prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags); if (!prev) return -ENOMEM;
num_segs--;
*first = prev;
/* Allocate all other segments. */ while (num_segs > 0) { struct cdnsp_segment *next;
next = cdnsp_segment_alloc(pdev, cycle_state,
max_packet, flags); if (!next) {
/* On failure, free everything allocated so far for this ring. */
cdnsp_free_segments_for_ring(pdev, *first); return -ENOMEM;
}
/*
 * NOTE(review): cdnsp_ring_alloc() is truncated here — the ring-info
 * initialization, the success return, and the 'fail' cleanup label are
 * missing — and the text then jumps straight into the header of
 * cdnsp_ring_expansion(), whose body is also absent.
 */
/* * Create a new ring with zero or more segments. * * Link each segment together into a ring. * Set the end flag and the cycle toggle bit on the last segment.
*/ staticstruct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev, unsignedint num_segs, enum cdnsp_ring_type type, unsignedint max_packet,
gfp_t flags)
{ struct cdnsp_ring *ring; int ret;
ring = kzalloc(sizeof *(ring), flags); if (!ring) return NULL;
ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
&ring->last_seg, num_segs,
1, type, max_packet, flags); if (ret) goto fail;
/* Only event ring does not use link TRB. */ if (type != TYPE_EVENT)
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
/* * Expand an existing ring. * Allocate a new ring which has same segment numbers and link the two rings.
*/ int cdnsp_ring_expansion(struct cdnsp_device *pdev, struct cdnsp_ring *ring, unsignedint num_trbs,
gfp_t flags)
{ unsignedint num_segs_needed; struct cdnsp_segment *first; struct cdnsp_segment *last; unsignedint num_segs; int ret;
/*
 * NOTE(review): cdnsp_get_ep_ctx() is truncated — the return statement
 * computing the endpoint-context address from ep_index and the closing
 * brace are missing from this copy.
 */
struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx, unsignedint ep_index)
{ /* Increment ep index by offset of start of ep ctx array. */
ep_index++; if (ctx->type == CDNSP_CTX_TYPE_INPUT)
ep_index++;
/* The stream context array must be a power of 2. */ staticstruct cdnsp_stream_ctx
*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{
size_t size = sizeof(struct cdnsp_stream_ctx) *
pep->stream_info.num_stream_ctxs;
if (size > CDNSP_CTX_SIZE) return NULL;
/** * Driver uses intentionally the device_pool to allocated stream * context array. Device Pool has 2048 bytes of size what gives us * 128 entries.
*/ return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
&pep->stream_info.ctx_array_dma);
}
/*
 * NOTE(review): cdnsp_alloc_stream_info() is truncated in this copy —
 * 'stream_info' and 'mps' are used without ever being assigned (the
 * initialization from pep and the endpoint max-packet lookup are missing),
 * and the per-stream loop body, radix-tree inserts, error cleanup labels
 * (cleanup_stream_rings) and the function tail are absent.
 */
/* * Change an endpoint's internal structure so it supports stream IDs. * The number of requested streams includes stream 0, which cannot be used by * driver. * * The number of stream contexts in the stream context array may be bigger than * the number of streams the driver wants to use. This is because the number of * stream context array entries must be a power of two.
*/ int cdnsp_alloc_stream_info(struct cdnsp_device *pdev, struct cdnsp_ep *pep, unsignedint num_stream_ctxs, unsignedint num_streams)
{ struct cdnsp_stream_info *stream_info; struct cdnsp_ring *cur_ring;
u32 cur_stream;
u64 addr; int ret; int mps;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kcalloc(num_streams, sizeof(struct cdnsp_ring *),
GFP_ATOMIC); if (!stream_info->stream_rings) return -ENOMEM;
/* Initialize the array of DMA addresses for stream rings for the HW. */
stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep); if (!stream_info->stream_ctx_array) goto cleanup_stream_rings;
/* * Allocate rings for all the streams that the driver will use, * and add their segment DMA addresses to the radix tree. * Stream 0 is reserved.
*/ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
GFP_ATOMIC);
stream_info->stream_rings[cur_stream] = cur_ring;
/*
 * NOTE(review): garbled region. cdnsp_free_priv_device() is cut off after
 * freeing the in/out context pool entries (freeing the DCBAA itself is
 * missing), then the text jumps into the tail of an interval-conversion
 * routine that references 'g' and 'interval' — neither declared in this
 * scope — and finally into the header of cdnsp_microframes_to_exponent(),
 * whose body is absent.
 */
/* All the cdnsp_tds in the ring's TD list should be freed at this point.*/ staticvoid cdnsp_free_priv_device(struct cdnsp_device *pdev)
{
pdev->dcbaa->dev_context_ptrs[1] = 0;
cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
if (pdev->in_ctx.bytes)
dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
pdev->in_ctx.dma);
if (pdev->out_ctx.bytes)
dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
pdev->out_ctx.dma);
/* NOTE(review): from here the text belongs to a different (interval) function. */
/* * Full speed isoc endpoints specify interval in frames, * not microframes. We are using microframes everywhere, * so adjust accordingly.
*/ if (g->speed == USB_SPEED_FULL)
interval += 3; /* 1 frame = 2^3 uframes */
/* Controller handles only up to 512ms (2^12). */ if (interval > 12)
interval = 12;
return interval;
}
/* * Convert bInterval expressed in microframes (in 1-255 range) to exponent of * microframes, rounded down to nearest power of 2.
*/ staticunsignedint cdnsp_microframes_to_exponent(struct usb_gadget *g, struct cdnsp_ep *pep, unsignedint desc_interval, unsignedint min_exponent, unsignedint max_exponent)
{ unsignedint interval;
/*
 * Return the polling interval.
 *
 * The polling interval is expressed in "microframes". If the controller's
 * Interval field is set to N, it services the endpoint every
 * 2^(Interval) * 125us.
 */
static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
						struct cdnsp_ep *pep)
{
	unsigned int interval = 0;

	switch (g->speed) {
	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
	case USB_SPEED_HIGH:
		/* HS/SS descriptors already encode an exponent. */
		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
			interval = cdnsp_parse_exponent_interval(g, pep);
		break;
	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
			interval = cdnsp_parse_exponent_interval(g, pep);
		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
			/* FS interrupt bInterval is in frames: scale to
			 * microframes, then round down to an exponent.
			 */
			interval = pep->endpoint.desc->bInterval << 3;
			interval = cdnsp_microframes_to_exponent(g, pep,
								 interval,
								 3, 10);
		}
		break;
	default:
		WARN_ON(1);
	}

	return interval;
}
/*
 * The "Mult" field in the endpoint context is only set for SuperSpeed
 * isoc endpoints. High speed endpoint descriptors can define "the number
 * of additional transaction opportunities per microframe", but that goes
 * in the Max Burst endpoint context field.
 */
static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
{
	if (g->speed >= USB_SPEED_SUPER &&
	    usb_endpoint_xfer_isoc(pep->endpoint.desc))
		return pep->endpoint.comp_desc->bmAttributes;

	return 0;
}
/*
 * NOTE(review): cdnsp_get_endpoint_max_burst() is truncated — only the
 * SuperSpeed branch is present; the remaining branches and the closing
 * brace are missing from this copy.
 */
static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g, struct cdnsp_ep *pep)
{ /* Super speed and Plus have max burst in ep companion desc */ if (g->speed >= USB_SPEED_SUPER) return pep->endpoint.comp_desc->bMaxBurst;
static u32 cdnsp_get_endpoint_type(conststruct usb_endpoint_descriptor *desc)
{ int in;
in = usb_endpoint_dir_in(desc);
switch (usb_endpoint_type(desc)) { case USB_ENDPOINT_XFER_CONTROL: return CTRL_EP; case USB_ENDPOINT_XFER_BULK: return in ? BULK_IN_EP : BULK_OUT_EP; case USB_ENDPOINT_XFER_ISOC: return in ? ISOC_IN_EP : ISOC_OUT_EP; case USB_ENDPOINT_XFER_INT: return in ? INT_IN_EP : INT_OUT_EP;
}
return 0;
}
/*
 * NOTE(review): garbled region. cdnsp_get_max_esit_payload() is cut off
 * after the SuperSpeed branches (the HS/FS fallback computation from
 * max_packet/max_burst is missing), then the text jumps into the middle
 * of an endpoint-init routine (references to 'pep', 'ring_type',
 * 'mem_flags' with no enclosing signature) and finally into a
 * port-register fragment using undeclared 'temp'/'addr'/'port_offset'.
 */
/* * Return the maximum endpoint service interval time (ESIT) payload. * Basically, this is the maxpacket size, multiplied by the burst size * and mult size.
*/ static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g, struct cdnsp_ep *pep)
{ int max_packet; int max_burst;
/* Only applies for interrupt or isochronous endpoints*/ if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
usb_endpoint_xfer_bulk(pep->endpoint.desc)) return 0;
/* SuperSpeedPlus Isoc ep sending over 48k per EIST. */ if (g->speed >= USB_SPEED_SUPER_PLUS &&
USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes)) return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval); /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */ elseif (g->speed >= USB_SPEED_SUPER) return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
/* NOTE(review): from here the text belongs to an endpoint-context init function. */
/* * Get values to fill the endpoint context, mostly from ep descriptor. * The average TRB buffer length for bulk endpoints is unclear as we * have no clue on scatter gather list entry size. For Isoc and Int, * set it to max available.
*/
max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
max_packet = usb_endpoint_maxp(pep->endpoint.desc);
max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
avg_trb_len = max_esit_payload;
/* Allow 3 retries for everything but isoc, set CErr = 3. */ if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
err_count = 3; if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
pdev->gadget.speed == USB_SPEED_HIGH)
max_packet = 512; /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */ if (usb_endpoint_xfer_control(pep->endpoint.desc))
avg_trb_len = 8;
/* Set up the endpoint ring. */
pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags); if (!pep->ring) return -ENOMEM;
/* NOTE(review): port-register fragment from yet another function. */
/* Port offset and count in the third dword.*/
temp = readl(addr + 2);
port_offset = CDNSP_EXT_PORT_OFF(temp);
port_count = CDNSP_EXT_PORT_COUNT(temp);
/*
 * NOTE(review): cdnsp_setup_port_arrays() is truncated — the body of the
 * two-iteration capability-scan loop and the function tail are missing
 * after the 'u32 temp;' declaration below.
 */
/* * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that * specify what speeds each port is supposed to be.
*/ staticint cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
{ void __iomem *base;
u32 offset; int i;
/* Locate the USB 2.0 port capability registers. */
base = &pdev->cap_regs->hc_capbase;
offset = cdnsp_find_next_ext_cap(base, 0,
EXT_CAP_CFG_DEV_20PORT_CAP_ID);
pdev->port20_regs = base + offset;
/* Locate the USB 3.x port capability registers. */
offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
pdev->port3x_regs = base + offset;
offset = 0;
base = &pdev->cap_regs->hc_capbase;
/* Driver expects max 2 extended protocol capability. */ for (i = 0; i < 2; i++) {
u32 temp;
/*
 * NOTE(review): cdnsp_mem_init() is truncated in this copy. The error
 * labels referenced below (release_dcbaa, destroy_device_pool,
 * free_cmd_ring, free_event_ring, free_erst) are never defined, the
 * device-context pool creation that 'destroy_device_pool' implies is
 * missing, and the function's success path/return is absent.
 */
/* * Initialize memory for CDNSP (one-time init). * * Program the PAGESIZE register, initialize the device context array, create * device contexts, set up a command ring segment, create event * ring (one for now).
*/ int cdnsp_mem_init(struct cdnsp_device *pdev)
{ struct device *dev = pdev->dev; int ret = -ENOMEM; unsignedint val;
dma_addr_t dma;
u32 page_size;
u64 val_64;
/* * Use 4K pages, since that's common and the minimum the * controller supports
*/
page_size = 1 << 12;
/* Program slot count and U3 entry capability into the config register. */
val = readl(&pdev->op_regs->config_reg);
val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
writel(val, &pdev->op_regs->config_reg);
/* * Doorbell array must be physically contiguous * and 64-byte (cache line) aligned.
*/
pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
&dma, GFP_KERNEL); if (!pdev->dcbaa) return -ENOMEM;
pdev->dcbaa->dma = dma;
cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
/* * Initialize the ring segment pool. The ring must be a contiguous * structure comprised of TRBs. The TRBs must be 16 byte aligned, * however, the command ring segment needs 64-byte aligned segments * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so driver pick the greater alignment * need.
*/
pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
page_size); if (!pdev->segment_pool) goto release_dcbaa;
/* Set up the command ring to have one segments for now. */
pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL); if (!pdev->cmd_ring) goto destroy_device_pool;
/* Set the address in the Command Ring Control register */
val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
(pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
pdev->cmd_ring->cycle_state;
cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
/* Compute the doorbell array base from the capability register offset. */
val = readl(&pdev->cap_regs->db_off);
val &= DBOFF_MASK;
pdev->dba = (void __iomem *)pdev->cap_regs + val;
/* Set ir_set to interrupt register set 0 */
pdev->ir_set = &pdev->run_regs->ir_set[0];
/* * Event ring setup: Allocate a normal ring, but also setup * the event ring segment table (ERST).
*/
pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
0, GFP_KERNEL); if (!pdev->event_ring) goto free_cmd_ring;
ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst); if (ret) goto free_event_ring;
/* Set ERST count with the number of entries in the segment table. */
val = readl(&pdev->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
writel(val, &pdev->ir_set->erst_size);
/* Set the segment table base address. */
val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
/* Set the event ring dequeue address. */
cdnsp_set_event_deq(pdev);
ret = cdnsp_setup_port_arrays(pdev); if (ret) goto free_erst;
ret = cdnsp_alloc_priv_device(pdev); if (ret) {
dev_err(pdev->dev, "Could not allocate cdnsp_device data structures\n"); goto free_erst;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.