/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4
/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets
 *   to the ring
 *
 * - Initialization and tear down routines to buildup
 *   and tear down SDMA
 *
 * - ISR entrances to handle interrupts, state changes
 *   and errors
 */
/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ. The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq. slabs, pre-allocated lists,
 * and dma pools can be used. Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location. It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls. The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx. Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx. An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent(). For these memory locations, it
 * is the responsibility of the user to handle the unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added. An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
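/*
 * Illustrative sketch only (not part of this driver): one way the usage
 * model above might look. All "my_*" names are hypothetical, error
 * unwinding is abbreviated, and the exact iowait handle type taken by
 * sdma_send_txreq() should be checked against the prototypes in this
 * header for the kernel version in use.
 *
 *	struct my_txreq {
 *		struct sdma_txreq txreq;	// must be the first member
 *		void *my_state;			// user overload data
 *	};
 *
 *	static void my_complete(struct sdma_txreq *tx, int status)
 *	{
 *		// may run from ISR/tasklet/thread context: no sleeping
 *		struct my_txreq *mtx =
 *			container_of(tx, struct my_txreq, txreq);
 *
 *		my_free(mtx, status);		// hypothetical recycle
 *	}
 *
 *	static int my_send(struct sdma_engine *sde, struct iowait *wait,
 *			   struct my_txreq *mtx, u16 tlen,
 *			   struct page *page, unsigned long off, u16 len)
 *	{
 *		int ret;
 *
 *		// tlen covers pbc + headers + data
 *		ret = sdma_txinit(&mtx->txreq, 0, tlen, my_complete);
 *		if (ret)
 *			return ret;
 *		// one sdma_txadd_* call per disjoint memory location
 *		ret = sdma_txadd_page(sde->dd, &mtx->txreq, page, off,
 *				      len, NULL, NULL, NULL);
 *		if (ret)
 *			return ret;
 *		return sdma_send_txreq(sde, wait, &mtx->txreq, 0);
 *	}
 */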
/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init(). Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
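/*
 * Illustrative sketch only: the bring-up ordering implied above.
 * sdma_init() and sdma_start() are the calls documented in this file;
 * my_enable_interrupts() is a hypothetical placeholder for whatever
 * enables the SDMA interrupt sources.
 *
 *	static int my_bringup(struct hfi1_devdata *dd, u8 port)
 *	{
 *		int ret;
 *
 *		ret = sdma_init(dd, port);	// data structures + CSRs
 *		if (ret)
 *			return ret;
 *		my_enable_interrupts(dd);	// must precede sdma_start()
 *		sdma_start(dd);			// state machine is irq driven
 *		return 0;
 *	}
 */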
/*
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};
/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Access to non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	/* read mostly */
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	/* private: */
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	u64 idle_mask;
	u64 progress_mask;
	u64 int_mask;
	/* private: */
	volatile __le64      *head_dma; /* DMA'ed by chip */
	/* private: */
	dma_addr_t            head_phys;
	/* private: */
	struct hw_sdma_desc  *descq;
	/* private: */
	unsigned              descq_full_count;
	struct sdma_txreq   **tx_ring;
	/* private: */
	dma_addr_t            descq_phys;
	/* private */
	u32                   sdma_mask;
	/* private */
	struct sdma_state     state;
	/* private */
	int                   cpu;
	/* private: */
	u8                    sdma_shift;
	/* private: */
	u8                    this_idx; /* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t            senddmactrl_lock;
	/* private: */
	u64                   p_senddmactrl; /* shadow per-engine SendDmaCtrl */
	/* read/write using tail_lock */
	spinlock_t            tail_lock ____cacheline_aligned_in_smp;
	/* private: */
	u16                   desc_avail;
	/* read/write using head_lock */
	/* private: */
	seqlock_t             head_lock ____cacheline_aligned_in_smp;
};
/*
 * Either head_lock or tail_lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}
/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}
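/*
 * Illustrative sketch only: gating a submit path on sdma_running().
 * The engine can still halt after the check, so errors from the actual
 * submit must be handled regardless; the error code is arbitrary.
 *
 *	if (!sdma_running(sde))
 *		return -EINVAL;		// not in s99_running; don't build tx
 */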
/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors for the first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add in
 * the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted. The callback will be provided this tx, a status, and a
 * flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait structure had been used, indicates that the
 * iowait sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise. The sdma_txadd_*
 * entrances will pad with a descriptor that references 1 - 3 bytes when
 * the number of bytes specified in tlen have been supplied to the
 * sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header. This is for cases where the stored header is
 * larger than the header to be used in a packet. This is typical
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int))
{
	if (tlen == 0)
		return -ENODATA;
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->wait = NULL;
	tx->packet_len = tlen;
	tx->tlen = tx->packet_len;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}
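/*
 * Illustrative sketch only: initializing a tx whose header comes from
 * on-chip AHG entry 5 with two update descriptors. The entry number,
 * descriptor count, header length, and update words are arbitrary
 * example values; mtx/tlen/my_complete are from the sketch above.
 *
 *	u32 ahg[2] = { 0, 0 };	// user-built AHG update words
 *	u8 hlen = 64;		// bytes of on-chip header to use
 *
 *	ret = sdma_txinit_ahg(&mtx->txreq, SDMA_TXREQ_F_USE_AHG, tlen,
 *			      5, 2, ahg, hlen, my_complete);
 */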
/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg(). The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status. The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
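/*
 * Illustrative sketch only: a latency-sensitive tx that wants no
 * completion callback at all.
 *
 *	ret = sdma_txinit(&mtx->txreq, SDMA_TXREQ_F_URGENT, tlen, NULL);
 */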
static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len,
	void *pinning_ctx,
	void (*ctx_get)(void *),
	void (*ctx_put)(void *))
{
	int rval = 0;

	make_tx_sdma_desc(
		tx,
		type,
		addr, len,
		pinning_ctx, ctx_get, ctx_put);
	WARN_ON(len > tx->tlen);
	tx->num_desc++;
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1)) {
			rval = _pad_sdma_tx_descs(dd, tx);
			if (rval)
				return rval;
		} else {
			_sdma_close_tx(dd, tx);
		}
	}
	return rval;
}
/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 * @pinning_ctx: context to be stored on struct sdma_desc .pinning_ctx. Not
 *               added if coalesce buffer is used. E.g. pointer to pinned-page
 *               cache entry for the sdma_desc.
 * @ctx_get: optional function to take reference to @pinning_ctx. Not called if
 *           @pinning_ctx is NULL.
 * @ctx_put: optional function to release reference to @pinning_ctx after
 *           sdma_desc completes. May be called in interrupt context so must
 *           not sleep. Not called if @pinning_ctx is NULL.
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len,
	void *pinning_ctx,
	void (*ctx_get)(void *),
	void (*ctx_put)(void *))
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}

	addr = dma_map_page(
		       &dd->pcidev->dev,
		       page,
		       offset,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
				 pinning_ctx, ctx_get, ctx_put);
}
/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
				 NULL, NULL, NULL);
}
/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the page is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			__sdma_txclean(dd, tx);
			return rval;
		}
	}

	addr = dma_map_single(
		       &dd->pcidev->dev,
		       kvaddr,
		       len,
		       DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		__sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
				 NULL, NULL, NULL);
}
/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress. This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
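/*
 * Illustrative sketch only: the seqcount pattern this routine is meant
 * for, as it would appear in a sleep routine. my_queue_iowait() is a
 * hypothetical stand-in for queuing the iowait for a progress wakeup.
 *
 *	unsigned seq = raw_seqcount_begin(&sde->head_lock.seqcount);
 *
 *	if (sdma_progress(sde, seq, tx))
 *		return -EAGAIN;		// head moved; retry the submit
 *	my_queue_iowait(sde, wait);	// otherwise wait for progress
 */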
/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);
/*
 * The diagram below details the relationship of the mapping structures
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * For example:
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *               dd->sdma_map
 *                    |                                   sdma_map_elem[0]
 *                    |                                +--------------------+
 *                    v                                |       mask         |
 *               sdma_vl_map                           |--------------------|
 *      +--------------------------+                   | sde[0] -> eng 1    |
 *      |    list (RCU)            |                   |--------------------|
 *      |--------------------------|                 ->| sde[1] -> eng 2    |
 *      |    mask                  |              --/  |--------------------|
 *      |--------------------------|            -/     |        *           |
 *      |    actual_vls (max 8)    |          -/       |--------------------|
 *      |--------------------------|        --/        | sde[n-1] -> eng n  |
 *      |    vls (max 8)           |      -/           +--------------------+
 *      |--------------------------|  --/
 *      |    map[0]                |-/
 *      |--------------------------|                   +---------------------+
 *      |    map[1]                |---                |       mask          |
 *      |--------------------------|   \----           |---------------------|
 *      |       *                  |        \--        | sde[0] -> eng 1+n   |
 *      |       *                  |           \----   |---------------------|
 *      |       *                  |                \->| sde[1] -> eng 2+n   |
 *      |--------------------------|                   |---------------------|
 *      |   map[vls - 1]           |-                  |         *           |
 *      +--------------------------+ \-                |---------------------|
 *                                     \-              | sde[m-1] -> eng m+n |
 *                                       \             +---------------------+
 *                                        \-
 *                                          \
 *                                           \-        +----------------------+
 *                                             \-      |       mask           |
 *                                               \     |----------------------|
 *                                                \-   | sde[0] -> eng 1+m+n  |
 *                                                  \- |----------------------|
 *                                                    >| sde[1] -> eng 2+m+n  |
 *                                                     |----------------------|
 *                                                     |          *           |
 *                                                     |----------------------|
 *                                                     | sde[o-1] -> eng o+m+n|
 *                                                     +----------------------+
 */
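/*
 * Worked example for the sizing rules above (values are arbitrary):
 * with num_sdma = 16, num_vls = 3, and no vl_engines override,
 * nactual = 16 / 3 = 5 engines per vl, and the 16 % 3 = 1 extra engine
 * is added to the last vl (so vl2 gets 6). n = roundup_pow_of_two(5)
 * = 8, so each sdma_map_elem mask is n - 1 = 7 and the sde[] slots
 * beyond nactual wrap round-robin back to the first engine for that vl.
 */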
/**
 * struct sdma_map_elem - mapping for a vl
 * @mask: selector mask
 * @sde: array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce an index into the trailing
 * array of sdes.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[];
};
/**
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index to map array
 * @actual_vls: number of vls
 * @vls: number of vls rounded to next power of 2
 * @map: array of sdma_map_elem entries
 *
 * This is the parent mapping structure. The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[];
};
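/*
 * Illustrative sketch only: how the two masks above combine to select
 * an engine, modeled on the "mod by mask" descriptions. The function
 * name and the selector source are assumptions; only dd->sdma_map and
 * the structures above come from this file.
 *
 *	static struct sdma_engine *my_select_engine(struct hfi1_devdata *dd,
 *						    u8 vl, u32 selector)
 *	{
 *		struct sdma_vl_map *m;
 *		struct sdma_map_elem *e;
 *		struct sdma_engine *sde;
 *
 *		rcu_read_lock();
 *		m = rcu_dereference(dd->sdma_map);
 *		e = m->map[vl & m->mask];		// per-vl element
 *		sde = e->sde[selector & e->mask];	// engine within vl
 *		rcu_read_unlock();
 *		return sde;
 *	}
 */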