/* Allocate the memory backing a queue: the HW descriptor ring, an optional
 * 64-bit HW write-back area placed immediately after the ring, and an
 * optional SW state ring.  On success returns the VA of the HW ring and
 * reports its DMA address through @dma_addr, the SW ring VA through @sw_va,
 * and the write-back area VA through @wb_va.  Returns NULL on failure.
 */
void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
			 size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
			 int numa_node, dma_addr_t *dma_addr, void **sw_va,
			 volatile __be64 **wb_va)
{
	int orig_node = dev_to_node(dma_dev);
	size_t ring_bytes;
	void *hw_ring;

	if (numa_node == NUMA_NO_NODE)
		numa_node = orig_node;

	/* The optional write-back slot lives right after the descriptors. */
	ring_bytes = depth * hw_desc_sz;
	if (wb)
		ring_bytes += sizeof(u64);

	/* Temporarily retarget the device's node so the coherent allocation
	 * is placed on the requested NUMA node, then restore it.
	 */
	set_dev_node(dma_dev, numa_node);
	hw_ring = dma_alloc_coherent(dma_dev, ring_bytes, dma_addr,
				     GFP_KERNEL);
	set_dev_node(dma_dev, orig_node);
	if (!hw_ring)
		return NULL;

	if (sw_desc_sz) {
		*sw_va = kvzalloc_node(depth * sw_desc_sz, GFP_KERNEL,
				       numa_node);
		if (!*sw_va) {
			/* Roll back the coherent allocation on failure. */
			dma_free_coherent(dma_dev, ring_bytes, hw_ring,
					  *dma_addr);
			return NULL;
		}
	}

	if (wb)
		*wb_va = hw_ring + ring_bytes - sizeof(u64);
	return hw_ring;
}
EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);
/* Prepare and issue an admin command to create an SQ on the device with the
 * provided parameters.  If the queue ID is auto-allocated by the device it
 * is returned in *sqidp.
 *
 * NOTE(review): this function is truncated in this copy of the file -- the
 * visible body ends after the RQ entry-size adjustment, before any admin
 * command is built or submitted, and the closing brace is missing.  Recover
 * the remainder from the original driver source before building.
 */
int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
		  u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
		  u8 coal_nentries, u8 coal_usec, u32 irq_num,
		  u32 scan_start_id, u32 scan_end_id,
		  u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
{
	/* Request and response overlay the same storage; only one is live
	 * at a time.
	 */
	union {
		struct fun_admin_epsq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	dma_addr_t wb_addr;
	u32 hw_qid;
	int rc;

	/* Reject depths beyond what the device supports. */
	if (sq_depth > fdev->q_depth)
		return -EINVAL;
	/* RQ entries have a fixed size; override the caller's value. */
	if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
		sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));
/* Prepare and issue an admin command to create a CQ on the device with the
 * provided parameters.  If the queue ID is auto-allocated by the device it
 * is returned in *cqidp.
 *
 * NOTE(review): truncated in this copy of the file -- only the local
 * declarations are present; the command construction, submission, and the
 * closing brace are missing.  Recover the remainder from the original
 * driver source before building.
 */
int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
		  u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
		  u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
		  u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
		  u32 __iomem **dbp)
{
	/* Request and response overlay the same storage; only one is live
	 * at a time.
	 */
	union {
		struct fun_admin_epcq_req req;
		struct fun_admin_generic_create_rsp rsp;
	} cmd;
	u32 hw_qid;
	int rc;
/* Given a command response with data scattered across >= 1 RQ buffers return
 * a pointer to a contiguous buffer containing all the data.  If the data is
 * in one RQ buffer the start address within that buffer is returned,
 * otherwise a new buffer is allocated and the data is gathered into it.
 *
 * NOTE(review): this copy of the function is corrupted.  "staticvoid" and
 * "conststruct" are fused keyword pairs (syntax errors), and the code that
 * walks the dataop header to compute total_len and the other locals is
 * missing, so total_len is used uninitialized below.  Recover the full
 * implementation from the original driver source before building.
 */
staticvoid *fun_data_from_rq(struct fun_queue *funq, conststruct fun_rsp_common *rsp, bool *need_free)
{
	u32 bufoff, total_len, remaining, fragsize, dataoff;
	struct device *dma_dev = funq->fdev->dev;
	conststruct fun_dataop_rqbuf *databuf;
	conststruct fun_dataop_hdr *dataop;
	conststruct fun_rq_info *rqinfo;
	void *data;

	/* For scattered completions gather the fragments into one buffer. */
	/* NULL is OK here.  In case of failure we still need to consume the
	 * data for proper buffer accounting but indicate an error in the
	 * response.
	 */
	data = kmalloc(total_len, GFP_ATOMIC);
	if (likely(data))
		*need_free = true;
	/* NOTE(review): this is the tail of a function whose beginning is
	 * missing from this copy of the file -- funq and fdev are defined in
	 * the lost portion; it appears to be the queue-allocation routine
	 * that returns a struct fun_queue *.  Recover the missing start from
	 * the original driver source before building.
	 */
	/* SQ/CQ 0 are implicitly created, assign their doorbells now.
	 * Other queues are assigned doorbells at their explicit creation.
	 */
	if (funq->sqid == 0)
		funq->sq_db = fun_sq_db_addr(fdev, 0);
	if (funq->cqid == 0)
		funq->cq_db = fun_cq_db_addr(fdev, 0);
	return funq;

free_funq:
	fun_free_queue(funq);
	return NULL;
}
/* Create a funq's RQ on the device.
 *
 * NOTE(review): truncated in this copy of the file -- only the local
 * declarations survive; the admin command that creates the RQ and the
 * closing brace are missing.  Recover the remainder from the original
 * driver source before building.
 */
int fun_create_rq(struct fun_queue *funq)
{
	struct fun_dev *fdev = funq->fdev;
	int rc;
/* NOTE(review): the following German-language text is website boilerplate
 * that was captured along with the source and is not part of the driver; as
 * raw prose it would not even compile.  Preserved here (translated) pending
 * confirmation that it can be removed:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */