/** * queue_alloc_page - allocate page for Queue * @hwif: HW interface for allocating DMA * @vaddr: virtual address will be returned in this address * @paddr: physical address will be returned in this address * @shadow_vaddr: VM area will be return here for holding WQ page addresses * @page_sz: page size of each WQ page * * Return 0 - Success, negative - Failure
**/ staticint queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, void ***shadow_vaddr, size_t page_sz)
{ struct pci_dev *pdev = hwif->pdev;
dma_addr_t dma_addr;
*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
GFP_KERNEL); if (!*vaddr) {
dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); return -ENOMEM;
}
*paddr = (u64)dma_addr;
/* use vzalloc for big mem */
*shadow_vaddr = vzalloc(page_sz); if (!*shadow_vaddr) goto err_shadow_vaddr;
/**
 * wqs_allocate_page - allocate page for WQ set
 * @wqs: Work Queue Set
 * @page_idx: the page index of the page will be allocated
 *
 * Return 0 - Success, negative - Failure
 **/
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
{
	/* thin wrapper: one WQS page is one DMA-coherent page plus its
	 * shadow address table, all stored at slot page_idx
	 */
	return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
				&wqs->page_paddr[page_idx],
				&wqs->shadow_page_vaddr[page_idx],
				WQS_PAGE_SIZE);
}
/** * wqs_free_page - free page of WQ set * @wqs: Work Queue Set * @page_idx: the page index of the page will be freed
**/ staticvoid wqs_free_page(struct hinic_wqs *wqs, int page_idx)
{ struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev;
/** * hinic_wqs_alloc - allocate Work Queues set * @wqs: Work Queue Set * @max_wqs: maximum wqs to allocate * @hwif: HW interface for use for the allocation * * Return 0 - Success, negative - Failure
**/ int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, struct hinic_hwif *hwif)
{ struct pci_dev *pdev = hwif->pdev; int err, i, page_idx;
/** * hinic_wq_allocate - Allocate the WQ resources from the WQS * @wqs: WQ set from which to allocate the WQ resources * @wq: WQ to allocate resources for it from the WQ set * @wqebb_size: Work Queue Block Byte Size * @wq_page_size: the page size in the Work Queue * @q_depth: number of wqebbs in WQ * @max_wqe_size: maximum WQE size that will be used in the WQ * * Return 0 - Success, negative - Failure
**/ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
u16 wqebb_size, u32 wq_page_size, u16 q_depth,
u16 max_wqe_size)
{ struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift; int err;
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL;
}
if (wq_page_size == 0) {
dev_err(&pdev->dev, "wq_page_size must be > 0\n"); return -EINVAL;
}
if (q_depth & (q_depth - 1)) {
dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); return -EINVAL;
}
/** * hinic_wq_free - Free the WQ resources to the WQS * @wqs: WQ set to free the WQ resources to it * @wq: WQ to free its resources to the WQ set resources
**/ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
{
free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
/** * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs * @cmdq_pages: will hold the pages of the cmdq * @wq: returned wqs * @hwif: HW interface * @cmdq_blocks: number of cmdq blocks/wq to allocate * @wqebb_size: Work Queue Block Byte Size * @wq_page_size: the page size in the Work Queue * @q_depth: number of wqebbs in WQ * @max_wqe_size: maximum WQE size that will be used in the WQ * * Return 0 - Success, negative - Failure
**/ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
u16 q_depth, u16 max_wqe_size)
{ struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift; int i, j, err = -ENOMEM;
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL;
}
if (wq_page_size == 0) {
dev_err(&pdev->dev, "wq_page_size must be > 0\n"); return -EINVAL;
}
if (q_depth & (q_depth - 1)) {
dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); return -EINVAL;
}
/**
 * hinic_wqs_cmdq_free - Free wqs from cmdqs
 * @cmdq_pages: hold the pages of the cmdq
 * @wq: wqs to free
 * @cmdq_blocks: number of wqs to free
 **/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
			 struct hinic_wq *wq, int cmdq_blocks)
{
	int blk;

	/* release every per-block WQ page set first, then the shared
	 * cmdq page they were carved out of
	 */
	for (blk = 0; blk < cmdq_blocks; blk++)
		free_wq_pages(&wq[blk], cmdq_pages->hwif, wq[blk].num_q_pages);

	cmdq_free_page(cmdq_pages);
}
/**
 * copy_wqe_to_shadow - gather a WQE into a contiguous shadow buffer
 * @wq: wq that holds the wqebbs
 * @shadow_addr: contiguous destination buffer
 * @num_wqebbs: number of wqebbs to copy
 * @idx: index of the first wqebb in the wq
 *
 * A WQE that wraps across WQ pages is not contiguous in memory; copy it
 * wqebb by wqebb into the shadow area so callers can read it linearly.
 **/
static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
			       int num_wqebbs, u16 idx)
{
	void *wqebb_addr;
	int i;

	for (i = 0; i < num_wqebbs; i++, idx++) {
		/* wrap the index around the queue depth */
		idx = MASKED_WQE_IDX(wq, idx);
		wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
			     WQE_PAGE_OFF(wq, idx);

		memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);

		shadow_addr += wq->wqebb_size;
	}
}
staticvoid copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, int num_wqebbs, u16 idx)
{ void *wqebb_addr; int i;
for (i = 0; i < num_wqebbs; i++, idx++) {
idx = MASKED_WQE_IDX(wq, idx);
wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
WQE_PAGE_OFF(wq, idx);
/** * hinic_get_wqe - get wqe ptr in the current pi and update the pi * @wq: wq to get wqe from * @wqe_size: wqe size * @prod_idx: returned pi * * Return wqe pointer
**/ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsignedint wqe_size,
u16 *prod_idx)
{ int curr_pg, end_pg, num_wqebbs;
u16 curr_prod_idx, end_prod_idx;
/* If we only have one page, still need to get shadown wqe when * wqe rolling-over page
*/ if (curr_pg != end_pg || end_prod_idx < *prod_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
/** * hinic_return_wqe - return the wqe when transmit failed * @wq: wq to return wqe * @wqe_size: wqe size
**/ void hinic_return_wqe(struct hinic_wq *wq, unsignedint wqe_size)
{ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
atomic_sub(num_wqebbs, &wq->prod_idx);
atomic_add(num_wqebbs, &wq->delta);
}
/** * hinic_put_wqe - return the wqe place to use for a new wqe * @wq: wq to return wqe * @wqe_size: wqe size
**/ void hinic_put_wqe(struct hinic_wq *wq, unsignedint wqe_size)
{ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
>> wq->wqebb_size_shift;
atomic_add(num_wqebbs, &wq->cons_idx);
atomic_add(num_wqebbs, &wq->delta);
}
/** * hinic_read_wqe - read wqe ptr in the current ci * @wq: wq to get read from * @wqe_size: wqe size * @cons_idx: returned ci * * Return wqe pointer
**/ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsignedint wqe_size,
u16 *cons_idx)
{ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
>> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx; int curr_pg, end_pg;
if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) return ERR_PTR(-EBUSY);
/* If we only have one page, still need to get shadown wqe when * wqe rolling-over page
*/ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
/*
 * NOTE(review): the following disclaimer text appears to have been pulled in
 * from a web page during extraction and is not part of the driver source.
 * It is wrapped in a comment (and translated from German) so the file stays
 * compilable; it should probably be removed entirely:
 *
 * The information on this web page has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */