// SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTX CPT driver * * Copyright (C) 2019 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation.
*/
#include"otx_cptvf.h" #include"otx_cptvf_algs.h"
/* Completion code size and initial value */ #define COMPLETION_CODE_SIZE 8 #define COMPLETION_CODE_INIT 0
/* SG list header size in bytes */ #define SG_LIST_HDR_SIZE 8
/* Default timeout when waiting for free pending entry in us */ #define CPT_PENTRY_TIMEOUT 1000 #define CPT_PENTRY_STEP 50
/* Default threshold for stopping and resuming sender requests */ #define CPT_IQ_STOP_MARGIN 128 #define CPT_IQ_RESUME_MARGIN 512
#define CPT_DMA_ALIGN 128
/*
 * Dump both DMA buffer lists of a CPT request to the debug log.
 *
 * For every gather (input) and scatter (output) entry the size, virtual
 * pointer and DMA address are printed, followed by a hexdump of the
 * buffer contents. Output goes through pr_debug()/print_hex_dump_debug()
 * and is therefore only visible with dynamic debug enabled.
 */
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req)
{
	int idx;

	pr_debug("Gather list size %d\n", req->incnt);
	for (idx = 0; idx < req->incnt; idx++) {
		pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", idx,
			 req->in[idx].size, req->in[idx].vptr,
			 (void *) req->in[idx].dma_addr);
		pr_debug("Buffer hexdump (%d bytes)\n", req->in[idx].size);
		print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
				     req->in[idx].vptr, req->in[idx].size,
				     false);
	}

	pr_debug("Scatter list size %d\n", req->outcnt);
	for (idx = 0; idx < req->outcnt; idx++) {
		pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", idx,
			 req->out[idx].size, req->out[idx].vptr,
			 (void *) req->out[idx].dma_addr);
		pr_debug("Buffer hexdump (%d bytes)\n", req->out[idx].size);
		print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
				     req->out[idx].vptr, req->out[idx].size,
				     false);
	}
}
/*
 * setup_sgio_components() - DMA-map every buffer of a scatter/gather list.
 *
 * NOTE(review): this listing is a truncated/garbled extract. The fused
 * keyword "staticinlineint" should read "static inline int", the locals
 * sg_ptr/j/components declared below are never used in the visible code,
 * and the sg_cleanup label targeted by the goto is not visible. The rest
 * of the function (building SG components into 'buffer' and the unmap
 * cleanup path) is presumably missing — confirm against the full source.
 */
staticinlineint setup_sgio_components(struct pci_dev *pdev, struct otx_cpt_buf_ptr *list, int buf_count, u8 *buffer)
{ struct otx_cpt_sglist_component *sg_ptr = NULL; int ret = 0, i, j; int components;
/* A NULL buffer list is a caller bug; fail fast. */
if (unlikely(!list)) {
dev_err(&pdev->dev, "Input list pointer is NULL\n"); return -EFAULT;
}
/*
 * Map every entry with a valid virtual pointer for bidirectional DMA.
 * On a mapping failure, unwind via sg_cleanup (label not in this extract).
 */
for (i = 0; i < buf_count; i++) { if (likely(list[i].vptr)) {
list[i].dma_addr = dma_map_single(&pdev->dev,
list[i].vptr,
list[i].size,
DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(&pdev->dev,
list[i].dma_addr))) {
dev_err(&pdev->dev, "Dma mapping failed\n");
ret = -EIO; goto sg_cleanup;
}
}
}
/*
 * On OcteonTX platform the parameter db_count is used as a count for ringing
 * door bell. The valid values for db_count are:
 * 0 - 1 CPT instruction will be enqueued however CPT will not be informed
 * 1 - 1 CPT instruction will be enqueued and CPT will be informed
 */
/*
 * Copy one CPT instruction into the VF instruction queue and ring the
 * doorbell with a count of 1 so the engine picks it up. Advances the
 * queue write position, wrapping to the first chunk when the last chunk
 * in the chunk list fills up.
 *
 * Fix vs. listed source: the fused keywords "staticvoid" made this
 * definition invalid C; restored to "static void" (no logic change).
 */
static void cpt_send_cmd(union otx_cpt_inst_s *cptinst, struct otx_cptvf *cptvf)
{
	struct otx_cpt_cmd_qinfo *qinfo = &cptvf->cqinfo;
	struct otx_cpt_cmd_queue *queue;
	struct otx_cpt_cmd_chunk *curr;
	u8 *ent;

	queue = &qinfo->queue[0];
	/*
	 * cpt_send_cmd is currently called only from critical section
	 * therefore no locking is required for accessing instruction queue
	 */
	ent = &queue->qhead->head[queue->idx * OTX_CPT_INST_SIZE];
	memcpy(ent, (void *) cptinst, OTX_CPT_INST_SIZE);

	/* Chunk full (size is in bytes, 64 bytes per instruction slot):
	 * move qhead to the next chunk, or wrap to the base chunk when the
	 * current one is the last in the list.
	 */
	if (++queue->idx >= queue->qhead->size / 64) {
		curr = queue->qhead;
		if (list_is_last(&curr->nextchunk, &queue->chead))
			queue->qhead = queue->base;
		else
			queue->qhead = list_next_entry(queue->qhead, nextchunk);
		queue->idx = 0;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	otx_cptvf_write_vq_doorbell(cptvf, 1);
}
/*
 * NOTE(review): fragment — this code begins mid-function. The enclosing
 * function signature and the allocation of 'pentry' from the pending
 * queue are not visible in this extract; presumably this is the request
 * submission path (process_request-style code) — confirm against the
 * full source.
 */
if (unlikely(!pentry)) {
/* No free pending-queue slot: release the lock and bail out. */
ret = -ENOSPC;
spin_unlock_bh(&pqueue->lock); goto request_cleanup;
}
/* * Check if we are close to filling in entire pending queue, * if so then tell the sender to stop/sleep by returning -EBUSY * We do it only for context which can sleep (GFP_KERNEL)
*/ if (gfp == GFP_KERNEL &&
pqueue->pending_count > (pqueue->qlen - CPT_IQ_STOP_MARGIN)) {
pentry->resume_sender = true;
} else
pentry->resume_sender = false;
/* Snapshot the flag while still holding pqueue->lock. */
resume_sender = pentry->resume_sender;
pqueue->pending_count++;
/* * We allocate and prepare pending queue entry in critical section * together with submitting CPT instruction to CPT instruction queue * to make sure that order of CPT requests is the same in both * pending and instruction queues
*/
spin_unlock_bh(&pqueue->lock);
/*
 * -EBUSY asks a sleep-capable caller to back off until resumed;
 * -EINPROGRESS is the normal asynchronously-submitted status.
 */
ret = resume_sender ? -EBUSY : -EINPROGRESS; return ret;
/*
 * NOTE(review): fragment — begins mid-function. This is the hardware
 * completion-code dispatch for a CPT request: error codes dump the SG
 * lists, COMPLETION_CODE_INIT means still pending (returns 1 to keep
 * polling, with a timeout/extra-time backoff), and CPT_COMP_E_GOOD
 * checks the microcode error code. The fused token "elseif" below
 * should read "else if" (extraction artifact — cannot be changed in a
 * comments-only edit).
 */
ecode.u = be64_to_cpup((__be64 *)cpt_info->out_buffer); switch (ccode) { case CPT_COMP_E_FAULT:
dev_err(&pdev->dev, "Request failed with DMA fault\n");
otx_cpt_dump_sg_list(pdev, req); break;
case CPT_COMP_E_SWERR:
dev_err(&pdev->dev, "Request failed with software error code %d\n",
ecode.s.ccode);
otx_cpt_dump_sg_list(pdev, req); break;
case CPT_COMP_E_HWERR:
dev_err(&pdev->dev, "Request failed with hardware error\n");
otx_cpt_dump_sg_list(pdev, req); break;
/*
 * Not yet completed: warn once the overall timeout elapses, otherwise
 * extend the deadline a bounded number of times (reset tolerance).
 */
case COMPLETION_CODE_INIT: /* check for timeout */ if (time_after_eq(jiffies, cpt_info->time_in +
OTX_CPT_COMMAND_TIMEOUT * HZ))
dev_warn(&pdev->dev, "Request timed out 0x%p\n", req); elseif (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) {
cpt_info->time_in = jiffies;
cpt_info->extra_time++;
} return 1;
case CPT_COMP_E_GOOD: /* Check microcode completion code */ if (ecode.s.ccode) { /* * If requested hmac is truncated and ucode returns * s/g write length error then we report success * because ucode writes as many bytes of calculated * hmac as available in gather buffer and reports * s/g write length error if number of bytes in gather * buffer is less than full hmac size.
*/ if (req->is_trunc_hmac &&
ecode.s.ccode == ERR_SCATTER_GATHER_WRITE_LENGTH) {
*res_code = 0; break;
}
/*
 * NOTE(review): fragment — 'process_pentry' is a label inside a larger
 * function whose head is not visible here, and the trailing closing
 * braces at the end of this extract belong to scopes opened outside it.
 * The visible logic: look CPT_IQ_RESUME_MARGIN entries ahead in the
 * pending queue and, if that entry flagged a stalled sender, invoke its
 * callback with -EINPROGRESS (lock dropped around the callback) so the
 * sender resumes submitting; then invoke the current entry's callback.
 */
process_pentry: /* * Check if we should inform sending side to resume * We do it CPT_IQ_RESUME_MARGIN elements in advance before * pending queue becomes empty
*/
resume_index = modulo_inc(pqueue->front, pqueue->qlen,
CPT_IQ_RESUME_MARGIN);
resume_pentry = &pqueue->head[resume_index]; if (resume_pentry &&
resume_pentry->resume_sender) {
resume_pentry->resume_sender = false;
callback = resume_pentry->callback;
areq = resume_pentry->areq;
if (callback) {
/* Drop the queue lock while calling out to avoid deadlock
 * if the callback re-enters the submit path.
 */
spin_unlock_bh(&pqueue->lock);
/* * EINPROGRESS is an indication for sending * side that it can resume sending requests
*/
callback(-EINPROGRESS, areq, cpt_info);
spin_lock_bh(&pqueue->lock);
}
}
/* * Call callback after current pending entry has been * processed, we don't do it if the callback pointer is * invalid.
*/ if (callback)
callback(res_code, areq, cpt_info);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.