/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
res = &rdev->qplib_res;
en_dev = rdev->en_dev;
cctx = rdev->chip_ctx;
/* Issue qcfg */
	rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
	if (rc)
		dev_info(rdev_to_dev(rdev),
			 "Couldn't get DB bar size, Low latency framework is disabled\n");

	/* set register offsets for both UC and WC */
	if (bnxt_qplib_is_chip_gen_p7(cctx)) {
res->dpi_tbl.ucreg.offset = offset;
res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
} else {
res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
BNXT_QPLIB_DBR_PF_DB_OFFSET;
res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
}
	/* If WC mapping is disabled by the L2 driver then en_dev->l2_db_size
	 * is equal to the actual DB-BAR size. This indicates that L2 is
	 * mapping the entire BAR as UC. The RoCE driver can't enable WC
	 * mapping in that case, so DB-push will be disabled.
	 */
	barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
	if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
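		/* Here l2_db_size != barlen, i.e. L2 maps only part of the
		 * BAR as UC; the remainder is free for the RoCE WC (push)
		 * mapping, so place the WC region right after the L2 range.
		 */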
res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
}
}
/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
rdev->dev_attr->tqm_alloc_reqs[i];
}
	vf_res = &qplib_ctx->vf_res;

	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs.
	 */
vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
nvfs = num_vf;
num_vf = 100 * num_vf;
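	/*
	 * num_vf now carries a 100x scale so that the vf_pct percentage
	 * math below stays in integer arithmetic: each per-VF quota is
	 * count * vf_pct / (100 * nvfs). For example, if
	 * BNXT_RE_PCT_RSVD_FOR_PF is 35 (vf_pct = 65), qpc_count is 40000
	 * and there are 8 VFs, each VF may create 40000 * 65 / 800 = 3250
	 * QPs.
	 */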
vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, iSER, NVMe applications, etc. If the firmware severely
	 * restricts the number of MRs, then let the PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
mrws = qplib_ctx->mrw_count * vf_pct;
nvfs = num_vf;
} else {
mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
}
vf_res->max_mrw_per_vf = (mrws / nvfs);
vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
	/*
	 * Use the total VF count since the actual VF count may not be
	 * available at this point.
	 */
	rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
	if (!rdev->num_vfs)
		return;
	rdev = en_info->rdev;
	if (!rdev)
		return;
msix_ent = rdev->nqr->msix_entries;
	rcfw = &rdev->rcfw;
	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
}
	/* Vectors may change after restart, so update with the new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->nqr->num_msix; indx++)
rdev->nqr->msix_entries[indx].vector = ent[indx].vector;
	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				       false);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
		return;
	}
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
nq = &rdev->nqr->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc) {
			ibdev_warn(&rdev->ibdev,
				   "Failed to reinit NQ index %d\n", indx - 1);
			return;
}
}
}
	/* The loop shouldn't run infinitely, as the occupancy usually goes
	 * below the pacing algorithm threshold soon after pacing kicks in.
	 */
	while (1) {
		fifo_occup = __get_fifo_occupancy(rdev);
		/* FIFO occupancy cannot be greater than the max FIFO depth */
		if (fifo_occup > pacing_data->fifo_max_depth)
			break;
	if (!mutex_trylock(&rdev->pacing.dbq_lock))
		return;
pacing_data = rdev->qplib_res.pacing_data;
pacing_save = rdev->pacing.do_pacing_save;
__wait_for_fifo_occupancy_below_th(rdev);
	cancel_delayed_work_sync(&rdev->dbq_pacing_work);
	if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
		/* Double the do_pacing value during congestion */
pacing_save = pacing_save << 1;
	} else {
		/*
		 * When a new congestion is detected, increase the do_pacing
		 * value 8 times and the pacing_th 4 times. The reason to
		 * increase pacing_th is to give more space for the queue to
		 * oscillate down without getting empty, but also more room
		 * for the queue to increase without causing another alarm.
		 */
pacing_save = pacing_save << 3;
pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
}
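	/* Growth is geometric: x2 while congestion persists, x8 on newly
	 * detected congestion; clamp it to the maximum pacing value.
	 */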
if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
pacing_save = BNXT_RE_MAX_DBR_DO_PACING;
	if (fifo_occup > pacing_data->pacing_th)
		goto restart_timer;

	/*
	 * Instead of immediately going back to the default do_pacing,
	 * reduce it by 1/8 and restart the timer.
	 */
	pacing_data->do_pacing -= pacing_data->do_pacing >> 3;
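	/* The decay is geometric: each pass keeps 7/8 of the previous value
	 * (e.g. 4096 -> 3584 -> 3136 -> ...), converging back toward
	 * dbr_def_do_pacing once congestion clears.
	 */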
	pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing,
				       pacing_data->do_pacing);
	if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
bnxt_re_set_default_pacing_data(rdev);
		rdev->stats.pacing.complete++;
		goto dbq_unlock;
}
	if (!rdev->pacing.dbr_pacing)
		return;
mutex_lock(&rdev->pacing.dbq_lock);
pacing_data = rdev->qplib_res.pacing_data;
	/*
	 * Increase the alarm_th to max so that other user lib instances do
	 * not keep alerting the driver.
	 */
pacing_data->alarm_th = pacing_data->fifo_max_depth;
pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
cancel_work_sync(&rdev->dbq_fifo_check_work);
schedule_work(&rdev->dbq_fifo_check_work);
mutex_unlock(&rdev->pacing.dbq_lock);
}
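/* Set up DBR pacing. The page allocated below is shared with user
 * libraries so that they can read the pacing parameters (do_pacing,
 * pacing_th, alarm_th) that the driver maintains in pacing_data.
 */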
static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
	/* Allocate a page for app use */
	rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
	if (!rdev->pacing.dbr_page)
		return -ENOMEM;
	err = bnxt_re_read_context_allowed(rdev);
	if (err)
		return err;

	len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
			BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 :
			BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5;
	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	err = bnxt_re_read_context_allowed(rdev);
	if (err)
		return err;

	len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
			BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 :
			BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5;
	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	err = bnxt_re_read_context_allowed(rdev);
	if (err)
		return err;

	len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
			BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 :
			BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5;
	data = kzalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
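/* Function-level firmware async events are only validated here: known
 * events are consumed silently and unknown ones are rejected.
 */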
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
	switch (err_event->req_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	default:
		break;
	}
	switch (err_event->res_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
		if (srq)
			event.event = IB_EVENT_SRQ_ERR;
		break;
	default:
		break;
	}
	cqerr = event;
	switch (cqerr->cq_err_reason) {
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
		ibevent.event = IB_EVENT_CQ_ERR;
		break;
	default:
		break;
	}
for (index = 0; index < sgid_tbl->active; index++) {
gid_idx = sgid_tbl->hw_id[index];
		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Need to modify the VLAN enable setting of non-VLAN GIDs
		 * only, as the setting is done for VLAN GIDs while adding
		 * the GID.
		 */
		if (sgid_tbl->vlan[index])
			continue;
/* Get priority for roce */
prio_map = bnxt_re_get_priority_mask(rdev);
	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Actual priorities are not programmed, as that is already done
	 * by the L2 driver; just enable or disable priority VLAN tagging.
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
rdev->qplib_res.prio = prio_map;
bnxt_re_update_gid(rdev);
}
return 0;
}
static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
{
	if (rdev->is_virtfn)
		return;
/* When DEL_GID fails, the driver does not free the GID ctx memory.
 * To avoid a memory leak, free that memory during unload.
 */
static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	int i;

	if (!sgid_tbl->active)
		return;

	ctx_tbl = sgid_tbl->ctx;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (sgid_tbl->hw_id[i] == 0xFFFF)
			continue;
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
	bnxt_re_free_gid_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
bnxt_re_free_res(rdev);
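	/* Tear down the RCFW channel in the reverse order of its setup:
	 * deinit firmware state, release the stats context, free the HW
	 * context memory, disable the channel, free the CREQ ring and,
	 * finally, the channel memory itself.
	 */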
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
rdev->nqr->num_msix = 0;
if (rdev->pacing.dbr_pacing)
bnxt_re_deinitialize_dbr_pacing(rdev);
bnxt_re_free_nqr_mem(rdev);
	bnxt_re_destroy_chip_ctx(rdev);
	if (op_type == BNXT_RE_COMPLETE_REMOVE) {
		if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED,
				       &rdev->flags))
bnxt_unregister_dev(rdev->en_dev);
}
}
/* Worker thread for polling periodic events. Now used for QoS programming. */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);
	if (op_type == BNXT_RE_COMPLETE_INIT) {
		/* Register a new RoCE device instance with the netdev */
		rc = bnxt_re_register_netdev(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with netdev: %#x\n", rc);
			return -EINVAL;
}
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
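	/* RoCE needs at least one vector for the CREQ/AEQ and one for an
	 * NQ, hence the BNXT_RE_MIN_MSIX floor of 2 checked below.
	 */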
if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) {
ibdev_err(&rdev->ibdev, "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
rdev->en_dev->ulp_tbl->msix_requested);
bnxt_unregister_dev(rdev->en_dev);
		clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		return -EINVAL;
}
ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
rdev->en_dev->ulp_tbl->msix_requested);
	rc = bnxt_re_setup_chip_ctx(rdev);
	if (rc) {
bnxt_unregister_dev(rdev->en_dev);
clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
ibdev_err(&rdev->ibdev, "Failed to get chip context\n"); return -EINVAL;
}
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
bnxt_re_query_hwrm_intf_version(rdev);
	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
}