switch (info->ae_id) { case IRDMA_AE_AMP_BOUNDS_VIOLATION: case IRDMA_AE_AMP_INVALID_STAG: case IRDMA_AE_AMP_RIGHTS_VIOLATION: case IRDMA_AE_AMP_UNALLOCATED_STAG: case IRDMA_AE_AMP_BAD_PD: case IRDMA_AE_AMP_BAD_QP: case IRDMA_AE_AMP_BAD_STAG_KEY: case IRDMA_AE_AMP_BAD_STAG_INDEX: case IRDMA_AE_AMP_TO_WRAP: case IRDMA_AE_PRIV_OPERATION_DENIED:
qp->flush_code = FLUSH_PROT_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_UDA_XMIT_BAD_PD: case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: case IRDMA_AE_UDA_L4LEN_INVALID: case IRDMA_AE_DDP_UBE_INVALID_MO: case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
qp->flush_code = FLUSH_LOC_LEN_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp->flush_code = FLUSH_REM_OP_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
qp->flush_code = FLUSH_GENERAL_ERR; break; case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break; case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: case IRDMA_AE_AMP_MWBIND_VALID_STAG:
qp->flush_code = FLUSH_MW_BIND_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; case IRDMA_AE_IB_INVALID_REQUEST:
qp->flush_code = FLUSH_REM_INV_REQ_ERR;
qp->event_type = IRDMA_QP_EVENT_REQ_ERR; break; default:
qp->flush_code = FLUSH_GENERAL_ERR;
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; break;
}
}
if (iwcq->ibcq.event_handler) { struct ib_event ibevent;
ibevent.device = iwcq->ibcq.device;
ibevent.event = IB_EVENT_CQ_ERR;
ibevent.element.cq = &iwcq->ibcq;
iwcq->ibcq.event_handler(&ibevent,
iwcq->ibcq.cq_context);
}
irdma_cq_rem_ref(&iwcq->ibcq); break; case IRDMA_AE_RESET_NOT_SENT: case IRDMA_AE_LLP_DOUBT_REACHABILITY: case IRDMA_AE_RESOURCE_EXHAUSTION: break; case IRDMA_AE_PRIV_OPERATION_DENIED: case IRDMA_AE_STAG_ZERO_INVALID: case IRDMA_AE_IB_RREQ_AND_Q1_FULL: case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: case IRDMA_AE_DDP_UBE_INVALID_MO: case IRDMA_AE_DDP_UBE_INVALID_QN: case IRDMA_AE_DDP_NO_L_BIT: case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: case IRDMA_AE_INVALID_ARP_ENTRY: case IRDMA_AE_INVALID_TCP_OPTION_RCVD: case IRDMA_AE_STALE_ARP_ENTRY: case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: case IRDMA_AE_LLP_SYN_RECEIVED: case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: case IRDMA_AE_LLP_TOO_MANY_RNRS: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: default:
ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src); if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
ctx_info->roce_info->err_rq_idx_valid = info->rq; if (info->rq) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
}
irdma_set_flush_fields(qp, info);
irdma_cm_disconn(iwqp); break;
}
ctx_info->iwarp_info->err_rq_idx_valid = info->rq; if (info->rq) {
ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
ctx_info->tcp_info_valid = false;
ctx_info->iwarp_info_valid = true;
irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
} if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
irdma_cm_disconn(iwqp);
} else {
irdma_terminate_connection(qp, info);
} break;
} if (info->qp)
irdma_qp_rem_ref(&iwqp->ibqp);
} while (1);
if (aeqcnt)
irdma_sc_repost_aeq_entries(dev, aeqcnt);
}
/**
 * irdma_ena_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
{
	/* Delegate to the HW-generation-specific irq enable callback. */
	dev->irq_ops->irdma_en_irq(dev, msix_id);
}
/* NOTE(review): extraction artifact -- this fragment is truncated: the body
 * is cut off after the ibdev_dbg() call (no closing brace) and the "exit"
 * label targeted by the fused "gotoexit" token is missing. The fused tokens
 * "staticvoid"/"gotoexit" need their lost whitespace restored from upstream.
 */
/** * irdma_destroy_aeq - destroy aeq * @rf: RDMA PCI function * * Issue a destroy aeq request and * free the resources associated with the aeq * The function is called during driver unload
*/ staticvoid irdma_destroy_aeq(struct irdma_pci_f *rf)
{ struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_aeq *aeq = &rf->aeq; int status = -EBUSY;
/* Tear down the AEQ interrupt only when it has its own MSI-X vector. */
if (!rf->msix_shared) {
rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
} if (rf->reset) gotoexit;
aeq->sc_aeq.size = 0;
/* Issue the AEQ destroy command through the CQP; only log on failure. */
status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY); if (status)
ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
/* NOTE(review): extraction artifact -- this fragment is truncated after the
 * msix_vec selection; the loop that disables and destroys each CEQ is
 * missing, as is the closing brace. Recover the tail from upstream.
 */
/** * irdma_del_ceqs - destroy all ceq's except CEQ 0 * @rf: RDMA PCI function * * Go through all of the device ceq's, except 0, and for each * ceq disable the ceq interrupt and destroy the ceq
*/ staticvoid irdma_del_ceqs(struct irdma_pci_f *rf)
{ struct irdma_ceq *iwceq = &rf->ceqlist[1]; struct irdma_msix_vector *msix_vec;
u32 i = 0;
/* With a shared MSI-X vector the CEQ vectors start at index 1, else 2. */
if (rf->msix_shared)
msix_vec = &rf->iw_msixtbl[1]; else
msix_vec = &rf->iw_msixtbl[2];
/**
 * irdma_destroy_ccq - destroy control cq
 * @rf: RDMA PCI function
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void irdma_destroy_ccq(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_ccq *ccq = &rf->ccq;
	int status = 0;

	/* Stop CQP completion processing before tearing down the CCQ. */
	if (rf->cqp_cmpl_wq)
		destroy_workqueue(rf->cqp_cmpl_wq);

	/* Skip the destroy command when the function is being reset. */
	if (!rf->reset)
		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
	if (status)
		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
			  ccq->mem_cq.pa);
	ccq->mem_cq.va = NULL;
}
/**
 * irdma_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: iwarp device
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @privileged: permission to close HMC objects
 * @reset: true if called before reset
 */
static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
					 enum irdma_hmc_rsrc_type obj_type,
					 struct irdma_hmc_info *hmc_info,
					 bool privileged, bool reset)
{
	struct irdma_hmc_del_obj_info info = {};

	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	/* Delete every object of this type that was previously created. */
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.privileged = privileged;
	if (irdma_sc_del_hmc_obj(dev, &info, reset))
		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
			  obj_type);
}
/**
 * irdma_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @privileged: permission to delete HMC objects
 * @reset: true if called before reset
 * @vers: hardware version
 */
static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
				  struct irdma_hmc_info *hmc_info,
				  bool privileged, bool reset,
				  enum irdma_vers vers)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     hmc_info, privileged,
						     reset);
		/* GEN_1 hardware has no HMC objects beyond the timer type. */
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}
}
/**
 * irdma_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
				     struct irdma_hmc_create_obj_info *info)
{
	/* Thin wrapper over the shared-code HMC object creation routine. */
	return irdma_sc_create_hmc_obj(dev, info);
}
/**
 * irdma_create_hmc_objs - create all hmc objects for the device
 * @rf: RDMA PCI function
 * @privileged: permission to create HMC objects
 * @vers: HW version
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
				 enum irdma_vers vers)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_hmc_create_obj_info info = {};
	int i, status = 0;

	/* NOTE(review): info.hmc_info and info.privileged are never populated
	 * in this copy of the function -- presumably lost during extraction;
	 * confirm against the upstream driver before relying on this body.
	 */
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		/* PBLE objects are created later by the PBLE manager. */
		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
			continue;
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
			info.rsrc_type = iw_hmc_obj_types[i];
			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
			info.add_sd_cnt = 0;
			status = irdma_create_hmc_obj_type(dev, &info);
			if (status) {
				ibdev_dbg(to_ibdev(dev),
					  "ERR: create obj type %d status = %d\n",
					  iw_hmc_obj_types[i], status);
				break;
			}
		}
		/* GEN_1 hardware has no HMC objects beyond the timer type. */
		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
			break;
	}

	if (!status)
		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0,
							   dev->hmc_fn_id,
							   true, true);

	/* Unwind: destroy the object types created before the failure. */
	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
						     dev->hmc_info, privileged,
						     false);
	}

	return status;
}
/* NOTE(review): extraction splice -- the declarations below belong to
 * irdma_obj_aligned_mem(), but the loop that follows (cqp_requests wait
 * queues) is the interior of irdma_create_cqp(); the real body of this
 * function and the head of irdma_create_cqp() were lost. Restore both
 * from the upstream driver.
 */
/** * irdma_obj_aligned_mem - get aligned memory from device allocated memory * @rf: RDMA PCI function * @memptr: points to the memory addresses * @size: size of memory needed * @mask: mask for the aligned memory * * Get aligned memory of the requested size and * update the memptr to point to the new aligned memory * Return 0 if successful, otherwise return no memory error
*/ staticint irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, u32 size,
u32 mask)
{ unsignedlong va, newva; unsignedlong extra;
/* init the waitqueue of the cqp_requests and add them to the list */ for (i = 0; i < sqsize; i++) {
init_waitqueue_head(&cqp->cqp_requests[i].waitq);
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
}
init_waitqueue_head(&cqp->remove_wq); return 0;
/* NOTE(review): extraction artifact -- only the signature and local
 * declarations of irdma_create_ccq() survive; the body is missing and
 * must be restored from the upstream driver.
 */
/** * irdma_create_ccq - create control cq * @rf: RDMA PCI function * * Return 0, if the ccq and the resources associated with it * are successfully created, otherwise return error
*/ staticint irdma_create_ccq(struct irdma_pci_f *rf)
{ struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_ccq_init_info info = {}; struct irdma_ccq *ccq = &rf->ccq; int status;
/**
 * irdma_alloc_set_mac - set up a mac address table entry
 * @iwdev: irdma device
 *
 * Allocate a mac ip entry and add it to the hw table Return 0
 * if successful, otherwise return error
 */
static int irdma_alloc_set_mac(struct irdma_device *iwdev)
{
	int status;

	status = irdma_alloc_local_mac_entry(iwdev->rf,
					     &iwdev->mac_ip_table_idx);
	if (!status) {
		status = irdma_add_local_mac_entry(iwdev->rf,
						   (const u8 *)iwdev->netdev->dev_addr,
						   (u8)iwdev->mac_ip_table_idx);
		/* Release the allocated table slot if programming it failed. */
		if (status)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}
/* NOTE(review): extraction artifact -- only the signature and local
 * declaration of irdma_cfg_ceq_vector() survive; the body (irq request
 * and vector configuration) is missing. Restore from upstream.
 */
/** * irdma_cfg_ceq_vector - set up the msix interrupt vector for * ceq * @rf: RDMA PCI function * @iwceq: ceq associated with the vector * @ceq_id: the id number of the iwceq * @msix_vec: interrupt vector information * * Allocate interrupt resources and enable irq handling * Return 0 if successful, otherwise return error
*/ staticint irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
u32 ceq_id, struct irdma_msix_vector *msix_vec)
{ int status;
/* NOTE(review): extraction artifact -- only the signature and local
 * declarations of irdma_create_ceq() survive; the body is missing and
 * must be restored from the upstream driver.
 */
/** * irdma_create_ceq - create completion event queue * @rf: RDMA PCI function * @iwceq: pointer to the ceq resources to be created * @ceq_id: the id number of the iwceq * @vsi: SC vsi struct * * Return 0, if the ceq and the resources associated with it * are successfully created, otherwise return error
*/ staticint irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
u32 ceq_id, struct irdma_sc_vsi *vsi)
{ int status; struct irdma_ceq_init_info info = {}; struct irdma_sc_dev *dev = &rf->sc_dev;
u32 ceq_size;
/* NOTE(review): extraction artifact -- this function is truncated: the
 * "del_ceqs" label targeted by the two gotos, the success return, and the
 * closing brace are missing. Restore the tail from the upstream driver.
 */
/** * irdma_setup_ceqs - manage the device ceq's and their interrupt resources * @rf: RDMA PCI function * @vsi: VSI structure for this CEQ * * Allocate a list for all device completion event queues * Create the ceq's and configure their msix interrupt vectors * Return 0, if ceqs are successfully set up, otherwise return error
*/ staticint irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
u32 i;
u32 ceq_id; struct irdma_ceq *iwceq; struct irdma_msix_vector *msix_vec; int status;
u32 num_ceqs;
num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
/* MSI-X vector index starts at 1 (shared AEQ/CEQ0) or 2 (dedicated). */
i = (rf->msix_shared) ? 1 : 2; for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
iwceq = &rf->ceqlist[ceq_id];
status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", status); goto del_ceqs;
}
spin_lock_init(&iwceq->ce_lock);
msix_vec = &rf->iw_msixtbl[i];
iwceq->irq = msix_vec->irq;
iwceq->msix_idx = msix_vec->idx;
/* A vector-config failure destroys the just-created CEQ before bailing. */
status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); if (status) {
irdma_destroy_ceq(rf, iwceq); goto del_ceqs;
}
irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
rf->ceqs_count++;
}
/* NOTE(review): extraction artifact -- only the signature and local
 * declarations of irdma_hmc_setup() survive; the body is missing and
 * must be restored from the upstream driver.
 */
/** * irdma_hmc_setup - create hmc objects for the device * @rf: RDMA PCI function * * Set up the device private memory space for the number and size of * the hmc objects and create the objects * Return 0 if successful, otherwise return error
*/ staticint irdma_hmc_setup(struct irdma_pci_f *rf)
{ int status;
u32 qpcnt;
/**
 * irdma_rt_deinit_hw - clean up the irdma device resources
 * @iwdev: irdma device
 *
 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
 * device queues and free the pble and the hmc objects
 */
void irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);

	/* Unwind in reverse order of creation; cases fall through so that
	 * every stage reached during init is torn down.
	 */
	switch (iwdev->init_state) {
	case IP_ADDR_REGISTERED:
		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			irdma_del_local_mac_entry(iwdev->rf,
						  (u8)iwdev->mac_ip_table_idx);
		fallthrough;
	case AEQ_CREATED:
	case PBLE_CHUNK_MEM:
	case CEQS_CREATED:
	case IEQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
					     iwdev->rf->reset);
		fallthrough;
	case ILQ_CREATED:
		if (!iwdev->roce_mode)
			irdma_puda_dele_rsrc(&iwdev->vsi,
					     IRDMA_PUDA_RSRC_TYPE_ILQ,
					     iwdev->rf->reset);
		break;
	default:
		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
		break;
	}

	irdma_cleanup_cm_core(&iwdev->cm_core);
	if (iwdev->vsi.pestat) {
		irdma_vsi_stats_free(&iwdev->vsi);
		kfree(iwdev->vsi.pestat);
	}
	if (iwdev->cleanup_wq)
		destroy_workqueue(iwdev->cleanup_wq);
}
/* NOTE(review): extraction artifact -- irdma_setup_init_state() is cut off
 * after the irdma_save_msix_info() call; the remaining body and closing
 * brace are missing. Restore from the upstream driver.
 */
staticint irdma_setup_init_state(struct irdma_pci_f *rf)
{ int status;
status = irdma_save_msix_info(rf); if (status) return status;
/**
 * irdma_get_used_rsrc - determine resources used internally
 * @iwdev: irdma device
 *
 * Called at the end of open to get all internal allocations
 */
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
	/* The first clear bit equals the in-use count only while IDs are
	 * allocated contiguously from 0 -- assumed true at open time.
	 */
	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
						  iwdev->rf->max_pd);
	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
						  iwdev->rf->max_qp);
	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
						  iwdev->rf->max_cq);
	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
						  iwdev->rf->max_mr);
}
/*
 * irdma_ctrl_deinit_hw - tear down the control portion of the HW setup,
 * unwinding each init stage in reverse order via fallthrough cases.
 */
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
{
	enum init_completion_state state = rf->init_state;

	rf->init_state = INVALID_STATE;
	/* Runtime resources (AEQ, PBLE, extra CEQs) are released first. */
	if (rf->rsrc_created) {
		irdma_destroy_aeq(rf);
		irdma_destroy_pble_prm(rf->pble_rsrc);
		irdma_del_ceqs(rf);
		rf->rsrc_created = false;
	}

	switch (state) {
	case CEQ0_CREATED:
		irdma_del_ceq_0(rf);
		fallthrough;
	case CCQ_CREATED:
		irdma_destroy_ccq(rf);
		fallthrough;
	case HW_RSRC_INITIALIZED:
	case HMC_OBJS_CREATED:
		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
				      rf->reset, rf->rdma_ver);
		fallthrough;
	case CQP_CREATED:
		irdma_destroy_cqp(rf);
		fallthrough;
	case INITIAL_STATE:
		irdma_del_init_mem(rf);
		break;
	case INVALID_STATE:
	default:
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
		break;
	}
}
/* NOTE(review): extraction artifact -- the "do {" opened below is never
 * closed: the success path (remaining init steps, "return 0;" and
 * "} while (0);") between the IP_ADDR_REGISTERED assignment and the
 * dev_err() error reporting is missing. Restore from upstream before
 * building; as written the braces do not balance.
 */
/** * irdma_rt_init_hw - Initializes runtime portion of HW * @iwdev: irdma device * @l2params: qos, tc, mtu info from netdev driver * * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma * device resource objects.
*/ int irdma_rt_init_hw(struct irdma_device *iwdev, struct irdma_l2params *l2params)
{ struct irdma_pci_f *rf = iwdev->rf; struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_vsi_init_info vsi_info = {}; struct irdma_vsi_stats_info stats_info = {}; int status;
status = irdma_setup_cm_core(iwdev, rf->rdma_ver); if (status) return status;
stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); if (!stats_info.pestat) {
irdma_cleanup_cm_core(&iwdev->cm_core); return -ENOMEM;
}
stats_info.fcn_id = dev->hmc_fn_id;
status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info); if (status) {
irdma_cleanup_cm_core(&iwdev->cm_core);
kfree(stats_info.pestat); return status;
}
/* ILQ/IEQ are iWARP-only PUDA resources; RoCE mode skips them. */
do { if (!iwdev->roce_mode) {
status = irdma_initialize_ilq(iwdev); if (status) break;
iwdev->init_state = ILQ_CREATED;
status = irdma_initialize_ieq(iwdev); if (status) break;
iwdev->init_state = IEQ_CREATED;
} if (!rf->rsrc_created) {
status = irdma_setup_ceqs(rf, &iwdev->vsi); if (status) break;
iwdev->init_state = CEQS_CREATED;
status = irdma_hmc_init_pble(&rf->sc_dev,
rf->pble_rsrc); if (status) {
irdma_del_ceqs(rf); break;
}
iwdev->init_state = PBLE_CHUNK_MEM;
status = irdma_setup_aeq(rf); if (status) {
irdma_destroy_pble_prm(rf->pble_rsrc);
irdma_del_ceqs(rf); break;
}
iwdev->init_state = AEQ_CREATED;
rf->rsrc_created = true;
}
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
iwdev->init_state = IP_ADDR_REGISTERED;
/* NOTE(review): success return and loop close missing here (see above). */
dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
status, iwdev->init_state);
irdma_rt_deinit_hw(iwdev);
return status;
}
/* NOTE(review): extraction artifact -- irdma_ctrl_init_hw() is truncated:
 * the error-handling tail after "} while (0);" and the function's closing
 * brace are missing, and the text that follows this fragment belongs to a
 * different function. Restore the tail from the upstream driver.
 */
/** * irdma_ctrl_init_hw - Initializes control portion of HW * @rf: RDMA PCI function * * Create admin queues, HMC obejcts and RF resource objects
*/ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
{ struct irdma_sc_dev *dev = &rf->sc_dev; int status; do {
status = irdma_setup_init_state(rf); if (status) break;
rf->init_state = INITIAL_STATE;
status = irdma_create_cqp(rf); if (status) break;
rf->init_state = CQP_CREATED;
status = irdma_hmc_setup(rf); if (status) break;
rf->init_state = HMC_OBJS_CREATED;
status = irdma_initialize_hw_rsrc(rf); if (status) break;
rf->init_state = HW_RSRC_INITIALIZED;
status = irdma_create_ccq(rf); if (status) break;
rf->init_state = CCQ_CREATED;
/* GEN_1 has no feature-query command; keep the default FW info. */
dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT; if (rf->rdma_ver != IRDMA_GEN_1) {
status = irdma_get_rdma_features(dev); if (status) break;
}
status = irdma_setup_ceq_0(rf); if (status) break;
rf->init_state = CEQ0_CREATED; /* Handles processing of CQP completions */
rf->cqp_cmpl_wq =
alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI); if (!rf->cqp_cmpl_wq) {
status = -ENOMEM; break;
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
irdma_sc_ccq_arm(dev->ccq); return 0;
} while (0);
/* NOTE(review): orphaned fragment -- this is the tail of a QP state-change
 * helper (it builds a modify-QP "info" and applies it; presumably
 * irdma_next_iw_state -- confirm against upstream). The function's opening,
 * signature and the initialization of "info"/"term"/"state" are missing.
 */
if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
info.dont_send_term = false; if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
info.dont_send_fin = false; if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
info.reset_tcp_conn = true;
iwqp->hw_iwarp_state = state;
irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
iwqp->iwarp_state = info.next_iwarp_state;
}
/* NOTE(review): extraction artifact -- irdma_del_local_mac_entry() is cut
 * off after the CQP request allocation; the cqp_info setup, command issue
 * and closing brace are missing. Restore from the upstream driver.
 */
/** * irdma_del_local_mac_entry - remove a mac entry from the hw * table * @rf: RDMA PCI function * @idx: the index of the mac ip address to delete
*/ void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
{ struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return;
/* NOTE(review): extraction splice -- the middle of this function (the
 * cqp_info/mac-entry-info population between request allocation and
 * irdma_handle_cqp_op) is missing, so "info"/"cqp_info" are declared but
 * never used here. Restore the missing lines from the upstream driver.
 */
/** * irdma_add_local_mac_entry - add a mac ip address entry to the * hw table * @rf: RDMA PCI function * @mac_addr: pointer to mac address * @idx: the index of the mac ip address to add
*/ int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{ struct irdma_local_mac_entry_info *info; struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(iwcqp, cqp_request);
return status;
}
/* NOTE(review): extraction artifact -- irdma_alloc_local_mac_entry() is cut
 * off after the CQP request allocation; the remainder of the body and the
 * closing brace are missing. Restore from the upstream driver.
 */
/** * irdma_alloc_local_mac_entry - allocate a mac entry * @rf: RDMA PCI function * @mac_tbl_idx: the index of the new mac address * * Allocate a mac address entry and update the mac_tbl_idx * to hold the index of the newly created mac address * Return 0 if successful, otherwise return error
*/ int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
{ struct irdma_cqp *iwcqp = &rf->cqp; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int status = 0;
cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true); if (!cqp_request) return -ENOMEM;
/* NOTE(review): orphaned fragment -- this is the tail of an APBVT entry
 * release helper (drops a use count under apbvt_lock and deletes the HW
 * entry when it reaches zero); its signature and opening are missing.
 * Confirm the enclosing function against the upstream driver.
 */
spin_lock_irqsave(&cm_core->apbvt_lock, flags); if (--entry->use_cnt) {
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags); return;
}
hash_del(&entry->hlist); /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to * protect against race where add APBVT CQP can race ahead of the delete * APBVT for same port.
*/
irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
kfree(entry);
spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
}
/* NOTE(review): extraction artifact -- only the signature (with the fused
 * "constunsignedchar" token) and local declarations of
 * irdma_manage_arp_cache() survive; the body is missing and must be
 * restored from the upstream driver.
 */
/** * irdma_manage_arp_cache - manage hw arp cache * @rf: RDMA PCI function * @mac_addr: mac address ptr * @ip_addr: ip addr for arp cache * @ipv4: flag inicating IPv4 * @action: add, delete or modify
*/ void irdma_manage_arp_cache(struct irdma_pci_f *rf, constunsignedchar *mac_addr,
u32 *ip_addr, bool ipv4, u32 action)
{ struct irdma_add_arp_cache_entry_info *info; struct irdma_cqp_request *cqp_request; struct cqp_cmds_info *cqp_info; int arp_index;
/*
 * NOTE(review): the remainder of this file was replaced during extraction by
 * non-code text (a German website disclaimer: "Die Informationen auf dieser
 * Webseite wurden nach bestem Wissen sorgfältig zusammengestellt ..."). The
 * original tail of irdma_manage_arp_cache() and any following functions are
 * missing and must be restored from the upstream driver source.
 */