// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include <linux/etherdevice.h>
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "ws.h"
#include "protos.h"
/**
 * irdma_get_qp_from_list - get next qp from a list
 * @head: Listhead of qp's
 * @qp: current qp
 *
 * Returns the successor of @qp on @head, the first qp when @qp is
 * NULL, or NULL when the list is empty or @qp is the last element.
 */
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
					   struct irdma_sc_qp *qp)
{
	struct list_head *next;

	if (list_empty(head))
		return NULL;

	if (!qp)
		next = head->next;
	else
		next = qp->list.next;

	/* wrapped back to the list head: no more qp's */
	if (next == head)
		return NULL;

	return container_of(next, struct irdma_sc_qp, list);
}
/**
 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
 * @vsi: the VSI struct pointer
 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
 */
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
{
	struct irdma_sc_qp *cur_qp = NULL;
	u8 pri;

	for (pri = 0; pri < IRDMA_MAX_USER_PRIORITY; pri++) {
		mutex_lock(&vsi->qos[pri].qos_mutex);
		for (cur_qp = irdma_get_qp_from_list(&vsi->qos[pri].qplist,
						     cur_qp);
		     cur_qp;
		     cur_qp = irdma_get_qp_from_list(&vsi->qos[pri].qplist,
						     cur_qp)) {
			if (op == IRDMA_OP_SUSPEND) {
				/* issue cqp suspend command */
				if (!irdma_cqp_qp_suspend_resume(cur_qp, op))
					atomic_inc(&vsi->qp_suspend_reqs);
			} else if (op == IRDMA_OP_RESUME) {
				if (!cur_qp->dev->ws_add(vsi, pri)) {
					cur_qp->qs_handle =
						vsi->qos[cur_qp->user_pri].qs_handle;
					irdma_cqp_qp_suspend_resume(cur_qp, op);
				} else {
					/* ws_add failed: resume then move qp to error */
					irdma_cqp_qp_suspend_resume(cur_qp, op);
					irdma_modify_qp_to_err(cur_qp);
				}
			}
		}
		mutex_unlock(&vsi->qos[pri].qos_mutex);
	}
}
/**
 * irdma_set_qos_info - set per-priority QoS attributes of a VSI
 * @vsi: the VSI struct pointer
 * @l2p: l2 parameters supplied by the LAN driver
 */
static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
			       struct irdma_l2params *l2p)
{
	u8 up;

	vsi->qos_rel_bw = l2p->vsi_rel_bw;
	vsi->qos_prio_type = l2p->vsi_prio_type;
	vsi->dscp_mode = l2p->dscp_mode;
	if (l2p->dscp_mode) {
		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
		/* in DSCP mode each user priority maps 1:1 to a TC */
		for (up = 0; up < IRDMA_MAX_USER_PRIORITY; up++)
			l2p->up2tc[up] = up;
	}
	for (up = 0; up < IRDMA_MAX_USER_PRIORITY; up++) {
		u8 tc = l2p->up2tc[up];

		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
			vsi->qos[up].qs_handle = l2p->qs_handle_list[up];
		vsi->qos[up].traffic_class = tc;
		vsi->qos[up].rel_bw = l2p->tc_info[tc].rel_bw;
		vsi->qos[up].prio_type = l2p->tc_info[tc].prio_type;
		vsi->qos[up].valid = false;
	}
}
/**
 * irdma_change_l2params - given the new l2 parameters, change all qp
 * @vsi: RDMA VSI pointer
 * @l2params: New parameters from l2
 */
void irdma_change_l2params(struct irdma_sc_vsi *vsi,
			   struct irdma_l2params *l2params)
{
	if (l2params->mtu_changed) {
		vsi->mtu = l2params->mtu;
		/* IEQ buffers are sized by MTU; rebuild them */
		if (vsi->ieq)
			irdma_reinitialize_ieq(vsi);
	}

	if (l2params->tc_changed) {
		vsi->tc_change_pending = false;
		irdma_set_qos_info(vsi, l2params);
		irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
	}
}
/**
 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	ibdev_dbg(to_ibdev(qp->dev),
		  "DCB: DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
		  qp->on_qoslist);

	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (qp->on_qoslist) {
		list_del(&qp->list);
		qp->on_qoslist = false;
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}
/**
 * irdma_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void irdma_qp_add_qos(struct irdma_sc_qp *qp)
{
	struct irdma_sc_vsi *vsi = qp->vsi;

	ibdev_dbg(to_ibdev(qp->dev),
		  "DCB: DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
		  qp->on_qoslist);

	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
	if (!qp->on_qoslist) {
		/* inherit the qs_handle configured for this user priority */
		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
		qp->on_qoslist = true;
		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
	}
	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
}
/**
 * irdma_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: User/Kernel ABI version
 */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver)
{
	pd->dev = dev;
	pd->pd_id = pd_id;
	pd->abi_ver = abi_ver;
}
/**
 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
					struct irdma_add_arp_cache_entry_info *info,
					u64 scratch, bool post_sq)
{
	u64 temp;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 8, info->reach_max);
	set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));

	temp = info->arp_index |
	       FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	       FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, info->permanent ? 1 : 0) |
	       FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
	       FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, temp);

	print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_del_arp_cache_entry - dele arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
					u16 arp_index, bool post_sq)
{
	u64 temp;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	/* no ENTRYVALID bit: this marks the indexed entry invalid */
	temp = arp_index |
	       FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
	       FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, temp);

	print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
			     IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
				       struct irdma_apbvt_info *info,
				       u64 scratch, bool post_sq)
{
	u64 temp;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 16, info->port);

	/* info->add selects add vs. delete of the port entry */
	temp = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
	       FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
	       FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, temp);

	print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.
 * For passive connections, when listener is created, it will
 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive
 * connections) or sent (active connections), this routine is
 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
 * and quad is passed in info.
 *
 * When iwarp connection is done and its state moves to RTS, the
 * quad hash entry in the hardware will point to iwarp's qp
 * number and requires no calls from the driver.
 */
static int
irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
struct irdma_qhash_table_info *info,
u64 scratch, bool post_sq)
{
__le64 *wqe;
u64 qw1 = 0;
u64 qw2 = 0;
u64 temp;
struct irdma_sc_vsi *vsi = info->vsi;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOMEM;
/* QW0: MAC address of the local interface */
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
/* QW1 starts with the QP number and destination TCP port */
qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
/* destination IP: IPv4 uses only the ADDR3 slot, IPv6 fills all four */
if (info->ipv4_valid) {
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
} else {
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
set_64bit_val(wqe, 48,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
}
/* QW2: qs_handle of the user priority, plus VLAN id when valid */
qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
vsi->qos[info->user_pri].qs_handle);
if (info->vlan_valid)
qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
set_64bit_val(wqe, 16, qw2);
/* established entries carry the full quad: add source port and IP */
if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
if (!info->ipv4_valid) {
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
} else {
set_64bit_val(wqe, 32,
FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
}
}
set_64bit_val(wqe, 8, qw1);
/* QW3: opcode/flag word; written last, carries the valid bit */
temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
dma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, 24, temp);
print_hex_dump_debug("WQE: MANAGE_QHASH WQE" , DUMP_PREFIX_OFFSET, 16,
8, wqe, IRDMA_CQP_WQE_SIZE * 8, false );
if (post_sq)
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
 * irdma_sc_qp_init - initialize qp
 * @qp: sc qp
 * @info: initialization qp info
 *
 * Returns 0 on success, -EINVAL on out-of-range parameters, or the
 * error from irdma_uk_qp_init()/irdma_fragcnt_to_wqesize_rq().
 */
int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
{
int ret_code;
u32 pble_obj_cnt;
u16 wqe_size;
/* reject SQ/RQ fragment counts beyond what the HW supports */
if (info->qp_uk_init_info.max_sq_frag_cnt >
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
info->qp_uk_init_info.max_rq_frag_cnt >
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
return -EINVAL;
qp->dev = info->pd->dev;
qp->vsi = info->vsi;
qp->ieq_qp = info->vsi->exception_lan_q;
qp->sq_pa = info->sq_pa;
qp->rq_pa = info->rq_pa;
qp->hw_host_ctx_pa = info->host_ctx_pa;
qp->q2_pa = info->q2_pa;
qp->shadow_area_pa = info->shadow_area_pa;
qp->q2_buf = info->q2;
qp->pd = info->pd;
qp->hw_host_ctx = info->host_ctx;
info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
if (ret_code)
return ret_code;
qp->virtual_map = info->virtual_map;
/* with virtual_map, sq_pa/rq_pa are PBLE indices and must be in range */
pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
(info->virtual_map && info->rq_pa >= pble_obj_cnt))
return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
IRDMA_QUEUE_TYPE_SQ_RQ);
ibdev_dbg(to_ibdev(qp->dev),
"WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n" ,
qp->hw_sq_size, qp->qp_uk.sq_ring.size);
/*
 * GEN1 with user ABI version > 4 always uses the fixed
 * IRDMA_WQE_SIZE_128 RQ WQE; otherwise the size is derived from the
 * max RQ fragment count (and ret_code, 0 here, may be overwritten).
 */
if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
wqe_size = IRDMA_WQE_SIZE_128;
else
ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
&wqe_size);
if (ret_code)
return ret_code;
qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
(wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
ibdev_dbg(to_ibdev(qp->dev),
"WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n" ,
qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
qp->sq_tph_val = info->sq_tph_val;
qp->rq_tph_val = info->rq_tph_val;
qp->sq_tph_en = info->sq_tph_en;
qp->rq_tph_en = info->rq_tph_en;
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
/* qs_handle comes from the VSI's QoS entry for this user priority */
qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
return 0;
}
/**
 * irdma_sc_qp_create - create qp
 * @qp: sc qp
 * @info: qp create info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
		       u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp = qp->dev->cqp;
	__le64 *wqe;
	u64 hdr;

	/* qp id must lie within the range backed by the QP HMC object */
	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
	    qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
		return -EINVAL;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid ? 1 : 0) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
			 info->arp_cache_idx_valid) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_qp_modify - modify qp cqp wqe
 * @qp: sc qp
 * @info: modify qp info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
u64 scratch, bool post_sq)
{
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
u8 term_actions = 0;
u8 term_len = 0;
cqp = qp->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOMEM;
/*
 * On transition to TERMINATE, work out which of TERM/FIN still needs
 * to be sent. NOTE(review): dont_send_fin selecting SEND_TERM_ONLY
 * (and dont_send_term selecting SEND_FIN_ONLY) looks inverted but
 * encodes the message that remains to be sent; confirm against the
 * IRDMAQP_TERM_* definitions before changing.
 */
if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
if (info->dont_send_fin)
term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
if (info->dont_send_term)
term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
/* a TERM message is going out: its length is needed in the WQE */
if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
term_len = info->termlen;
}
set_64bit_val(wqe, 8,
FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
set_64bit_val(wqe, 40, qp->shadow_area_pa);
/* QW3: opcode plus all validity/action flags; written last */
hdr = qp->qp_uk.qp_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
info->cached_var_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
info->remove_hash_idx) |
FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
info->arp_cache_idx_valid) |
FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, 24, hdr);
print_hex_dump_debug("WQE: QP_MODIFY WQE" , DUMP_PREFIX_OFFSET, 16, 8,
wqe, IRDMA_CQP_WQE_SIZE * 8, false );
if (post_sq)
irdma_sc_cqp_post_sq(cqp);
return 0;
}
/**
 * irdma_sc_qp_destroy - cqp destroy qp
 * @qp: sc qp
 * @scratch: u64 saved to be used during cqp completion
 * @remove_hash_idx: flag if to remove hash idx
 * @ignore_mw_bnd: memory window bind flag
 * @post_sq: flag for cqp db to ring
 */
int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
{
	struct irdma_sc_cqp *cqp = qp->dev->cqp;
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
	set_64bit_val(wqe, 40, qp->shadow_area_pa);

	hdr = qp->qp_uk.qp_id |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_get_encoded_ird_size -
 * @ird_size: IRD size
 * The ird from the connection is rounded to a supported HW setting and then encoded
 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based
 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input
 */
static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
{
	/* double and round up to a power of two; 0 maps to the minimum (4) */
	unsigned long rounded = ird_size ? roundup_pow_of_two(2 * ird_size) : 4;

	switch (rounded) {
	case 256:
		return IRDMA_IRD_HW_SIZE_256;
	case 128:
		return IRDMA_IRD_HW_SIZE_128;
	case 64:
	case 32:
		return IRDMA_IRD_HW_SIZE_64;
	case 16:
	case 8:
		return IRDMA_IRD_HW_SIZE_16;
	default:
		/* includes 4 and any unsupported value */
		return IRDMA_IRD_HW_SIZE_4;
	}
}
/**
 * irdma_sc_qp_setctx_roce - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 *
 * Fills the RoCE QP host context (qp_ctx) quadword by quadword from
 * the qp, roce_info and udp_info fields; byte offsets below are into
 * the context buffer.
 */
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info)
{
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp;
u8 push_mode_en;
u32 push_idx;
roce_info = info->roce_info;
udp = info->udp_info;
qp->user_pri = info->user_pri;
/* push mode is enabled only when a valid push page index is assigned */
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
push_mode_en = 0;
push_idx = 0;
} else {
push_mode_en = 1;
push_idx = qp->push_idx;
}
set_64bit_val(qp_ctx, 0,
FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
set_64bit_val(qp_ctx, 8, qp->sq_pa);
set_64bit_val(qp_ctx, 16, qp->rq_pa);
/*
 * When a congestion-control mode needing ECN is on and the low two
 * tos bits (the ECN codepoint) are clear, set the ECN codepoint.
 */
if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
!(udp->tos & 0x03))
udp->tos |= ECN_CODE_PT_VAL;
set_64bit_val(qp_ctx, 24,
FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
set_64bit_val(qp_ctx, 32,
FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
set_64bit_val(qp_ctx, 40,
FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
set_64bit_val(qp_ctx, 48,
FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
set_64bit_val(qp_ctx, 56,
FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
set_64bit_val(qp_ctx, 64,
FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
/* PSN tracking state for the RC transport */
set_64bit_val(qp_ctx, 80,
FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
set_64bit_val(qp_ctx, 88,
FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
set_64bit_val(qp_ctx, 96,
FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
set_64bit_val(qp_ctx, 112,
FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
set_64bit_val(qp_ctx, 128,
FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
set_64bit_val(qp_ctx, 136,
FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
set_64bit_val(qp_ctx, 144,
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
/* MAC address occupies the upper bytes of this quadword (<< 16) */
set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
set_64bit_val(qp_ctx, 160,
FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
set_64bit_val(qp_ctx, 168,
FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
set_64bit_val(qp_ctx, 176,
FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
set_64bit_val(qp_ctx, 184,
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
set_64bit_val(qp_ctx, 192,
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
set_64bit_val(qp_ctx, 200,
FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
set_64bit_val(qp_ctx, 208,
FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
print_hex_dump_debug("WQE: QP_HOST CTX WQE" , DUMP_PREFIX_OFFSET, 16,
8, qp_ctx, IRDMA_QP_CTX_SIZE, false );
}
/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
					  bool post_sq)
{
	u64 hdr;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	/* header-only WQE: just the opcode and valid bit */
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
			     IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_add_local_mac_entry - add mac enry
 * @cqp: struct for cqp hw
 * @info:mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
					struct irdma_local_mac_entry_info *info,
					u64 scratch, bool post_sq)
{
	u64 hdr;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));

	hdr = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_del_local_mac_entry - cqp wqe to dele local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac adde delete
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
					u16 entry_idx, u8 ignore_ref_count,
					bool post_sq)
{
	u64 hdr;
	__le64 *wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);

	if (!wqe)
		return -ENOMEM;

	/* FREEENTRY distinguishes delete from add within the same opcode */
	hdr = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
	      FIELD_PREP(IRDMA_CQPSQ_OPCODE,
			 IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
	      FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
	      FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
			     IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 *
 * Fills the iWARP QP host context. qw0/qw3/qw7/qw16 are accumulated
 * across the iwarp and tcp sections and written once at the end; all
 * other quadwords are written directly at their byte offsets.
 */
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info)
{
struct irdma_iwarp_offload_info *iw;
struct irdma_tcp_offload_info *tcp;
struct irdma_sc_dev *dev;
u8 push_mode_en;
u32 push_idx;
u64 qw0, qw3, qw7 = 0, qw16 = 0;
u64 mac = 0;
iw = info->iwarp_info;
tcp = info->tcp_info;
dev = qp->dev;
/* receive markers: record marker length and starting sequence number */
if (iw->rcv_mark_en) {
qp->pfpdu.marker_len = 4;
qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
}
qp->user_pri = info->user_pri;
/* push mode is enabled only when a valid push page index is assigned */
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
push_mode_en = 0;
push_idx = 0;
} else {
push_mode_en = 1;
push_idx = qp->push_idx;
}
qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
set_64bit_val(qp_ctx, 8, qp->sq_pa);
set_64bit_val(qp_ctx, 16, qp->rq_pa);
qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
/* GEN1 keeps the source MAC table index in QW3 */
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
qp->src_mac_addr_idx);
set_64bit_val(qp_ctx, 136,
FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
set_64bit_val(qp_ctx, 168,
FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
set_64bit_val(qp_ctx, 176,
FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
/* iWARP offload section */
if (info->iwarp_info_valid) {
qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
iw->err_rq_idx_valid);
qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
set_64bit_val(qp_ctx, 144,
FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
/* GEN2+ carries the full MAC here; GEN1 uses the index in QW3 */
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
mac = ether_addr_to_u64(iw->mac_addr);
set_64bit_val(qp_ctx, 152,
mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
/*
 * Marker offsets fall back to the TCP snd/rcv sequence numbers
 * when the supplied offset is zero and tcp info is present.
 */
set_64bit_val(qp_ctx, 160,
FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
}
/* TCP offload section */
if (info->tcp_info_valid) {
qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
tcp->insert_vlan_tag) |
FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
/* set the ECN codepoint (low two tos bits) when ECN/DCTCP is on */
if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
tcp->tos |= ECN_CODE_PT_VAL;
qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
}
set_64bit_val(qp_ctx, 32,
FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
set_64bit_val(qp_ctx, 40,
FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
set_64bit_val(qp_ctx, 48,
FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
tcp->ignore_tcp_opt) |
FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
tcp->ignore_tcp_uns_opt) |
FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
set_64bit_val(qp_ctx, 72,
FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
set_64bit_val(qp_ctx, 80,
FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
set_64bit_val(qp_ctx, 88,
FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
set_64bit_val(qp_ctx, 96,
FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
set_64bit_val(qp_ctx, 104,
FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
set_64bit_val(qp_ctx, 112,
FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
set_64bit_val(qp_ctx, 120,
FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
set_64bit_val(qp_ctx, 184,
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
set_64bit_val(qp_ctx, 192,
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
set_64bit_val(qp_ctx, 200,
FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
set_64bit_val(qp_ctx, 208,
FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
}
/* flush the accumulated quadwords */
set_64bit_val(qp_ctx, 0, qw0);
set_64bit_val(qp_ctx, 24, qw3);
set_64bit_val(qp_ctx, 56, qw7);
set_64bit_val(qp_ctx, 128, qw16);
print_hex_dump_debug("WQE: QP_HOST CTX" , DUMP_PREFIX_OFFSET, 16, 8,
qp_ctx, IRDMA_QP_CTX_SIZE, false );
}
/**
 * irdma_sc_alloc_stag - mr stag alloc
 * @dev: sc device struct
 * @info: stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
			       struct irdma_allocate_stag_info *info,
			       u64 scratch, bool post_sq)
{
	struct irdma_sc_cqp *cqp = dev->cqp;
	enum irdma_page_size page_size;
	__le64 *wqe;
	u64 hdr;

	/* zero-length regions are only valid for all-memory stags */
	if (!info->total_len && !info->all_memory)
		return -EINVAL;

	switch (info->page_size) {
	case 0x40000000:	/* 1G */
		page_size = IRDMA_PAGE_SIZE_1G;
		break;
	case 0x200000:		/* 2M */
		page_size = IRDMA_PAGE_SIZE_2M;
		break;
	default:
		page_size = IRDMA_PAGE_SIZE_4K;
		break;
	}

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
	set_64bit_val(wqe, 40,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));

	/* chunked (pbl-backed) stags carry their first pble index */
	if (info->chunk_size)
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a REG_MR CQP WQE for a non-shared memory region.  Returns 0
 * on success, -EINVAL for a bad length, unsupported page size or an
 * out-of-range first PBLE index, or -ENOMEM when no CQP SQ WQE is
 * available.
 */
static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
				      struct irdma_reg_ns_stag_info *info,
				      u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 fbo;
	struct irdma_sc_cqp *cqp;
	u64 hdr;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;
	enum irdma_page_size page_size;

	/* a zero total_len is only legal for an all-memory registration */
	if (!info->total_len && !info->all_memory)
		return -EINVAL;

	/* unlike stag alloc, an unsupported page size is rejected here */
	if (info->page_size == 0x40000000)
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)
		page_size = IRDMA_PAGE_SIZE_2M;
	else if (info->page_size == 0x1000)
		page_size = IRDMA_PAGE_SIZE_4K;
	else
		return -EINVAL;

	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	/* the first PBLE index must lie inside the PBLE HMC object */
	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
		return -EINVAL;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	/* fbo = offset of the VA within its page (zero-based stags) */
	fbo = info->va & (info->page_size - 1);
	set_64bit_val(wqe, 0,
		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
		       info->va : fbo));
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
	if (!info->chunk_size) {
		/* physically contiguous region: PA goes directly in the WQE */
		set_64bit_val(wqe, 32, info->reg_addr_pa);
		set_64bit_val(wqe, 48, 0);
	} else {
		/* chunked region: reference the first PBLE instead */
		set_64bit_val(wqe, 32, 0);
		set_64bit_val(wqe, 48,
			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
	}
	set_64bit_val(wqe, 40, info->hmc_fcn_index);
	set_64bit_val(wqe, 56, 0);

	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a DEALLOC_STAG CQP WQE.  Returns 0 on success or -ENOMEM
 * when no CQP SQ WQE is available.
 */
static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
				 struct irdma_dealloc_stag_info *info,
				 u64 scratch, bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));

	/* info->mr distinguishes MR stags from MW stags */
	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @info: memory window allocation information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Allocates a memory window stag via an ALLOC_STAG CQP WQE with the
 * MW-type fields set (note: IRDMA_CQPSQ_STAG_MR is deliberately left
 * clear here, marking the stag as a window rather than a region).
 * Returns 0 on success or -ENOMEM when no CQP SQ WQE is available.
 */
static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
			     struct irdma_mw_alloc_info *info, u64 scratch,
			     bool post_sq)
{
	u64 hdr;
	struct irdma_sc_cqp *cqp;
	__le64 *wqe;

	cqp = dev->cqp;
	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOMEM;

	set_64bit_val(wqe, 8,
		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));

	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
	      FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
			 info->mw1_bind_dont_vldt_key) |
	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}
/**
 * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
 * @qp: sc qp struct
 * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 *
 * Unlike the CQP-based stag operations above, this builds the
 * FAST_REGISTER WQE directly on the QP's send queue.  Returns 0 on
 * success or -ENOMEM when no SQ WQE is available.
 */
int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
			      struct irdma_fast_reg_stag_info *info,
			      bool post_sq)
{
	u64 temp, hdr;
	__le64 *wqe;
	u32 wqe_idx;
	enum irdma_page_size page_size;
	struct irdma_post_sq_info sq_info = {};

	/* map the byte page size to the HW encoding; default is 4K */
	if (info->page_size == 0x40000000)
		page_size = IRDMA_PAGE_SIZE_1G;
	else if (info->page_size == 0x200000)
		page_size = IRDMA_PAGE_SIZE_2M;
	else
		page_size = IRDMA_PAGE_SIZE_4K;

	sq_info.wr_id = info->wr_id;
	sq_info.signaled = info->signaled;

	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
					 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
	if (!wqe)
		return -ENOMEM;

	irdma_clr_wqes(&qp->qp_uk, wqe_idx);

	ibdev_dbg(to_ibdev(qp->dev),
		  "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
		  info->wr_id, wqe_idx,
		  &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);

	/* VA-based stags carry the VA; zero-based stags carry the fbo */
	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
		(uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	/* bits above 15 of the first PBLE index go in a separate HI field */
	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
			 info->first_pm_pbl_index >> 16);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
		      /* NOTE(review): the PBLADDR mask is pre-shifted by the
		       * HW page shift so the page-aligned PA lands in the
		       * field without shifting the value — confirm vs HW spec
		       */
		      FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
	set_64bit_val(wqe, 16,
		      info->total_len |
		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));

	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
	      FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
	      FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
	      FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
	      FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
	if (post_sq)
		irdma_uk_qp_post_wr(&qp->qp_uk);

	return 0;
}
/**
 * irdma_sc_gen_rts_ae - request AE generated after RTS
 * @qp: sc qp struct
 *
 * Writes two WQEs directly into SQ slots 1 and 2: a local-fenced NOP
 * followed by a GEN_RTS_AE, so the HW raises an async event once the
 * QP reaches RTS.
 */
static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;

	/* slot 1: NOP with local fence set */
	wqe = qp_uk->sq_base[1].elem;
	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
			     16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);

	/* slot 2: the actual GEN_RTS_AE request */
	wqe = qp_uk->sq_base[2].elem;
	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
}
/**
 * irdma_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 *
 * Builds a streaming-mode SEND WQE in the first SQ slot pointing at
 * the LSMM buffer, then optionally requests the RTS AE.  GEN_1 HW
 * uses a different fragment layout without a valid bit in qword 1.
 */
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
		set_64bit_val(wqe, 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
	} else {
		set_64bit_val(wqe, 8,
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
	}
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
	      FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
	      FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);

	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
		irdma_sc_gen_rts_ae(qp);
}
/**
 * irdma_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: Do read0 or write0
 *
 * Posts a zero-length RDMA READ (with placeholder stag/to values) or
 * a zero-length RDMA WRITE as the first WQE on the SQ, then
 * optionally requests the RTS AE.
 */
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
{
	__le64 *wqe;
	u64 hdr;
	struct irdma_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 16, 0);
	if (read) {
		/* 0xabcd / 0x1234 are dummy stag values for the 0-length read */
		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
			set_64bit_val(wqe, 8,
				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
		} else {
			set_64bit_val(wqe, 8,
				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
		}
		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
		      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	} else {
		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
			set_64bit_val(wqe, 8, 0);
		} else {
			set_64bit_val(wqe, 8,
				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
		}
		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
	}

	dma_wmb(); /* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
			     IRDMA_QP_WQE_MIN_SIZE, false);

	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
		irdma_sc_gen_rts_ae(qp);
}
/**
 * irdma_iwarp_opcode - determine if incoming is rdma layer
 * @info: aeq info for the packet
 * @pkt: packet for error
 *
 * Returns the low nibble of the second MPA 16-bit word when q2 data
 * was written, otherwise 0xffffffff (no opcode).
 */
static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
{
	u32 op = 0xffffffff;
	__be16 *mpa_hdr;

	if (info->q2_data_written) {
		mpa_hdr = (__be16 *)pkt;
		op = ntohs(mpa_hdr[1]) & 0xf;
	}

	return op;
}
/**
 * irdma_locate_mpa - return pointer to mpa in the pkt
 * @pkt: packet with data
 *
 * Steps past the ethernet, IP and TCP headers using the IHL nibble
 * and the TCP data-offset nibble to size the latter two.
 */
static u8 *irdma_locate_mpa(u8 *pkt)
{
	u8 *cur = pkt + IRDMA_MAC_HLEN;	/* past ethernet header */

	cur += 4 * (cur[0] & 0x0f);		/* IP header: IHL in 32-bit words */
	cur += 4 * ((cur[12] >> 4) & 0x0f);	/* TCP header: data offset words */

	return cur;
}
/**
 * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
 * @qp: sc qp ptr for pkt
 * @hdr: term hdr
 * @opcode: flush opcode for termhdr
 * @layer_etype: error layer + error type
 * @err: error code in the header
 *
 * Records the flush opcode on the QP and stamps the layer/etype and
 * error code into the terminate header being built.
 */
static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
				   struct irdma_terminate_hdr *hdr,
				   enum irdma_flush_opcode opcode,
				   u8 layer_etype, u8 err)
{
	hdr->layer_etype = layer_etype;
	hdr->error_code = err;
	qp->flush_code = opcode;
}
/**
 * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
 * @pkt: ptr to mpa in offending pkt
 * @hdr: term hdr
 * @copy_len: offending pkt length to be copied to term hdr
 * @is_tagged: DDP tagged or untagged
 *
 * Inspects the DDP segment of the offending packet and decides how
 * much of it (length word, DDP header, RDMA header) to echo back in
 * the terminate message, setting the matching hdrct flags.
 */
static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
				       int *copy_len, u8 *is_tagged)
{
	u16 ddp_seg_len = ntohs(*(__be16 *)pkt);

	if (!ddp_seg_len)
		return;

	/* always copy the 2-byte DDP segment length */
	*copy_len = 2;
	hdr->hdrct = DDP_LEN_FLAG;

	if (pkt[2] & 0x80) {
		/* tagged buffer model */
		*is_tagged = 1;
		if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
			*copy_len += TERM_DDP_LEN_TAGGED;
			hdr->hdrct |= DDP_HDR_FLAG;
		}
		return;
	}

	/* untagged buffer model */
	if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
		*copy_len += TERM_DDP_LEN_UNTAGGED;
		hdr->hdrct |= DDP_HDR_FLAG;
	}
	if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN &&
	    (pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE) {
		/* read requests also carry an RDMAP header worth echoing */
		*copy_len += TERM_RDMA_LEN;
		hdr->hdrct |= RDMA_HDR_FLAG;
	}
}
/**
* irdma_bld_terminate_hdr - build terminate message header
* @qp: qp associated with received terminate AE
* @info: the struct contiaing AE information
*/
static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
int copy_len = 0;
u8 is_tagged = 0;
u32 opcode;
struct irdma_terminate_hdr *termhdr;
termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
if (info->q2_data_written) {
pkt = irdma_locate_mpa(pkt);
irdma_bld_termhdr_ddp_rdma(pkt, termhdr, ©_len, &is_tagged);
}
opcode = irdma_iwarp_opcode(info, pkt);
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
switch (info->ae_id) {
case IRDMA_AE_AMP_UNALLOCATED_STAG:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
(LAYER_DDP << 4) | DDP_TAGGED_BUF,
DDP_TAGGED_INV_STAG);
else
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_INV_STAG);
break ;
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
if (info->q2_data_written)
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
(LAYER_DDP << 4) | DDP_TAGGED_BUF,
DDP_TAGGED_BOUNDS);
else
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_INV_BOUNDS);
break ;
case IRDMA_AE_AMP_BAD_PD:
switch (opcode) {
case IRDMA_OP_TYPE_RDMA_WRITE:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
(LAYER_DDP << 4) | DDP_TAGGED_BUF,
DDP_TAGGED_UNASSOC_STAG);
break ;
case IRDMA_OP_TYPE_SEND_INV:
case IRDMA_OP_TYPE_SEND_SOL_INV:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_CANT_INV_STAG);
break ;
default :
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_UNASSOC_STAG);
}
break ;
case IRDMA_AE_AMP_INVALID_STAG:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_INV_STAG);
break ;
case IRDMA_AE_AMP_BAD_QP:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_QN);
break ;
case IRDMA_AE_AMP_BAD_STAG_KEY:
case IRDMA_AE_AMP_BAD_STAG_INDEX:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
switch (opcode) {
case IRDMA_OP_TYPE_SEND_INV:
case IRDMA_OP_TYPE_SEND_SOL_INV:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
RDMAP_CANT_INV_STAG);
break ;
default :
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
RDMAP_INV_STAG);
}
break ;
case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
case IRDMA_AE_PRIV_OPERATION_DENIED:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_ACCESS);
break ;
case IRDMA_AE_AMP_TO_WRAP:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_TO_WRAP);
break ;
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
break ;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
(LAYER_DDP << 4) | DDP_CATASTROPHIC,
DDP_CATASTROPHIC_LOCAL);
break ;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_DDP_NO_L_BIT:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
(LAYER_DDP << 4) | DDP_CATASTROPHIC,
DDP_CATASTROPHIC_LOCAL);
break ;
case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_MSN_RANGE);
break ;
case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_TOO_LONG);
break ;
case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
if (is_tagged)
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_TAGGED_BUF,
DDP_TAGGED_INV_DDP_VER);
else
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_DDP_VER);
break ;
case IRDMA_AE_DDP_UBE_INVALID_MO:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_MO);
break ;
case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_MSN_NO_BUF);
break ;
case IRDMA_AE_DDP_UBE_INVALID_QN:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
DDP_UNTAGGED_INV_QN);
break ;
case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
RDMAP_INV_RDMAP_VER);
break ;
default :
irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
RDMAP_UNSPECIFIED);
break ;
}
if (copy_len)
memcpy(termhdr + 1, pkt, copy_len);
return sizeof (struct irdma_terminate_hdr) + copy_len;
}
/**
 * irdma_terminate_send_fin() - Send fin for terminate message
 * @qp: qp associated with received terminate AE
 *
 * Moves the QP to TERMINATE state with the FIN-only flag so only a
 * TCP FIN is sent, without a terminate payload.
 */
void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
{
	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
			     IRDMAQP_TERM_SEND_FIN_ONLY, 0);
}
/**
 * irdma_terminate_connection() - Bad AE and send terminate to remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Builds the terminate header, arms the terminate timer, marks the
 * terminate as sent (so it is only sent once), then moves the QP to
 * TERMINATE state carrying the terminate payload length.
 */
void irdma_terminate_connection(struct irdma_sc_qp *qp,
				struct irdma_aeqe_info *info)
{
	u8 termlen = 0;

	/* only one terminate may be outstanding per QP */
	if (qp->term_flags & IRDMA_TERM_SENT)
		return;

	termlen = irdma_bld_terminate_hdr(qp, info);
	irdma_terminate_start_timer(qp);
	qp->term_flags |= IRDMA_TERM_SENT;
	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
			     IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
}
/**
 * irdma_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Validates the received terminate frame (HW skipped validation when
 * q2 data was written); a malformed frame triggers a terminate of our
 * own.  A valid remote-fault terminate completes immediately, any
 * other cause answers with a FIN and waits on the terminate timer.
 */
void irdma_terminate_received(struct irdma_sc_qp *qp,
			      struct irdma_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct irdma_terminate_hdr *termhdr;

	mpa = (__be32 *)irdma_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			/* terminate must arrive on DDP queue number 2 */
			aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			irdma_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= IRDMA_TERM_RCVD;
	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
	/* terminate header follows the 5 MPA/DDP control words */
	termhdr = (struct irdma_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		irdma_terminate_done(qp, 0);
	} else {
		irdma_terminate_start_timer(qp);
		irdma_terminate_send_fin(qp);
	}
}
/* no-op ws_add used when the VSI has no qset registration callback */
static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{
	return 0;
}
/* no-op ws_remove used when the VSI has no qset registration callback */
static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
{
	/* intentionally empty */
}
/* no-op ws_reset used when the VSI has no qset registration callback */
static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
{
	/* intentionally empty */
}
/**
 * irdma_sc_vsi_init - Init the vsi structure
 * @vsi: pointer to vsi structure to initialize
 * @info: the info used to initialize the vsi struct
 *
 * Copies callbacks and parameters from @info, sets up per-priority
 * QoS mutexes and QP lists, and installs either the real or the no-op
 * work-scheduler ops depending on whether a register_qset callback
 * was supplied.
 */
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
		       struct irdma_vsi_init_info *info)
{
	int qos_idx;

	vsi->dev = info->dev;
	vsi->back_vsi = info->back_vsi;
	vsi->register_qset = info->register_qset;
	vsi->unregister_qset = info->unregister_qset;
	vsi->mtu = info->params->mtu;
	vsi->exception_lan_q = info->exception_lan_q;
	vsi->vsi_idx = info->pf_data_vsi_num;

	irdma_set_qos_info(vsi, info->params);
	for (qos_idx = 0; qos_idx < IRDMA_MAX_USER_PRIORITY; qos_idx++) {
		mutex_init(&vsi->qos[qos_idx].qos_mutex);
		INIT_LIST_HEAD(&vsi->qos[qos_idx].qplist);
	}

	if (!vsi->register_qset) {
		/* no LAN qset hooks: fall back to the no-op scheduler ops */
		vsi->dev->ws_add = irdma_null_ws_add;
		vsi->dev->ws_remove = irdma_null_ws_remove;
		vsi->dev->ws_reset = irdma_null_ws_reset;
	} else {
		vsi->dev->ws_add = irdma_ws_add;
		vsi->dev->ws_remove = irdma_ws_remove;
		vsi->dev->ws_reset = irdma_ws_reset;
	}
}
/**
 * irdma_get_stats_idx - Return stats index
 * @vsi: pointer to the vsi
 *
 * GEN2+ HW allocates a stats instance through the CQP; GEN1 (or a
 * failed CQP allocation) falls back to claiming the first free slot
 * in the device's stats index bitmap.  Returns the index, or
 * IRDMA_INVALID_STATS_IDX when none is available.
 */
static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
{
	struct irdma_stats_inst_info stats_info = {};
	struct irdma_sc_dev *dev = vsi->dev;
	u8 slot;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2 &&
	    !irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
				      &stats_info))
		return stats_info.stats_idx;

	for (slot = 0; slot < IRDMA_MAX_STATS_COUNT_GEN_1; slot++) {
		if (dev->stats_idx_array[slot])
			continue;
		dev->stats_idx_array[slot] = true;
		return slot;
	}

	return IRDMA_INVALID_STATS_IDX;
}
/**
 * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
 * @vsi: vsi structure where hw_regs are set
 *
 * Populate the HW stats table: each per-VSI register address is the
 * device base register plus the stats-set offset, scaled by the
 * register width (32 or 64 bit) of that statistic.
 */
static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
{
	struct irdma_sc_dev *dev = vsi->dev;
	const struct irdma_hw_stat_map *map = dev->hw_stats_map;
	u64 *stat_reg = vsi->hw_stats_regs;
	u64 *regs = dev->hw_stats_regs;
	u16 set = vsi->stats_idx;
	u16 i;

	/* First 4 stat instances are reserved for port level statistics. */
	if (vsi->stats_inst_alloc)
		set += IRDMA_FIRST_NON_PF_STAT;

	for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
		if (map[i].bitmask <= IRDMA_MAX_STATS_32)
			stat_reg[i] = regs[i] + set * sizeof(u32);
		else
			stat_reg[i] = regs[i] + set * sizeof(u64);
	}
}
/**
 * irdma_vsi_stats_init - Initialize the vsi statistics
 * @vsi: pointer to the vsi structure
 * @info: The info structure used for initialization
 *
 * Allocates the DMA gather buffer (two halves: current and last
 * gather), starts the stats timer, and optionally allocates a HW
 * stats instance.  Returns 0 on success or -ENOMEM when the DMA
 * buffer allocation fails.
 */
int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
			 struct irdma_vsi_stats_info *info)
{
	struct irdma_dma_mem *stats_buff_mem;

	vsi->pestat = info->pestat;
	vsi->pestat->hw = vsi->dev->hw;
	vsi->pestat->vsi = vsi;

	stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
	stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
	stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
						stats_buff_mem->size,
						&stats_buff_mem->pa,
						GFP_KERNEL);
	if (!stats_buff_mem->va)
		return -ENOMEM;

	/* first half: live gather area; second half: previous snapshot */
	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
	vsi->pestat->gather_info.last_gather_stats_va =
		(void *)((uintptr_t)stats_buff_mem->va +
			 IRDMA_GATHER_STATS_BUF_SIZE);

	irdma_hw_stats_start_timer(vsi);

	/* when stat allocation is not required default to fcn_id. */
	vsi->stats_idx = info->fcn_id;
	if (info->alloc_stats_inst) {
		u8 stats_idx = irdma_get_stats_idx(vsi);

		if (stats_idx != IRDMA_INVALID_STATS_IDX) {
			vsi->stats_inst_alloc = true;
			vsi->stats_idx = stats_idx;
			vsi->pestat->gather_info.use_stats_inst = true;
			vsi->pestat->gather_info.stats_inst_index = stats_idx;
		}
	}

	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		irdma_hw_stats_init_gen1(vsi);

	return 0;
}
/**
 * irdma_vsi_stats_free - Free the vsi stats
 * @vsi: pointer to the vsi structure
 *
 * Releases the HW stats instance (CQP free on GEN2+, bitmap slot on
 * GEN1), then stops the stats timer and frees the DMA gather buffer.
 */
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
	struct irdma_stats_inst_info stats_info = {};
	struct irdma_sc_dev *dev = vsi->dev;
	u8 stats_idx = vsi->stats_idx;

	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
		if (vsi->stats_inst_alloc) {
			stats_info.stats_idx = vsi->stats_idx;
			irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
						 &stats_info);
		}
	} else {
		/* GEN1: return the slot to the device-wide bitmap */
		if (vsi->stats_inst_alloc &&
		    stats_idx < vsi->dev->hw_attrs.max_stat_inst)
			vsi->dev->stats_idx_array[stats_idx] = false;
	}

	/* nothing else to tear down if stats were never initialized */
	if (!vsi->pestat)
		return;

	irdma_hw_stats_stop_timer(vsi);
	dma_free_coherent(vsi->pestat->hw->device,
			  vsi->pestat->gather_info.stats_buff_mem.size,
			  vsi->pestat->gather_info.stats_buff_mem.va,
			  vsi->pestat->gather_info.stats_buff_mem.pa);
	vsi->pestat->gather_info.stats_buff_mem.va = NULL;
}
/**
 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq) to encoded_size
 * @queue_type: queue type selected for the calculation algorithm
 *
 * The encoding is essentially log2(wqsize / 4), biased by one for the
 * CQP SQ whose HW coding starts at 1 for a size of 4 while QP WQs
 * start at 0.
 */
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
{
	u8 encoded = (queue_type == IRDMA_QUEUE_TYPE_CQP) ? 1 : 0;
	u32 depth = wqsize >> 2;

	while (depth >>= 1)
		encoded++;

	return encoded;
}
/**
* irdma_sc_gather_stats - collect the statistics
* @cqp: struct for cqp hw
* @info: gather stats info structure
* @scratch: u64 saved to be used during cqp completion
*/
static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5 C=97 H=96 G=96
¤ Dauer der Verarbeitung: 0.79 Sekunden
(vorverarbeitet)
¤
*© Formatika GbR, Deutschland