/*
* Copyright (c) 2016-2017 Hisilicon Limited.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
#define CREATE_TRACE_POINTS
#include "hns_roce_trace.h"
enum {
CMD_RST_PRC_OTHERS,
CMD_RST_PRC_SUCCESS,
CMD_RST_PRC_EBUSY,
};
enum ecc_resource_type {
ECC_RESOURCE_QPC,
ECC_RESOURCE_CQC,
ECC_RESOURCE_MPT,
ECC_RESOURCE_SRQC,
ECC_RESOURCE_GMV,
ECC_RESOURCE_QPC_TIMER,
ECC_RESOURCE_CQC_TIMER,
ECC_RESOURCE_SCCC,
ECC_RESOURCE_COUNT,
};
static const struct {
const char *name;
u8 read_bt0_op;
u8 write_bt0_op;
} fmea_ram_res[] = {
{ "ECC_RESOURCE_QPC" ,
HNS_ROCE_CMD_READ_QPC_BT0, HNS_ROCE_CMD_WRITE_QPC_BT0 },
{ "ECC_RESOURCE_CQC" ,
HNS_ROCE_CMD_READ_CQC_BT0, HNS_ROCE_CMD_WRITE_CQC_BT0 },
{ "ECC_RESOURCE_MPT" ,
HNS_ROCE_CMD_READ_MPT_BT0, HNS_ROCE_CMD_WRITE_MPT_BT0 },
{ "ECC_RESOURCE_SRQC" ,
HNS_ROCE_CMD_READ_SRQC_BT0, HNS_ROCE_CMD_WRITE_SRQC_BT0 },
/* ECC_RESOURCE_GMV is handled by cmdq, not mailbox */
{ "ECC_RESOURCE_GMV" ,
0, 0 },
{ "ECC_RESOURCE_QPC_TIMER" ,
HNS_ROCE_CMD_READ_QPC_TIMER_BT0, HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 },
{ "ECC_RESOURCE_CQC_TIMER" ,
HNS_ROCE_CMD_READ_CQC_TIMER_BT0, HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 },
{ "ECC_RESOURCE_SCCC" ,
HNS_ROCE_CMD_READ_SCCC_BT0, HNS_ROCE_CMD_WRITE_SCCC_BT0 },
};
static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg)
{
dseg->lkey = cpu_to_le32(sg->lkey);
dseg->addr = cpu_to_le64(sg->addr);
dseg->len = cpu_to_le32(sg->length);
}
/*
* mapped-value = 1 + real-value
* The hns wr opcode real values start from 0. To distinguish an
* initialized map entry from an uninitialized one, 1 is added to the
* real value when defining the mapping, so an entry is valid whenever
* its mapped value is greater than 0.
*/
#define HR_OPC_MAP(ib_key, hr_key) \
[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
static const u32 hns_roce_op_code[] = {
HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
HR_OPC_MAP(SEND, SEND),
HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
HR_OPC_MAP(RDMA_READ, RDMA_READ),
HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};
static u32 to_hr_opcode(u32 ib_opcode)
{
if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
return HNS_ROCE_V2_WQE_OP_MASK;
return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
HNS_ROCE_V2_WQE_OP_MASK;
}
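/* Build the fast-register MR (FRMR) segment that immediately follows the
* RC send WQE, translating the ib_reg_wr access flags and PBL layout
* into the hardware segment fields.
*/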
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_reg_wr *wr)
{
struct hns_roce_wqe_frmr_seg *fseg =
(void *)rc_sq_wqe + sizeof (struct hns_roce_v2_rc_send_wqe);
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
u64 pbl_ba;
/* use ib_access_flags */
hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
hr_reg_write_bool(fseg, FRMR_ATOMIC,
wr->access & IB_ACCESS_REMOTE_ATOMIC);
hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
/* The RC WQE fields are reused here: msg_len/inv_key carry the PBL base
* address and byte_16/byte_20 carry the MR length.
*/
pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
hr_reg_clear(fseg, FRMR_BLK_MODE);
hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
hr_reg_clear(fseg, FRMR_ZBVA);
}
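/* An atomic WQE consists of the RC send WQE, one data segment describing
* the local buffer, and an atomic segment carrying the swap/compare or
* fetch-and-add operands.
*/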
static void set_atomic_seg(const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
unsigned int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg =
(void *)rc_sq_wqe + sizeof (struct hns_roce_v2_rc_send_wqe);
struct hns_roce_wqe_atomic_seg *aseg =
(void *)dseg + sizeof (struct hns_roce_v2_wqe_data_seg);
set_data_seg_v2(dseg, wr->sg_list);
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
} else {
aseg->fetchadd_swap_data =
cpu_to_le64(atomic_wr(wr)->compare_add);
aseg->cmp_data = 0;
}
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
}
static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
unsigned int *sge_idx, u32 msg_len)
{
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
unsigned int i = 0;
unsigned int len;
void *addr;
void *dseg;
if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) {
ibdev_err(ibdev,
"not enough extended sge space for inline data.\n");
return -EINVAL;
}
dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
len = wr->sg_list[0].length;
addr = (void *)(unsigned long )(wr->sg_list[0].addr);
/* When copying data to the extended sge space, the remaining length in
* the page may not be long enough for the current user sge, so the data
* has to be split into several parts: one in the first page and the
* rest in the subsequent pages.
*/
while (1) {
if (len <= left_len_in_pg) {
memcpy(dseg, addr, len);
idx += len / HNS_ROCE_SGE_SIZE;
i++;
if (i >= wr->num_sge)
break ;
left_len_in_pg -= len;
len = wr->sg_list[i].length;
addr = (void *)(unsigned long )(wr->sg_list[i].addr);
dseg += len;
} else {
memcpy(dseg, addr, left_len_in_pg);
len -= left_len_in_pg;
addr += left_len_in_pg;
idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
dseg = hns_roce_get_extend_sge(qp,
idx & (qp->sge.sge_cnt - 1));
left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
}
}
*sge_idx = idx;
return 0;
}
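/* Copy the remaining valid (non-zero length) SGEs into the extended SGE
* space, which is addressed as a ring of qp->sge.sge_cnt entries.
*/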
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
unsigned int *sge_ind, unsigned int cnt)
{
struct hns_roce_v2_wqe_data_seg *dseg;
unsigned int idx = *sge_ind;
while (cnt > 0) {
dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
if (likely(sge->length)) {
set_data_seg_v2(dseg, sge);
idx++;
cnt--;
}
sge++;
}
*sge_ind = idx;
}
static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
int mtu = ib_mtu_enum_to_int(qp->path_mtu);
if (mtu < 0 || len > qp->max_inline_data || len > mtu) {
ibdev_err(&hr_dev->ib_dev,
"invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n" ,
len, qp->max_inline_data, mtu);
return false ;
}
return true ;
}
static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
unsigned int *sge_idx)
{
struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned int curr_idx = *sge_idx;
void *dseg = rc_sq_wqe;
unsigned int i;
int ret;
if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
ibdev_err(ibdev, "invalid inline parameters!\n" );
return -EINVAL;
}
if (!check_inl_data_len(qp, msg_len))
return -EINVAL;
dseg += sizeof (struct hns_roce_v2_rc_send_wqe);
if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
for (i = 0; i < wr->num_sge; i++) {
memcpy(dseg, ((void *)wr->sg_list[i].addr),
wr->sg_list[i].length);
dseg += wr->sg_list[i].length;
}
} else {
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE);
ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
if (ret)
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx);
}
*sge_idx = curr_idx;
return 0;
}
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
unsigned int *sge_ind,
unsigned int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg =
(void *)rc_sq_wqe + sizeof (struct hns_roce_v2_rc_send_wqe);
struct hns_roce_qp *qp = to_hr_qp(ibqp);
int j = 0;
int i;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
!!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
}
}
} else {
for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
j++;
}
}
set_extend_sge(qp, wr->sg_list + i, sge_ind,
valid_num_sge - HNS_ROCE_SGE_IN_WQE);
}
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);
return 0;
}
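/* Sends are rejected while the QP is still in RESET/INIT/RTR state and
* while the device is being reset.
*/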
static int check_send_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR))
return -EINVAL;
else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
return -EIO;
return 0;
}
static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
unsigned int *sge_len)
{
unsigned int valid_num = 0;
unsigned int len = 0;
int i;
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
len += wr->sg_list[i].length;
valid_num++;
}
}
*sge_len = len;
return valid_num;
}
static __le32 get_immtdata(const struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
default :
return 0;
}
}
static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
const struct ib_send_wr *wr)
{
u32 ib_op = wr->opcode;
if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
return -EINVAL;
ud_sq_wqe->immtdata = get_immtdata(wr);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
return 0;
}
static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
struct hns_roce_ah *ah)
{
struct ib_device *ib_dev = ah->ibah.device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
ud_sq_wqe->sgid_index = ah->av.gid_index;
memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
return 0;
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id);
return 0;
}
static inline int set_ud_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
unsigned int owner_bit)
{
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
unsigned int valid_num_sge;
u32 msg_len = 0;
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
ret = set_ud_opcode(ud_sq_wqe, wr);
if (WARN_ON_ONCE(ret))
return ret;
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE,
!!(wr->send_flags & IB_SEND_SIGNALED));
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE,
!!(wr->send_flags & IB_SEND_SOLICITED));
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX,
curr_idx & (qp->sge.sge_cnt - 1));
ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
qp->qkey : ud_wr(wr)->remote_qkey);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn);
ret = fill_ud_av(ud_sq_wqe, ah);
if (ret)
return ret;
qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
/*
* The pipeline can sequentially post all valid WQEs into WQ buffer,
* including new WQEs waiting for the doorbell to update the PI again.
* Therefore, the owner bit of WQE MUST be updated after all fields
* and extSGEs have been written into DDR instead of cache.
*/
if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
dma_wmb();
*sge_idx = curr_idx;
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit);
return 0;
}
static int set_rc_opcode(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
const struct ib_send_wr *wr)
{
u32 ib_op = wr->opcode;
int ret = 0;
rc_sq_wqe->immtdata = get_immtdata(wr);
switch (ib_op) {
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
break ;
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
break ;
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
break ;
case IB_WR_REG_MR:
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
set_frmr_seg(rc_sq_wqe, reg_wr(wr));
else
ret = -EOPNOTSUPP;
break ;
case IB_WR_SEND_WITH_INV:
rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
break ;
default :
ret = -EINVAL;
}
if (unlikely(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op));
return ret;
}
static inline int set_rc_wqe(struct hns_roce_qp *qp,
const struct ib_send_wr *wr,
void *wqe, unsigned int *sge_idx,
unsigned int owner_bit)
{
struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
unsigned int valid_num_sge;
u32 msg_len = 0;
int ret;
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
if (WARN_ON_ONCE(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
(wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE,
(wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
curr_idx & (qp->sge.sge_cnt - 1));
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
if (msg_len != ATOMIC_WR_LEN)
return -EINVAL;
set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
} else if (wr->opcode != IB_WR_REG_MR) {
ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
&curr_idx, valid_num_sge);
if (ret)
return ret;
}
/*
* The pipeline can sequentially post all valid WQEs into WQ buffer,
* including new WQEs waiting for the doorbell to update the PI again.
* Therefore, the owner bit of WQE MUST be updated after all fields
* and extSGEs have been written into DDR instead of cache.
*/
if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
dma_wmb();
*sge_idx = curr_idx;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit);
return ret;
}
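/* Ring the SQ doorbell with the current producer index, or trigger a
* flush CQE instead if the QP has entered the error state.
*/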
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
} else {
struct hns_roce_v2_db sq_db = {};
hr_reg_write(&sq_db, DB_TAG, qp->qpn);
hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
hr_reg_write(&sq_db, DB_PI, qp->sq.head);
hr_reg_write(&sq_db, DB_SL, qp->sl);
hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
}
}
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *qp)
{
if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
} else {
if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
*qp->rdb.db_record =
qp->rq.head & V2_DB_PRODUCER_IDX_M;
} else {
struct hns_roce_v2_db rq_db = {};
hr_reg_write(&rq_db, DB_TAG, qp->qpn);
hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
hr_reg_write(&rq_db, DB_PI, qp->rq.head);
hns_roce_write64(hr_dev, (__le32 *)&rq_db,
qp->rq.db_reg);
}
}
}
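/* Write a whole 64-byte WQE to the Direct WQE register space as eight
* consecutive 64-bit writes; skipped while doorbells are disabled or the
* hardware is resetting.
*/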
static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int i;
if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
writeq_relaxed(*(val + i), dest + i);
}
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
void *wqe)
{
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
if (unlikely(qp->state == IB_QPS_ERR)) {
flush_cqe(hr_dev, qp);
return ;
}
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H,
qp->sl >> HNS_ROCE_SL_SHIFT);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head);
hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
unsigned long flags = 0;
unsigned int owner_bit;
unsigned int sge_idx;
unsigned int wqe_idx;
void *wqe = NULL;
u32 nreq;
int ret;
spin_lock_irqsave(&qp->sq.lock, flags);
ret = check_send_valid(hr_dev, qp);
if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
sge_idx = qp->next_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
}
wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
if (unlikely(wr->num_sge > qp->sq.max_gs)) {
ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n" ,
wr->num_sge, qp->sq.max_gs);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
wqe = hns_roce_get_send_wqe(qp, wqe_idx);
qp->sq.wrid[wqe_idx] = wr->wr_id;
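/* The owner bit toggles each time the SQ producer index wraps around
* the WQE ring, letting hardware distinguish new WQEs from stale ones.
*/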
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
/* RC and UD share the same DirectWQE field layout */
((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
/* Build the WQE according to the QP type */
if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
else
ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
trace_hns_sq_wqe(qp->qpn, wqe_idx, wqe, 1 << qp->sq.wqe_shift,
wr->wr_id, TRACE_SQ);
if (unlikely(ret)) {
*bad_wr = wr;
goto out;
}
}
out:
if (likely(nreq)) {
qp->sq.head += nreq;
qp->next_sge = sge_idx;
if (nreq == 1 && !ret &&
(qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
write_dwqe(hr_dev, qp, wqe);
else
update_sq_db(hr_dev, qp);
}
spin_unlock_irqrestore(&qp->sq.lock, flags);
return ret;
}
static int check_recv_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
return -EIO;
if (hr_qp->state == IB_QPS_RESET)
return -EINVAL;
return 0;
}
static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
u32 max_sge, bool rsv)
{
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
u32 i, cnt;
for (i = 0, cnt = 0; i < wr->num_sge; i++) {
/* Skip zero-length sge */
if (!wr->sg_list[i].length)
continue ;
set_data_seg_v2(dseg + cnt, wr->sg_list + i);
cnt++;
}
/* Fill a reserved sge to make hw stop reading remaining segments */
if (rsv) {
dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg[cnt].addr = 0;
dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
} else {
/* Clear remaining segments to make ROCEE ignore sges */
if (cnt < max_sge)
memset(dseg + cnt, 0,
(max_sge - cnt) * HNS_ROCE_SGE_SIZE);
}
}
static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
u32 wqe_idx, u32 max_sge)
{
void *wqe = NULL;
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
trace_hns_rq_wqe(hr_qp->qpn, wqe_idx, wqe, 1 << hr_qp->rq.wqe_shift,
wr->wr_id, TRACE_RQ);
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
u32 wqe_idx, nreq, max_sge;
unsigned long flags;
int ret;
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ret = check_recv_valid(hr_dev, hr_qp);
if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
hr_qp->ibqp.recv_cq))) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
}
if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n" ,
wr->num_sge, max_sge);
ret = -EINVAL;
*bad_wr = wr;
goto out;
}
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
}
out:
if (likely(nreq)) {
hr_qp->rq.head += nreq;
update_rq_db(hr_dev, hr_qp);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
return ret;
}
static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}
static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
return hns_roce_buf_offset(idx_que->mtr.kmem,
n << idx_que->entry_shift);
}
static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
/* always called with interrupts disabled. */
spin_lock(&srq->lock);
bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
srq->idx_que.tail++;
spin_unlock(&srq->lock);
}
static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
struct hns_roce_idx_que *idx_que = &srq->idx_que;
return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}
static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
const struct ib_recv_wr *wr)
{
struct ib_device *ib_dev = srq->ibsrq.device;
if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ib_dev,
"failed to check sge, wr->num_sge = %d, max_sge = %u.\n" ,
wr->num_sge, max_sge);
return -EINVAL;
}
if (unlikely(hns_roce_srqwq_overflow(srq))) {
ibdev_err(ib_dev,
"failed to check srqwq status, srqwq is full.\n" );
return -ENOMEM;
}
return 0;
}
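/* Allocate a free WQE index from the SRQ index-queue bitmap. */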
static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
struct hns_roce_idx_que *idx_que = &srq->idx_que;
u32 pos;
pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
if (unlikely(pos == srq->wqe_cnt))
return -ENOSPC;
bitmap_set(idx_que->bitmap, pos, 1);
*wqe_idx = pos;
return 0;
}
static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
struct hns_roce_idx_que *idx_que = &srq->idx_que;
unsigned int head;
__le32 *buf;
head = idx_que->head & (srq->wqe_cnt - 1);
buf = get_idx_buf(idx_que, head);
*buf = cpu_to_le32(wqe_idx);
idx_que->head++;
}
static void update_srq_db(struct hns_roce_srq *srq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
struct hns_roce_v2_db db = {};
hr_reg_write(&db, DB_TAG, srq->srqn);
hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
hr_reg_write(&db, DB_PI, srq->idx_que.head);
hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
}
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
unsigned long flags;
int ret = 0;
u32 max_sge;
u32 wqe_idx;
void *wqe;
u32 nreq;
spin_lock_irqsave(&srq->lock, flags);
max_sge = srq->max_gs - srq->rsv_sge;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
ret = check_post_srq_valid(srq, max_sge, wr);
if (ret) {
*bad_wr = wr;
break ;
}
ret = get_srq_wqe_idx(srq, &wqe_idx);
if (unlikely(ret)) {
*bad_wr = wr;
break ;
}
wqe = get_srq_wqe_buf(srq, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
fill_wqe_idx(srq, wqe_idx);
srq->wrid[wqe_idx] = wr->wr_id;
trace_hns_srq_wqe(srq->srqn, wqe_idx, wqe, 1 << srq->wqe_shift,
wr->wr_id, TRACE_SRQ);
}
if (likely(nreq)) {
if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
*srq->rdb.db_record = srq->idx_que.head &
V2_DB_PRODUCER_IDX_M;
else
update_srq_db(srq);
}
spin_unlock_irqrestore(&srq->lock, flags);
return ret;
}
static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
/* When a hardware reset has completed at least once, we should stop
* sending mailbox/cmq/doorbell operations to the hardware. If we are
* currently in .init_instance(), or at the HNAE3_INIT_CLIENT stage of
* the soft reset process, we should exit with an error; the
* HNAE3_INIT_CLIENT related process can then roll back operations such
* as notifying the hardware to free resources, and it will exit with an
* error to tell the NIC driver to reschedule the soft reset process
* once again.
*/
hr_dev->is_reset = true ;
hr_dev->dis_db = true ;
if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
instance_stage == HNS_ROCE_STATE_INIT)
return CMD_RST_PRC_EBUSY;
return CMD_RST_PRC_SUCCESS;
}
static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long val;
int ret;
/* When a hardware reset is detected, we should stop sending mailbox/cmq/
* doorbell operations to the hardware. If we are currently in
* .init_instance(), or at the HNAE3_INIT_CLIENT stage of the soft reset
* process, we should exit with an error; the HNAE3_INIT_CLIENT related
* process can then roll back operations such as notifying the hardware
* to free resources, and it will exit with an error to tell the NIC
* driver to reschedule the soft reset process once again.
*/
hr_dev->dis_db = true ;
ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
HW_RESET_TIMEOUT_US, false , handle);
if (!ret)
hr_dev->is_reset = true ;
if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
instance_stage == HNS_ROCE_STATE_INIT)
return CMD_RST_PRC_EBUSY;
return CMD_RST_PRC_SUCCESS;
}
static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
/* When a software reset is detected in .init_instance(), we should stop
* sending mailbox/cmq/doorbell operations to the hardware and exit with
* an error.
*/
hr_dev->dis_db = true ;
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
hr_dev->is_reset = true ;
return CMD_RST_PRC_EBUSY;
}
static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
unsigned long reset_stage; /* the current reset stage */
unsigned long reset_cnt;
bool sw_resetting;
bool hw_resetting;
/* Get reset information from the NIC driver or from the RoCE driver
* itself. The meaning of the following variables provided by the NIC
* driver is described below:
* reset_cnt -- the count of completed hardware resets.
* hw_resetting -- whether the hardware device is resetting now.
* sw_resetting -- whether the NIC's software reset process is running now.
*/
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
if (reset_cnt != hr_dev->reset_cnt)
return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
reset_stage);
hw_resetting = ops->get_cmdq_stat(handle);
if (hw_resetting)
return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
reset_stage);
sw_resetting = ops->ae_dev_resetting(handle);
if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
return hns_roce_v2_cmd_sw_resetting(hr_dev);
return CMD_RST_PRC_OTHERS;
}
static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
return true ;
if (ops->get_hw_reset_stat(handle))
return true ;
if (ops->ae_dev_resetting(handle))
return true ;
return false ;
}
static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
u32 status;
if (hr_dev->is_reset)
status = CMD_RST_PRC_SUCCESS;
else
status = check_aedev_reset_status(hr_dev, priv->handle);
*busy = (status == CMD_RST_PRC_EBUSY);
return status == CMD_RST_PRC_OTHERS;
}
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
int size = ring->desc_num * sizeof (struct hns_roce_cmq_desc);
ring->desc = dma_alloc_coherent(hr_dev->dev, size,
&ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
dma_free_coherent(hr_dev->dev,
ring->desc_num * sizeof (struct hns_roce_cmq_desc),
ring->desc, ring->desc_dma_addr);
ring->desc_dma_addr = 0;
}
static int init_csq(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *csq)
{
dma_addr_t dma;
int ret;
csq->desc_num = CMD_CSQ_DESC_NUM;
spin_lock_init(&csq->lock);
csq->flag = TYPE_CSQ;
csq->head = 0;
ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
if (ret)
return ret;
dma = csq->desc_dma_addr;
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
(u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
/* Make sure to write CI first and then PI */
roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
return 0;
}
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
ret = init_csq(hr_dev, &priv->cmq.csq);
if (ret)
dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n" , ret);
return ret;
}
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
enum hns_roce_opcode_type opcode,
bool is_read)
{
memset((void *)desc, 0, sizeof (struct hns_roce_cmq_desc));
desc->opcode = cpu_to_le16(opcode);
desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
if (is_read)
desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
else
desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}
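/* The CSQ is done when the hardware consumer index has caught up with
* the software head (producer index).
*/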
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
struct hns_roce_v2_priv *priv = hr_dev->priv;
return tail == priv->cmq.csq.head;
}
static void update_cmdq_status(struct hns_roce_dev *hr_dev)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT ||
handle->rinfo.instance_state == HNS_ROCE_STATE_INIT)
hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR;
}
static int hns_roce_cmd_err_convert_errno(u16 desc_ret)
{
struct hns_roce_cmd_errcode errcode_table[] = {
{CMD_EXEC_SUCCESS, 0},
{CMD_NO_AUTH, -EPERM},
{CMD_NOT_EXIST, -EOPNOTSUPP},
{CMD_CRQ_FULL, -EXFULL},
{CMD_NEXT_ERR, -ENOSR},
{CMD_NOT_EXEC, -ENOTBLK},
{CMD_PARA_ERR, -EINVAL},
{CMD_RESULT_ERR, -ERANGE},
{CMD_TIMEOUT, -ETIME},
{CMD_HILINK_ERR, -ENOLINK},
{CMD_INFO_ILLEGAL, -ENXIO},
{CMD_INVALID, -EBADR},
};
u16 i;
for (i = 0; i < ARRAY_SIZE(errcode_table); i++)
if (desc_ret == errcode_table[i].return_status)
return errcode_table[i].errno;
return -EIO;
}
static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hns_roce_cmdq_tx_timeout_map cmdq_tx_timeout[] = {
{HNS_ROCE_OPC_POST_MB, HNS_ROCE_OPC_POST_MB_TIMEOUT},
};
int i;
for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout); i++)
if (cmdq_tx_timeout[i].opcode == opcode)
return cmdq_tx_timeout[i].tx_timeout;
return tx_timeout;
}
static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
u32 timeout = 0;
do {
if (hns_roce_cmq_csq_done(hr_dev))
break ;
udelay(1);
} while (++timeout < tx_timeout);
}
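/* Copy the descriptors into the CSQ ring, advance the producer index to
* kick the hardware, poll for completion, and translate the
* per-descriptor return codes into errnos.
*/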
static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc,
int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
u16 desc_ret;
u32 tail;
int ret;
int i;
tail = csq->head;
for (i = 0; i < num; i++) {
trace_hns_cmdq_req(hr_dev, &desc[i]);
csq->desc[csq->head++] = desc[i];
if (csq->head == csq->desc_num)
csq->head = 0;
}
/* Write to hardware */
roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
trace_hns_cmdq_resp(hr_dev, &csq->desc[tail]);
/* check the result of hardware write back */
desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue ;
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
/* FW/HW reset or incorrect number of desc */
tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n" ,
csq->head, tail);
csq->head = tail;
update_cmdq_status(hr_dev);
ret = -EAGAIN;
}
if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
return ret;
}
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
u16 opcode = le16_to_cpu(desc->opcode);
u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
u32 rsv_tail;
int ret;
int i;
while (try_cnt) {
try_cnt--;
spin_lock_bh(&csq->lock);
rsv_tail = csq->head;
ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
try_cnt) {
spin_unlock_bh(&csq->lock);
mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
continue ;
}
for (i = 0; i < num; i++) {
desc[i] = csq->desc[rsv_tail++];
if (rsv_tail == csq->desc_num)
rsv_tail = 0;
}
spin_unlock_bh(&csq->lock);
break ;
}
if (ret)
dev_err_ratelimited(hr_dev->dev,
"Cmdq IO error, opcode = 0x%x, return = %d.\n" ,
opcode, ret);
return ret;
}
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
bool busy;
int ret;
if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
return -EIO;
if (!v2_chk_mbox_is_avail(hr_dev, &busy))
return busy ? -EBUSY : 0;
ret = __hns_roce_cmq_send(hr_dev, desc, num);
if (ret) {
if (!v2_chk_mbox_is_avail(hr_dev, &busy))
return busy ? -EBUSY : 0;
}
return ret;
}
static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
struct hns_roce_cmd_mailbox *mbox;
int ret;
mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mbox))
return PTR_ERR(mbox);
ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag);
hns_roce_free_cmd_mailbox(hr_dev, mbox);
return ret;
}
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_version *resp;
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true );
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
hr_dev->vendor_id = hr_dev->pci_dev->vendor;
return 0;
}
static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long end;
hr_dev->dis_db = true ;
dev_warn(hr_dev->dev,
"func clear is pending, device in resetting state.\n" );
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true ;
dev_info(hr_dev->dev,
"func clear success after reset.\n" );
return ;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
dev_warn(hr_dev->dev, "func clear failed.\n" );
}
static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long end;
hr_dev->dis_db = true ;
dev_warn(hr_dev->dev,
"func clear is pending, device in resetting state.\n" );
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) {
hr_dev->is_reset = true ;
dev_info(hr_dev->dev,
"func clear success after sw reset\n" );
return ;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n" );
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
int flag)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true ;
hr_dev->is_reset = true ;
dev_info(hr_dev->dev, "func clear success after reset.\n" );
return ;
}
if (ops->get_hw_reset_stat(handle)) {
func_clr_hw_resetting_state(hr_dev, handle);
return ;
}
if (ops->ae_dev_resetting(handle) &&
handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
func_clr_sw_resetting_state(hr_dev, handle);
return ;
}
if (retval && !flag)
dev_warn(hr_dev->dev,
"func clear read failed, ret = %d.\n" , retval);
dev_warn(hr_dev->dev, "func clear failed.\n" );
}
static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
{
bool fclr_write_fail_flag = false ;
struct hns_roce_func_clear *resp;
struct hns_roce_cmq_desc desc;
unsigned long end;
int ret = 0;
if (check_device_is_in_reset(hr_dev))
goto out;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false );
resp = (struct hns_roce_func_clear *)desc.data;
resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true ;
dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n" ,
ret);
goto out;
}
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
while (end) {
if (check_device_is_in_reset(hr_dev))
goto out;
msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
true );
resp->rst_funcid_en = cpu_to_le32(vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
continue ;
if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) {
if (vf_id == 0)
hr_dev->is_reset = true ;
return ;
}
}
out:
hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
}
static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
struct hns_roce_cmq_desc desc[2];
struct hns_roce_cmq_req *req_a;
req_a = (struct hns_roce_cmq_req *)desc[0].data;
hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false );
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false );
hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
int ret;
int i;
if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR)
return ;
for (i = hr_dev->func_num - 1; i >= 0; i--) {
__hns_roce_function_clear(hr_dev, i);
if (i == 0)
continue ;
ret = hns_roce_free_vf_resource(hr_dev, i);
if (ret)
ibdev_err(&hr_dev->ib_dev,
"failed to free vf resource, vf_id = %d, ret = %d.\n" ,
i, ret);
}
}
static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
false );
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
ibdev_err(&hr_dev->ib_dev,
"failed to clear extended doorbell info, ret = %d.\n" ,
ret);
return ret;
}
static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
struct hns_roce_query_fw_info *resp;
struct hns_roce_cmq_desc desc;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true );
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
resp = (struct hns_roce_query_fw_info *)desc.data;
hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
return 0;
}
static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
int ret;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
hr_dev->func_num = 1;
return 0;
}
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
true );
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
hr_dev->func_num = 1;
return ret;
}
hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
return 0;
}
static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *num_counters)
{
#define CNT_PER_DESC 3
struct hns_roce_cmq_desc *desc;
int bd_idx, cnt_idx;
__le64 *cnt_data;
int desc_num;
int ret;
int i;
if (port > hr_dev->caps.num_ports)
return -EINVAL;
desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
desc = kcalloc(desc_num, sizeof (*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
for (i = 0; i < desc_num; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i],
HNS_ROCE_OPC_QUERY_COUNTER, true );
if (i != desc_num - 1)
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
if (ret) {
ibdev_err(&hr_dev->ib_dev,
"failed to get counter, ret = %d.\n" , ret);
goto err_out;
}
for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
bd_idx = i / CNT_PER_DESC;
if (bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC &&
!(desc[bd_idx].flag & cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT)))
break ;
cnt_data = (__le64 *)&desc[bd_idx].data[0];
cnt_idx = i % CNT_PER_DESC;
stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
}
*num_counters = i;
err_out:
kfree(desc);
return ret;
}
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
u32 clock_cycles_of_1us;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false );
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
else
clock_cycles_of_1us = HNS_ROCE_1US_CFG;
hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
struct hns_roce_caps *caps = &hr_dev->caps;
enum hns_roce_opcode_type opcode;
u32 func_num;
int ret;
if (is_vf) {
opcode = HNS_ROCE_OPC_QUERY_VF_RES;
func_num = 1;
} else {
opcode = HNS_ROCE_OPC_QUERY_PF_RES;
func_num = hr_dev->func_num;
}
hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true );
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true );
ret = hns_roce_cmq_send(hr_dev, desc, 2);
if (ret)
return ret;
caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
if (is_vf) {
caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
func_num;
} else {
caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
func_num;
}
return 0;
}
static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
true );
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
return 0;
}
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
ret = load_func_res_caps(hr_dev, false );
if (ret) {
dev_err(dev, "failed to load pf res caps, ret = %d.\n" , ret);
return ret;
}
ret = load_pf_timer_res_caps(hr_dev);
if (ret)
dev_err(dev, "failed to load pf timer resource, ret = %d.\n" ,
ret);
return ret;
}
static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
ret = load_func_res_caps(hr_dev, true );
if (ret)
dev_err(dev, "failed to load vf res caps, ret = %d.\n" , ret);
return ret;
}
static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
u32 vf_id)
{
struct hns_roce_vf_switch *swt;
struct hns_roce_cmq_desc desc;
int ret;
swt = (struct hns_roce_vf_switch *)desc.data;
hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true );
swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id);
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
hr_reg_enable(swt, VF_SWITCH_ALW_LPBK);
hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK);
hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD);
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
{
u32 vf_id;
int ret;
for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
if (ret)
return ret;
}
return 0;
}
static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
{
struct hns_roce_cmq_desc desc[2];
struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
struct hns_roce_caps *caps = &hr_dev->caps;
hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false );
desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false );
hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
vf_id * caps->gmv_bt_num);
} else {
hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
vf_id * caps->sgid_bt_num);
hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
vf_id * caps->smac_bt_num);
}
return hns_roce_cmq_send(hr_dev, desc, 2);
}
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
u32 func_num = max_t(u32, 1, hr_dev->func_num);
u32 vf_id;
int ret;
for (vf_id = 0; vf_id < func_num; vf_id++) {
ret = config_vf_hem_resource(hr_dev, vf_id);
if (ret) {
dev_err(hr_dev->dev,
"failed to config vf-%u hem res, ret = %d.\n" ,
vf_id, ret);
return ret;
}
}
return 0;
}
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
struct hns_roce_caps *caps = &hr_dev->caps;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false );
hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
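/* Compute the extra page-size shift (*buf_page_size or *bt_page_size)
* needed so that obj_num objects fit within the available base-address
* table entries for the given hop number.
*/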
static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
{
u64 obj_per_chunk;
u64 bt_chunk_size = PAGE_SIZE;
u64 buf_chunk_size = PAGE_SIZE;
u64 obj_per_chunk_default = buf_chunk_size / obj_size;
*buf_page_size = 0;
*bt_page_size = 0;
switch (hop_num) {
case 3:
obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
(bt_chunk_size / BA_BYTE_LEN) *
(bt_chunk_size / BA_BYTE_LEN) *
obj_per_chunk_default;
break ;
case 2:
obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
(bt_chunk_size / BA_BYTE_LEN) *
obj_per_chunk_default;
break ;
case 1:
obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
obj_per_chunk_default;
break ;
case HNS_ROCE_HOP_NUM_0:
obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
break ;
default :
pr_err("table %u not support hop_num = %u!\n" , hem_type,
hop_num);
return ;
}
if (hem_type >= HEM_TYPE_MTT)
*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
else
*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}
static void set_hem_page_size(struct hns_roce_dev *hr_dev)
{
struct hns_roce_caps *caps = &hr_dev->caps;
/* EQ */
caps->eqe_ba_pg_sz = 0;
caps->eqe_buf_pg_sz = 0;
/* Link Table */
caps->llm_buf_pg_sz = 0;
/* MR */
caps->mpt_ba_pg_sz = 0;
caps->mpt_buf_pg_sz = 0;
caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
caps->pbl_buf_pg_sz = 0;
calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
HEM_TYPE_MTPT);
/* QP */
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
caps->qpc_timer_ba_pg_sz = 0;
caps->qpc_timer_buf_pg_sz = 0;
caps->sccc_ba_pg_sz = 0;
caps->sccc_buf_pg_sz = 0;
caps->mtt_ba_pg_sz = 0;
caps->mtt_buf_pg_sz = 0;
calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
HEM_TYPE_QPC);
if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
&caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
/* CQ */
caps->cqc_ba_pg_sz = 0;
caps->cqc_buf_pg_sz = 0;
caps->cqc_timer_ba_pg_sz = 0;
caps->cqc_timer_buf_pg_sz = 0;
caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
caps->cqe_buf_pg_sz = 0;
calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
HEM_TYPE_CQC);
calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
/* SRQ */
if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
caps->srqc_ba_pg_sz = 0;
caps->srqc_buf_pg_sz = 0;
caps->srqwqe_ba_pg_sz = 0;
caps->srqwqe_buf_pg_sz = 0;
caps->idx_ba_pg_sz = 0;
caps->idx_buf_pg_sz = 0;
calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
caps->srqc_hop_num, caps->srqc_bt_num,
&caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
HEM_TYPE_SRQC);
calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
&caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
&caps->idx_ba_pg_sz, HEM_TYPE_IDX);
}
/* GMV */
caps->gmv_ba_pg_sz = 0;
caps->gmv_buf_pg_sz = 0;
}
/* Apply all loaded caps before setting to hardware */
static void apply_func_caps(struct hns_roce_dev *hr_dev)
{
#define MAX_GID_TBL_LEN 256
struct hns_roce_caps *caps = &hr_dev->caps;
struct hns_roce_v2_priv *priv = hr_dev->priv;
/* The following configurations do not need to be queried from firmware. */
caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
if (!caps->num_comp_vectors)
caps->num_comp_vectors =
min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
(u32)priv->handle->rinfo.num_vectors -
(HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
/* The following configurations will be overwritten */
caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
/* The following configurations are not obtained from firmware */
caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
/* It's meaningless to support an excessively large gid_table_len, as the
* sgid_index fields in the kernel struct ib_global_route and the
* userspace struct ibv_global_route are u8/uint8_t (0-255).
*/
caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN,
caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz);
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
caps->gid_table_len[0] /= func_num;
}
if (hr_dev->is_vf) {
caps->default_aeq_arm_st = 0x3;
caps->default_ceq_arm_st = 0x3;
caps->default_ceq_max_cnt = 0x1;
caps->default_ceq_period = 0x10;
caps->default_aeq_max_cnt = 0x1;
caps->default_aeq_period = 0x10;
}
set_hem_page_size(hr_dev);
}
static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
struct hns_roce_caps *caps = &hr_dev->caps;
struct hns_roce_query_pf_caps_a *resp_a;
struct hns_roce_query_pf_caps_b *resp_b;
struct hns_roce_query_pf_caps_c *resp_c;
struct hns_roce_query_pf_caps_d *resp_d;
struct hns_roce_query_pf_caps_e *resp_e;
struct hns_roce_query_pf_caps_f *resp_f;
enum hns_roce_opcode_type cmd;
int ctx_hop_num;
int pbl_hop_num;
int cmd_num;
int ret;
int i;
cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
for (i = 0; i < cmd_num - 1; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true );
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true );
desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
if (ret)
return ret;
resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
caps->irrl_entry_sz = resp_b->irrl_entry_sz;
caps->trrl_entry_sz = resp_b->trrl_entry_sz;
caps->cqc_entry_sz = resp_b->cqc_entry_sz;
caps->srqc_entry_sz = resp_b->srqc_entry_sz;
caps->idx_entry_sz = resp_b->idx_entry_sz;
caps->sccc_sz = resp_b->sccc_sz;
caps->max_mtu = resp_b->max_mtu;
caps->min_cqes = resp_b->min_cqes;
caps->min_wqes = resp_b->min_wqes;
caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
caps->pkey_table_len[0] = resp_b->pkey_table_len;
caps->phy_num_uars = resp_b->phy_num_uars;
ctx_hop_num = resp_b->ctx_hop_num;
pbl_hop_num = resp_b->pbl_hop_num;
caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS);
caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS);
caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
HNS_ROCE_CAP_FLAGS_EX_SHIFT;
caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS);
caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);