Source: verbs.c
Language: C
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
/**
* irdma_query_device - get device attributes
* @ibdev: device pointer from stack
* @props: returning device attributes
* @udata: user data
*/
static int irdma_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata)
{
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
struct pci_dev *pcidev = iwdev->rf->pcidev;
struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
if (udata->inlen || udata->outlen)
return -EINVAL;
memset(props, 0, sizeof(*props));
addrconf_addr_eui48((u8 *)&props->sys_image_guid,
iwdev->netdev->dev_addr);
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
props->vendor_id = pcidev->vendor;
props->vendor_part_id = pcidev->device;
props->hw_ver = rf->pcidev->revision;
props->page_size_cap = hw_attrs->page_size_cap;
props->max_mr_size = hw_attrs->max_mr_size;
props->max_qp = rf->max_qp - rf->used_qps;
props->max_qp_wr = hw_attrs->max_qp_wr;
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_cq = rf->max_cq - rf->used_cqs;
props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
if (rdma_protocol_roce(ibdev, 1)) {
props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
}
props->max_ah = rf->max_ah;
props->max_mcast_grp = rf->max_mcg;
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
return 0;
}
/**
* irdma_query_port - get port attributes
* @ibdev: device pointer from stack
* @port: port number for query
* @props: returning device attributes
*/
static int irdma_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
struct irdma_device *iwdev = to_iwdev(ibdev);
struct net_device *netdev = iwdev->netdev;
/* no need to zero out props here. done by caller */
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
props->lmc = 0;
props->sm_lid = 0;
props->sm_sl = 0;
if (netif_carrier_ok(netdev) && netif_running(netdev)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
ib_get_eth_speed(ibdev, port, &props->active_speed,
&props->active_width);
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
props->ip_gids = true;
props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
} else {
props->gid_tbl_len = 1;
}
props->qkey_viol_cntr = 0;
props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
return 0;
}
/**
* irdma_disassociate_ucontext - Disassociate user context
* @context: ib user context
*/
static void irdma_disassociate_ucontext(struct ib_ucontext *context)
{
}
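/**
 * irdma_mmap_legacy - mmap the hw doorbell page with a hard-coded key
 * @ucontext: user context which owns the mapping
 * @vma: user vma describing the single page to map
 */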
static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
struct vm_area_struct *vma)
{
u64 pfn;
if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
vma->vm_private_data = ucontext;
pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot), NULL);
}
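/**
 * irdma_mmap_free - free an irdma user mmap entry
 * @rdma_entry: entry being released by the rdma core
 */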
static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
kfree(entry);
}
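/**
 * irdma_user_mmap_entry_insert - allocate and insert a user mmap entry
 * @ucontext: user context which owns the entry
 * @bar_offset: offset of the page within the PCI BAR
 * @mmap_flag: mapping type, non-cached or write-combined
 * @mmap_offset: mmap key returned for use by userspace
 */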
static struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
{
struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
int ret;
if (!entry)
return NULL;
entry->bar_offset = bar_offset;
entry->mmap_flag = mmap_flag;
ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
&entry->rdma_entry, PAGE_SIZE);
if (ret) {
kfree(entry);
return NULL;
}
*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
return &entry->rdma_entry;
}
/**
* irdma_mmap - user memory map
* @context: context created during alloc
* @vma: kernel info for user memory map
*/
static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct rdma_user_mmap_entry *rdma_entry;
struct irdma_user_mmap_entry *entry;
struct irdma_ucontext *ucontext;
u64 pfn;
int ret;
ucontext = to_ucontext(context);
/* Legacy support for libi40iw with hard-coded mmap key */
if (ucontext->legacy_mode)
return irdma_mmap_legacy(ucontext, vma);
rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
if (!rdma_entry) {
ibdev_dbg(&ucontext->iwdev->ibdev,
"VERBS: pgoff[0x%lx] does not have valid entry\n",
vma->vm_pgoff);
return -EINVAL;
}
entry = to_irdma_mmap_entry(rdma_entry);
ibdev_dbg(&ucontext->iwdev->ibdev,
"VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
entry->bar_offset, entry->mmap_flag);
pfn = (entry->bar_offset +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
switch (entry->mmap_flag) {
case IRDMA_MMAP_IO_NC:
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot),
rdma_entry);
break;
case IRDMA_MMAP_IO_WC:
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_writecombine(vma->vm_page_prot),
rdma_entry);
break;
default:
ret = -EINVAL;
}
if (ret)
ibdev_dbg(&ucontext->iwdev->ibdev,
"VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
entry->bar_offset, entry->mmap_flag, ret);
rdma_user_mmap_entry_put(rdma_entry);
return ret;
}
/**
* irdma_alloc_push_page - allocate a push page for qp
* @iwqp: qp pointer
*/
static void irdma_alloc_push_page(struct irdma_qp *iwqp)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_qp *qp = &iwqp->sc_qp;
int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request)
return;
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
cqp_info->post_sq = 1;
cqp_info->in.u.manage_push_page.info.push_idx = 0;
cqp_info->in.u.manage_push_page.info.qs_handle =
qp->vsi->qos[qp->user_pri].qs_handle;
cqp_info->in.u.manage_push_page.info.free_page = 0;
cqp_info->in.u.manage_push_page.info.push_page_type = 0;
cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
if (!status && cqp_request->compl_info.op_ret_val <
iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
qp->push_idx = cqp_request->compl_info.op_ret_val;
qp->push_offset = 0;
}
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
}
/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @uctx: uverbs context pointer
* @udata: user data
*
* This keeps track of all objects associated with a particular
* user-mode client.
*/
static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
return -EINVAL;
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
goto ver_error;
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
/* GEN_1 legacy support with libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
ucontext->legacy_mode = true;
uresp.max_qps = iwdev->rf->max_qp;
uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
uresp.kernel_ver = req.userspace_ver;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen)))
return -EFAULT;
} else {
u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
ucontext->db_mmap_entry =
irdma_user_mmap_entry_insert(ucontext, bar_off,
IRDMA_MMAP_IO_NC,
&uresp.db_mmap_key);
if (!ucontext->db_mmap_entry)
return -ENOMEM;
uresp.kernel_ver = IRDMA_ABI_VER;
uresp.feature_flags = uk_attrs->feature_flags;
uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
uresp.max_hw_inline = uk_attrs->max_hw_inline;
uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
return -EFAULT;
}
}
INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
return 0;
ver_error:
ibdev_err(&iwdev->ibdev,
"Invalid userspace driver version detected. Detected version %d, should be %d\n",
req.userspace_ver, IRDMA_ABI_VER);
return -EINVAL;
}
/**
* irdma_dealloc_ucontext - deallocate the user context data structure
* @context: user context created during alloc
*/
static void irdma_dealloc_ucontext(struct ib_ucontext *context)
{
struct irdma_ucontext *ucontext = to_ucontext(context);
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
}
/**
* irdma_alloc_pd - allocate protection domain
* @pd: PD pointer
* @udata: user data
*/
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_alloc_pd_resp uresp = {};
struct irdma_sc_pd *sc_pd;
u32 pd_id = 0;
int err;
if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
return -EINVAL;
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
return err;
sc_pd = &iwpd->sc_pd;
if (udata) {
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
uresp.pd_id = pd_id;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
err = -EFAULT;
goto error;
}
} else {
irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
}
return 0;
error:
irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
return err;
}
/**
* irdma_dealloc_pd - deallocate pd
* @ibpd: ptr of pd to be deallocated
* @udata: user data
*/
static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
return 0;
}
/**
* irdma_get_pbl - Retrieve pbl from a list given a virtual
* address
* @va: user virtual address
* @pbl_list: pbl list to search in (QP's or CQ's)
*/
static struct irdma_pbl *irdma_get_pbl(unsigned long va,
struct list_head *pbl_list)
{
struct irdma_pbl *iwpbl;
list_for_each_entry(iwpbl, pbl_list, list) {
if (iwpbl->user_base == va) {
list_del(&iwpbl->list);
iwpbl->on_list = false;
return iwpbl;
}
}
return NULL;
}
/**
* irdma_clean_cqes - clean cq entries for qp
* @iwqp: qp ptr (user or kernel)
* @iwcq: cq ptr
*/
static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
{
struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
unsigned long flags;
spin_lock_irqsave(&iwcq->lock, flags);
irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
spin_unlock_irqrestore(&iwcq->lock, flags);
}
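/**
 * irdma_remove_push_mmap_entries - remove push page mmap entries of a qp
 * @iwqp: qp ptr
 */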
static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
if (iwqp->push_db_mmap_entry) {
rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
iwqp->push_db_mmap_entry = NULL;
}
if (iwqp->push_wqe_mmap_entry) {
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
iwqp->push_wqe_mmap_entry = NULL;
}
}
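/**
 * irdma_setup_push_mmap_entries - setup mmap entries for the qp push pages
 * @ucontext: user context which owns the entries
 * @iwqp: qp ptr
 * @push_wqe_mmap_key: mmap key returned for the push wqe page
 * @push_db_mmap_key: mmap key returned for the push doorbell page
 */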
static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
struct irdma_qp *iwqp,
u64 *push_wqe_mmap_key,
u64 *push_db_mmap_key)
{
struct irdma_device *iwdev = ucontext->iwdev;
u64 rsvd, bar_off;
rsvd = IRDMA_PF_BAR_RSVD;
bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
/* skip over db page */
bar_off += IRDMA_HW_PAGE_SIZE;
/* push wqe page */
bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_WC,
push_wqe_mmap_key);
if (!iwqp->push_wqe_mmap_entry)
return -ENOMEM;
/* push doorbell page */
bar_off += IRDMA_HW_PAGE_SIZE;
iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_NC,
push_db_mmap_key);
if (!iwqp->push_db_mmap_entry) {
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
return -ENOMEM;
}
return 0;
}
/**
* irdma_destroy_qp - destroy qp
* @ibqp: qp's ib pointer also to get to device's qp address
* @udata: user data
*/
static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
iwqp->sc_qp.qp_uk.destroy_pending = true ;
if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
cancel_delayed_work_sync(&iwqp->dwork_flush);
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
irdma_clean_cqes(iwqp, iwqp->iwscq);
if (iwqp->iwrcq != iwqp->iwscq)
irdma_clean_cqes(iwqp, iwqp->iwrcq);
}
}
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
irdma_remove_push_mmap_entries(iwqp);
irdma_free_qp_rsrc(iwqp);
return 0;
}
/**
* irdma_setup_virt_qp - setup for allocation of virtual qp
* @iwdev: irdma device
* @iwqp: qp ptr
* @init_info: initialize info to return
*/
static void irdma_setup_virt_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *init_info)
{
struct irdma_pbl *iwpbl = iwqp->iwpbl;
struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
iwqp->page = qpmr->sq_page;
init_info->shadow_area_pa = qpmr->shadow;
if (iwpbl->pbl_allocated) {
init_info->virtual_map = true;
init_info->sq_pa = qpmr->sq_pbl.idx;
init_info->rq_pa = qpmr->rq_pbl.idx;
} else {
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
}
}
/**
* irdma_setup_umode_qp - setup sq and rq size in user mode qp
* @udata: udata
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
* @info: initialize info to return
* @init_attr: Initial QP create attributes
*/
static int irdma_setup_umode_qp(struct ib_udata *udata,
struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr)
{
struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext, ibucontext);
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_create_qp_req req;
unsigned long flags;
int ret;
ret = ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen));
if (ret) {
ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
return ret;
}
iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
iwqp->user_mode = 1;
if (req.user_wqe_bufs) {
info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
if (!iwqp->iwpbl) {
ret = -ENODATA;
ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
return ret;
}
}
if (!ucontext->use_raw_attrs) {
/**
* Maintain backward compat with older ABI which passes sq and
* rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
* There is no way to compute the correct value of
* iwqp->max_send_wr/max_recv_wr in the kernel.
*/
iwqp->max_send_wr = init_attr->cap.max_send_wr;
iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
ukinfo->sq_size = init_attr->cap.max_send_wr;
ukinfo->rq_size = init_attr->cap.max_recv_wr;
irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
&ukinfo->rq_shift);
} else {
ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
&ukinfo->sq_shift);
if (ret)
return ret;
ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
&ukinfo->rq_shift);
if (ret)
return ret;
iwqp->max_send_wr =
(ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr =
(ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
}
irdma_setup_virt_qp(iwdev, iwqp, info);
return 0;
}
/**
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
* @info: initialize info to return
* @init_attr: Initial QP create attributes
*/
static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr)
{
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 size;
int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
&ukinfo->sq_shift);
if (status)
return status;
status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
&ukinfo->rq_shift);
if (status)
return status;
iwqp->kqp.sq_wrid_mem =
kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
return -ENOMEM;
}
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
size += (IRDMA_SHADOW_AREA_SIZE << 3);
mem->size = ALIGN(size, 256);
mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
&mem->pa, GFP_KERNEL);
if (!mem->va) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
kfree(iwqp->kqp.rq_wrid_mem);
iwqp->kqp.rq_wrid_mem = NULL;
return -ENOMEM;
}
ukinfo->sq = mem->va;
info->sq_pa = mem->pa;
ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
info->shadow_area_pa =
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
init_attr->cap.max_send_wr = iwqp->max_send_wr;
init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
return 0;
}
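/**
 * irdma_cqp_create_qp_cmd - issue a cqp command to create the qp
 * @iwqp: qp ptr
 */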
static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
{
struct irdma_pci_f *rf = iwqp->iwdev->rf;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_create_qp_info *qp_info;
int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request)
return -ENOMEM;
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
memset(qp_info, 0, sizeof(*qp_info));
qp_info->mac_valid = true;
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
status = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
return status;
}
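/**
 * irdma_roce_fill_and_set_qpctx_info - fill RoCE offload info and write the
 * qp host context
 * @iwqp: qp ptr
 * @ctx_info: context info of the qp
 */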
static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info)
{
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp_info;
udp_info = &iwqp->udp_info;
udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
udp_info->cwnd = iwdev->roce_cwnd;
udp_info->rexmit_thresh = 2;
udp_info->rnr_nak_thresh = 2;
udp_info->src_port = 0xc000;
udp_info->dst_port = ROCE_V2_UDP_DPORT;
roce_info = &iwqp->roce_info;
ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = 5;
roce_info->ack_credits = iwdev->roce_ackcreds;
roce_info->ird_size = dev->hw_attrs.max_hw_ird;
roce_info->ord_size = dev->hw_attrs.max_hw_ord;
if (!iwqp->user_mode) {
roce_info->priv_mode_en = true;
roce_info->fast_reg_en = true;
roce_info->udprivcq_en = true;
}
roce_info->roce_tver = 0;
ctx_info->roce_info = &iwqp->roce_info;
ctx_info->udp_info = &iwqp->udp_info;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
}
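/**
 * irdma_iw_fill_and_set_qpctx_info - fill iWARP offload info and write the
 * qp host context
 * @iwqp: qp ptr
 * @ctx_info: context info of the qp
 */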
static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
struct irdma_qp_host_ctx_info *ctx_info)
{
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_iwarp_offload_info *iwarp_info;
iwarp_info = &iwqp->iwarp_info;
ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
iwarp_info->bind_en = true;
iwarp_info->ecn_en = true;
iwarp_info->rtomin = 5;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
iwarp_info->ib_rd_en = true;
if (!iwqp->user_mode) {
iwarp_info->priv_mode_en = true;
iwarp_info->fast_reg_en = true;
}
iwarp_info->ddp_ver = 1;
iwarp_info->rdmap_ver = 1;
ctx_info->iwarp_info = &iwqp->iwarp_info;
ctx_info->iwarp_info_valid = true;
irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
ctx_info->iwarp_info_valid = false;
}
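/**
 * irdma_validate_qp_attrs - validate qp create attributes against hw caps
 * @init_attr: qp create attributes
 * @iwdev: irdma device
 */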
static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
struct irdma_device *iwdev)
{
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
if (init_attr->create_flags)
return -EOPNOTSUPP;
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (init_attr->qp_type != IB_QPT_RC &&
init_attr->qp_type != IB_QPT_UD &&
init_attr->qp_type != IB_QPT_GSI)
return -EOPNOTSUPP;
} else {
if (init_attr->qp_type != IB_QPT_RC)
return -EOPNOTSUPP;
}
return 0;
}
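/**
 * irdma_flush_worker - generate flush completions from delayed work
 * @work: work ptr embedded in the qp's delayed flush work
 */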
static void irdma_flush_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
irdma_generate_flush_completions(iwqp);
}
/**
* irdma_create_qp - create qp
* @ibqp: ptr of qp
* @init_attr: attributes for qp
* @udata: user data for create qp
*/
static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
return err_code;
if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return -EINVAL;
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
qp = &iwqp->sc_qp;
qp->qp_uk.back_qp = iwqp;
qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
iwqp->iwdev = iwdev;
iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
256);
iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
iwqp->q2_ctx_mem.size,
&iwqp->q2_ctx_mem.pa,
GFP_KERNEL);
if (!iwqp->q2_ctx_mem.va)
return -ENOMEM;
init_info.q2 = iwqp->q2_ctx_mem.va;
init_info.q2_pa = iwqp->q2_ctx_mem.pa;
init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
if (init_attr->qp_type == IB_QPT_GSI)
qp_num = 1;
else
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
if (err_code)
goto error;
iwqp->iwpd = iwpd;
iwqp->ibqp.qp_num = qp_num;
qp = &iwqp->sc_qp;
iwqp->iwscq = to_iwcq(init_attr->send_cq);
iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
iwqp->host_ctx.va = init_info.host_ctx;
iwqp->host_ctx.pa = init_info.host_ctx_pa;
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
init_waitqueue_head(&iwqp->waitq);
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
}
if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
goto error;
}
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (init_attr->qp_type == IB_QPT_RC) {
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
IRDMA_WRITE_WITH_IMM |
IRDMA_ROCE;
} else {
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
IRDMA_ROCE;
}
} else {
init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
}
if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
err_code = irdma_sc_qp_init(qp, &init_info);
if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
goto error;
}
ctx_info = &iwqp->ctx_info;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
if (rdma_protocol_roce(&iwdev->ibdev, 1))
irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
else
irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
err_code = irdma_cqp_create_qp_cmd(iwqp);
if (err_code)
goto error;
refcount_set(&iwqp->refcnt, 1);
spin_lock_init(&iwqp->lock);
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
err_code = -EINVAL;
goto error;
}
irdma_qp_add_qos(&iwqp->sc_qp);
}
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
if (udata->outlen < sizeof(uresp)) {
uresp.lsmm = 1;
uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
} else {
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
err_code = ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen));
if (err_code) {
ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
irdma_destroy_qp(&iwqp->ibqp, udata);
return err_code;
}
}
init_completion(&iwqp->free_qp);
return 0;
error:
irdma_free_qp_rsrc(iwqp);
return err_code;
}
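/**
 * irdma_get_ib_acc_flags - derive IB access flags from qp offload info
 * @iwqp: qp ptr
 */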
static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
{
int acc_flags = 0;
if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
if (iwqp->roce_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
acc_flags |= IB_ACCESS_REMOTE_WRITE;
}
if (iwqp->roce_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
if (iwqp->roce_info.bind_en)
acc_flags |= IB_ACCESS_MW_BIND;
} else {
if (iwqp->iwarp_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
acc_flags |= IB_ACCESS_REMOTE_WRITE;
}
if (iwqp->iwarp_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
if (iwqp->iwarp_info.bind_en)
acc_flags |= IB_ACCESS_MW_BIND;
}
return acc_flags;
}
/**
* irdma_query_qp - query qp attributes
* @ibqp: qp pointer
* @attr: attributes pointer
* @attr_mask: Not used
* @init_attr: qp attributes to return
*/
static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_qp_init_attr *init_attr)
{
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
memset(attr, 0, sizeof(*attr));
memset(init_attr, 0, sizeof(*init_attr));
attr->qp_state = iwqp->ibqp_state;
attr->cur_qp_state = iwqp->ibqp_state;
attr->cap.max_send_wr = iwqp->max_send_wr;
attr->cap.max_recv_wr = iwqp->max_recv_wr;
attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
attr->port_num = 1;
if (rdma_protocol_roce(ibqp->device, 1)) {
attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
attr->qkey = iwqp->roce_info.qkey;
attr->rq_psn = iwqp->udp_info.epsn;
attr->sq_psn = iwqp->udp_info.psn_nxt;
attr->dest_qp_num = iwqp->roce_info.dest_qp;
attr->pkey_index = iwqp->roce_info.p_key;
attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
attr->max_rd_atomic = iwqp->roce_info.ord_size;
attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
}
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
init_attr->cap = attr->cap;
return 0;
}
/**
* irdma_query_pkey - Query partition key
* @ibdev: device pointer from stack
* @port: port number
* @index: index of pkey
* @pkey: pointer to store the pkey
*/
static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
if (index >= IRDMA_PKEY_TBL_SZ)
return -EINVAL;
*pkey = IRDMA_DEFAULT_PKEY;
return 0;
}
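/**
 * irdma_roce_get_vlan_prio - map a priority through the vlan egress qos mask
 * @attr: gid attribute which references the net device
 * @prio: priority to use if the net device is not a vlan device
 */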
static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
{
struct net_device *ndev;
rcu_read_lock();
ndev = rcu_dereference(attr->ndev);
if (!ndev)
goto exit;
if (is_vlan_dev(ndev)) {
u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
exit:
rcu_read_unlock();
return prio;
}
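/**
 * irdma_wait_for_suspend - wait for a pending qp suspend to complete
 * @iwqp: qp being suspended
 */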
static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
{
if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
!iwqp->suspend_pending,
msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
iwqp->suspend_pending = false;
ibdev_warn(&iwqp->iwdev->ibdev,
"modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
iwqp->ibqp.qp_num, iwqp->last_aeq);
return -EBUSY;
}
return 0;
}
/**
* irdma_modify_qp_roce - modify qp request
* @ibqp: qp's pointer for modify
* @attr: access attributes
* @attr_mask: state mask
* @udata: user data
*/
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_qp_host_ctx_info *ctx_info;
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp_info;
struct irdma_modify_qp_info info = {};
struct irdma_modify_qp_resp uresp = {};
struct irdma_modify_qp_req ureq = {};
unsigned long flags;
u8 issue_modify_qp = 0;
int ret = 0;
ctx_info = &iwqp->ctx_info;
roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info;
if (udata) {
/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
(udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
return -EINVAL;
}
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
if (attr_mask & IB_QP_DEST_QPN)
roce_info->dest_qp = attr->dest_qp_num;
if (attr_mask & IB_QP_PKEY_INDEX) {
ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
&roce_info->p_key);
if (ret)
return ret;
}
if (attr_mask & IB_QP_QKEY)
roce_info->qkey = attr->qkey;
if (attr_mask & IB_QP_PATH_MTU)
udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
if (attr_mask & IB_QP_SQ_PSN) {
udp_info->psn_nxt = attr->sq_psn;
udp_info->lsn = 0xffff;
udp_info->psn_una = attr->sq_psn;
udp_info->psn_max = attr->sq_psn;
}
if (attr_mask & IB_QP_RQ_PSN)
udp_info->epsn = attr->rq_psn;
if (attr_mask & IB_QP_RNR_RETRY)
udp_info->rnr_nak_thresh = attr->rnr_retry;
if (attr_mask & IB_QP_RETRY_CNT)
udp_info->rexmit_thresh = attr->retry_cnt;
ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
if (attr_mask & IB_QP_AV) {
struct irdma_av *av = &iwqp->roce_ah.av;
const struct ib_gid_attr *sgid_attr =
attr->ah_attr.grh.sgid_attr;
u16 vlan_id = VLAN_N_VID;
u32 local_ip[4];
memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
if (attr->ah_attr.ah_flags & IB_AH_GRH) {
udp_info->ttl = attr->ah_attr.grh.hop_limit;
udp_info->flow_label = attr->ah_attr.grh.flow_label;
udp_info->tos = attr->ah_attr.grh.traffic_class;
udp_info->src_port =
rdma_get_udp_sport(udp_info->flow_label,
ibqp->qp_num,
roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
if (iwqp->sc_qp.vsi->dscp_mode)
ctx_info->user_pri =
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
else
ctx_info->user_pri = rt_tos2priority(udp_info->tos);
}
ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
ctx_info->roce_info->mac_addr);
if (ret)
return ret;
ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
ctx_info->user_pri);
if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
return -ENOMEM;
iwqp->sc_qp.user_pri = ctx_info->user_pri;
irdma_qp_add_qos(&iwqp->sc_qp);
if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
vlan_id = 0;
if (vlan_id < VLAN_N_VID) {
udp_info->insert_vlan_tag = true;
udp_info->vlan_tag = vlan_id |
ctx_info->user_pri << VLAN_PRIO_SHIFT;
} else {
udp_info->insert_vlan_tag = false;
}
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
av->net_type = rdma_gid_attr_network_type(sgid_attr);
if (av->net_type == RDMA_NETWORK_IPV6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
__be32 *saddr =
av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
udp_info->ipv4 = false;
irdma_copy_ip_ntohl(local_ip, daddr);
} else if (av->net_type == RDMA_NETWORK_IPV4) {
__be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
__be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
local_ip[0] = ntohl(daddr);
udp_info->ipv4 = true;
udp_info->dest_ip_addr[0] = 0;
udp_info->dest_ip_addr[1] = 0;
udp_info->dest_ip_addr[2] = 0;
udp_info->dest_ip_addr[3] = local_ip[0];
udp_info->local_ipaddr[0] = 0;
udp_info->local_ipaddr[1] = 0;
udp_info->local_ipaddr[2] = 0;
udp_info->local_ipaddr[3] = ntohl(saddr);
}
udp_info->arp_idx =
irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
attr->ah_attr.roce.dmac);
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ord=%d\n" ,
attr->max_rd_atomic,
dev->hw_attrs.max_hw_ord);
return -EINVAL;
}
if (attr->max_rd_atomic)
roce_info->ord_size = attr->max_rd_atomic;
info.ord_valid = true;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
ibdev_err(&iwdev->ibdev,
"rd_atomic = %d, above max_hw_ird=%d\n" ,
attr->max_dest_rd_atomic,
dev->hw_attrs.max_hw_ird);
return -EINVAL;
}
if (attr->max_dest_rd_atomic)
roce_info->ird_size = attr->max_dest_rd_atomic;
}
if (attr_mask & IB_QP_ACCESS_FLAGS) {
if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
roce_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
roce_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
roce_info->rd_en = true;
}
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
ibdev_dbg(&iwdev->ibdev,
"VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n" ,
__builtin_return_address(0), ibqp->qp_num, attr->qp_state,
iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
iwqp->ibqp.qp_type, attr_mask)) {
ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n" ,
iwqp->ibqp.qp_num, iwqp->ibqp_state,
attr->qp_state);
ret = -EINVAL;
goto exit;
}
info.curr_iwarp_state = iwqp->iwarp_state;
switch (attr->qp_state) {
case IB_QPS_INIT:
if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
ret = -EINVAL;
goto exit;
}
if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
issue_modify_qp = 1;
}
break;
case IB_QPS_RTR:
if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
ret = -EINVAL;
goto exit;
}
info.arp_cache_idx_valid = true;
info.cq_num_valid = true;
info.next_iwarp_state = IRDMA_QP_STATE_RTR;
issue_modify_qp = 1;
break;
case IB_QPS_RTS:
if (iwqp->ibqp_state < IB_QPS_RTR ||
iwqp->ibqp_state == IB_QPS_ERR) {
ret = -EINVAL;
goto exit;
}
info.arp_cache_idx_valid = true;
info.cq_num_valid = true;
info.ord_valid = true;
info.next_iwarp_state = IRDMA_QP_STATE_RTS;
issue_modify_qp = 1;
if (iwdev->push_mode && udata &&
iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_alloc_push_page(iwqp);
spin_lock_irqsave(&iwqp->lock, flags);
}
break;
case IB_QPS_SQD:
if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
goto exit;
if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
ret = -EINVAL;
goto exit;
}
info.next_iwarp_state = IRDMA_QP_STATE_SQD;
issue_modify_qp = 1;
iwqp->suspend_pending = true;
break;
case IB_QPS_SQE:
case IB_QPS_ERR:
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
irdma_flush_wqes(iwqp,
(ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
(ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
IRDMA_REFLUSH);
}
return 0;
}
info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
issue_modify_qp = 1;
break;
default:
ret = -EINVAL;
goto exit;
}
iwqp->ibqp_state = attr->qp_state;
}
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
spin_unlock_irqrestore(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
if (issue_modify_qp) {
ctx_info->rem_endpoint_idx = udp_info->arp_idx;
if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
return -EINVAL;
if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
ret = irdma_wait_for_suspend(iwqp);
if (ret)
return ret;
}
spin_lock_irqsave(&iwqp->lock, flags);
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
iwqp->ibqp_state = attr->qp_state;
}
if (iwqp->ibqp_state > IB_QPS_RTS &&
!iwqp->flush_issued) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
IRDMA_FLUSH_RQ |
IRDMA_FLUSH_WAIT);
iwqp->flush_issued = 1;
} else {
spin_unlock_irqrestore(&iwqp->lock, flags);
}
} else {
iwqp->ibqp_state = attr->qp_state;
}
if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext, ibucontext);
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
uresp.push_valid = 1;
uresp.push_offset = iwqp->sc_qp.push_offset;
}
ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
udata->outlen));
if (ret) {
irdma_remove_push_mmap_entries(iwqp);
ibdev_dbg(&iwdev->ibdev,
"VERBS: copy_to_udata failed\n" );
return ret;
}
}
}
return 0;
exit:
spin_unlock_irqrestore(&iwqp->lock, flags);
return ret;
}
/**
* irdma_modify_qp - modify qp request
* @ibqp: qp's pointer for modify
* @attr: access attributes
* @attr_mask: state mask
* @udata: user data
*/
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
struct irdma_qp_host_ctx_info *ctx_info;
struct irdma_tcp_offload_info *tcp_info;
struct irdma_iwarp_offload_info *offload_info;
struct irdma_modify_qp_info info = {};
struct irdma_modify_qp_resp uresp = {};
struct irdma_modify_qp_req ureq = {};
u8 issue_modify_qp = 0;
u8 dont_wait = 0;
int err;
unsigned long flags;
if (udata) {
/* udata inlen/outlen can be 0 when supporting legacy libi40iw */
if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
(udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
return -EINVAL;
}
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
ctx_info = &iwqp->ctx_info;
offload_info = &iwqp->iwarp_info;
tcp_info = &iwqp->tcp_info;
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
ibdev_dbg(&iwdev->ibdev,
"VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n" ,
__builtin_return_address(0), ibqp->qp_num, attr->qp_state,
iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
spin_lock_irqsave(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
info.curr_iwarp_state = iwqp->iwarp_state;
switch (attr->qp_state) {
case IB_QPS_INIT:
case IB_QPS_RTR:
if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
err = -EINVAL;
goto exit;
}
if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
issue_modify_qp = 1;
}
if (iwdev->push_mode && udata &&
iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_alloc_push_page(iwqp);
spin_lock_irqsave(&iwqp->lock, flags);
}
break;
case IB_QPS_RTS:
if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
!iwqp->cm_id) {
err = -EINVAL;
goto exit;
}
issue_modify_qp = 1;
iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
iwqp->hte_added = 1;
info.next_iwarp_state = IRDMA_QP_STATE_RTS;
info.tcp_ctx_valid = true;
info.ord_valid = true;
info.arp_cache_idx_valid = true;
info.cq_num_valid = true;
break;
case IB_QPS_SQD:
if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
err = 0;
goto exit;
}
if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
err = 0;
goto exit;
}
if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
err = -EINVAL;
goto exit;
}
info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
issue_modify_qp = 1;
break;
case IB_QPS_SQE:
if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
err = -EINVAL;
goto exit;
}
info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
issue_modify_qp = 1;
break;
case IB_QPS_ERR:
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
irdma_flush_wqes(iwqp,
(ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
(ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
IRDMA_REFLUSH);
}
return 0;
}
if (iwqp->sc_qp.term_flags) {
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_terminate_del_timer(&iwqp->sc_qp);
spin_lock_irqsave(&iwqp->lock, flags);
}
info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
iwdev->iw_status &&
iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
info.reset_tcp_conn = true;
else
dont_wait = 1;
issue_modify_qp = 1;
info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
break;
default:
err = -EINVAL;
goto exit;
}
iwqp->ibqp_state = attr->qp_state;
}
if (attr_mask & IB_QP_ACCESS_FLAGS) {
ctx_info->iwarp_info_valid = true;
if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
offload_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
offload_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
offload_info->rd_en = true;
}
if (ctx_info->iwarp_info_valid) {
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
}
spin_unlock_irqrestore(&iwqp->lock, flags);
if (attr_mask & IB_QP_STATE) {
if (issue_modify_qp) {
ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
return -EINVAL;
}
spin_lock_irqsave(&iwqp->lock, flags);
if (iwqp->iwarp_state == info.curr_iwarp_state) {
iwqp->iwarp_state = info.next_iwarp_state;
iwqp->ibqp_state = attr->qp_state;
}
spin_unlock_irqrestore(&iwqp->lock, flags);
}
if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
if (dont_wait) {
if (iwqp->hw_tcp_state) {
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
iwqp->last_aeq = IRDMA_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
}
irdma_cm_disconn(iwqp);
} else {
int close_timer_started;
spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
if (iwqp->cm_node) {
refcount_inc(&iwqp->cm_node->refcnt);
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
if (iwqp->cm_id && close_timer_started == 1)
irdma_schedule_cm_timer(iwqp->cm_node,
(struct irdma_puda_buf *)iwqp,
IRDMA_TIMER_TYPE_CLOSE, 1, 0);
irdma_rem_ref_cm_node(iwqp->cm_node);
} else {
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
}
}
}
if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
struct irdma_ucontext, ibucontext);
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
&uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
uresp.push_valid = 1;
uresp.push_offset = iwqp->sc_qp.push_offset;
}
err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
udata->outlen));
if (err) {
irdma_remove_push_mmap_entries(iwqp);
ibdev_dbg(&iwdev->ibdev,
"VERBS: copy_to_udata failed\n" );
return err;
}
}
return 0;
exit:
spin_unlock_irqrestore(&iwqp->lock, flags);
return err;
}
/**
* irdma_cq_free_rsrc - free up resources for cq
* @rf: RDMA PCI function
* @iwcq: cq ptr
*/
static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
{
struct irdma_sc_cq *cq = &iwcq->sc_cq;
if (!iwcq->user_mode) {
dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
iwcq->kmem.va, iwcq->kmem.pa);
iwcq->kmem.va = NULL;
dma_free_coherent(rf->sc_dev.hw->device,
iwcq->kmem_shadow.size,
iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
iwcq->kmem_shadow.va = NULL;
}
irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
}
/**
* irdma_free_cqbuf - worker to free a cq buffer
* @work: provides access to the cq buffer to free
*/
static void irdma_free_cqbuf(struct work_struct *work)
{
struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
cq_buf->kmem_buf.va = NULL;
kfree(cq_buf);
}
/**
* irdma_process_resize_list - remove resized cq buffers from the resize_list
* @iwcq: cq which owns the resize_list
* @iwdev: irdma device
* @lcqe_buf: the buffer where the last cqe is received
*/
static int irdma_process_resize_list(struct irdma_cq *iwcq,
struct irdma_device *iwdev,
struct irdma_cq_buf *lcqe_buf)
{
struct list_head *tmp_node, *list_node;
struct irdma_cq_buf *cq_buf;
int cnt = 0;
list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
if (cq_buf == lcqe_buf)
return cnt;
list_del(&cq_buf->list);
queue_work(iwdev->cleanup_wq, &cq_buf->work);
cnt++;
}
return cnt;
}
/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
* @udata: user data
*/
static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct irdma_device *iwdev = to_iwdev(ib_cq->device);
struct irdma_cq *iwcq = to_iwcq(ib_cq);
struct irdma_sc_cq *cq = &iwcq->sc_cq;
struct irdma_sc_dev *dev = cq->dev;
struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
unsigned long flags;
spin_lock_irqsave(&iwcq->lock, flags);
if (!list_empty(&iwcq->cmpl_generated))
irdma_remove_cmpls_list(iwcq);
if (!list_empty(&iwcq->resize_list))
irdma_process_resize_list(iwcq, iwdev, NULL);
spin_unlock_irqrestore(&iwcq->lock, flags);
irdma_cq_rem_ref(ib_cq);
wait_for_completion(&iwcq->free_cq);
irdma_cq_wq_destroy(iwdev->rf, cq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
irdma_sc_cleanup_ceqes(cq, ceq);
spin_unlock_irqrestore(&iwceq->ce_lock, flags);
irdma_cq_free_rsrc(iwdev->rf, iwcq);
return 0;
}
/**
* irdma_resize_cq - resize cq
* @ibcq: cq to be resized
* @entries: desired cq size
* @udata: user data
*/
static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata)
{
#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_modify_cq_info *m_info;
struct irdma_modify_cq_info info = {};
struct irdma_dma_mem kmem_buf;
struct irdma_cq_mr *cqmr_buf;
struct irdma_pbl *iwpbl_buf;
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_cq_buf *cq_buf = NULL;
unsigned long flags;
int ret;
iwdev = to_iwdev(ibcq->device);
rf = iwdev->rf;
if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP;
if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
return -EINVAL;
if (entries > rf->max_cqe)
return -EINVAL;
if (!iwcq->user_mode) {
entries++;
if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
}
info.cq_size = max(entries, 4);
if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
return 0;
if (udata) {
struct irdma_resize_cq_req req = {};
struct irdma_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
/* CQ resize not supported with legacy GEN_1 libi40iw */
if (ucontext->legacy_mode)
return -EOPNOTSUPP;
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen)))
return -EINVAL;
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
&ucontext->cq_reg_mem_list);
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
if (!iwpbl_buf)
return -ENOMEM;
cqmr_buf = &iwpbl_buf->cq_mr;
if (iwpbl_buf->pbl_allocated) {
info.virtual_map = true;
info.pbl_chunk_size = 1;
info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
} else {
info.cq_pa = cqmr_buf->cq_pbl.addr;
}
} else {
/* Kmode CQ resize */
int rsize;
rsize = info.cq_size * sizeof(struct irdma_cqe);
kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
kmem_buf.va = dma_alloc_coherent(dev->hw->device,
kmem_buf.size, &kmem_buf.pa,
GFP_KERNEL);
if (!kmem_buf.va)
return -ENOMEM;
info.cq_base = kmem_buf.va;
info.cq_pa = kmem_buf.pa;
cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
if (!cq_buf) {
ret = -ENOMEM;
goto error;
}
}
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request) {
ret = -ENOMEM;
goto error;
}
info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
info.cq_resize = true;
cqp_info = &cqp_request->info;
m_info = &cqp_info->in.u.cq_modify.info;
memcpy(m_info, &info, sizeof(*m_info));
cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
cqp_info->post_sq = 1;
ret = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (ret)
goto error;
spin_lock_irqsave(&iwcq->lock, flags);
if (cq_buf) {
cq_buf->kmem_buf = iwcq->kmem;
cq_buf->hw = dev->hw;
memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
list_add_tail(&cq_buf->list, &iwcq->resize_list);
iwcq->kmem = kmem_buf;
}
irdma_sc_cq_resize(&iwcq->sc_cq, &info);
ibcq->cqe = info.cq_size - 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
return 0;
error:
if (!udata) {
dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
kmem_buf.pa);
kmem_buf.va = NULL;
}
kfree(cq_buf);
return ret;
}
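/**
 * cq_validate_flags - check the requested cq create flags against the hw rev
 * @flags: cq create flags from the uverbs attrs
 * @hw_rev: hw generation of the device
 */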
static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
/* GEN1 does not support CQ create flags */
if (hw_rev == IRDMA_GEN_1)
return flags ? -EOPNOTSUPP : 0;
return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
}
/**
* irdma_create_cq - create cq
* @ibcq: CQ allocated
* @attr: attributes for cq
* @attrs: uverbs attribute bundle
*/
static int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs)
{
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_udata *udata = &attrs->driver_udata;
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_cq *iwcq = to_iwcq(ibcq);
u32 cq_num = 0;
struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cq_init_info info = {};
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
unsigned long flags;
int err_code;
int entries = attr->cqe;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return -EINVAL;
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
return err_code;
cq = &iwcq->sc_cq;
cq->back_cq = iwcq;
refcount_set(&iwcq->refcnt, 1);
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
iwcq->cq_num = cq_num;
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
info.ceq_id_valid = true;
info.ceqe_mask = 1;
info.type = IRDMA_CQ_TYPE_IWARP;
info.vsi = &iwdev->vsi;
if (udata) {
struct irdma_ucontext *ucontext;
struct irdma_create_cq_req req = {};
struct irdma_cq_mr *cqmr;
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
struct irdma_cq_mr *cqmr_shadow;
iwcq->user_mode = true;
ucontext =
rdma_udata_to_drv_context(udata, struct irdma_ucontext,
ibucontext);
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
err_code = -EFAULT;
goto cq_free_rsrc;
}
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
&ucontext->cq_reg_mem_list);
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
if (!iwpbl) {
err_code = -EPROTO;
goto cq_free_rsrc;
}
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl_shadow = irdma_get_pbl(
(unsigned long)req.user_shadow_area,
&ucontext->cq_reg_mem_list);
spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
if (!iwpbl_shadow) {
err_code = -EPROTO;
goto cq_free_rsrc;
}
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
cqmr->split = true;
} else {
info.shadow_area_pa = cqmr->shadow;
}
if (iwpbl->pbl_allocated) {
info.virtual_map = true;
info.pbl_chunk_size = 1;
info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
} else {
info.cq_base_pa = cqmr->cq_pbl.addr;
}
} else {
/* Kmode allocations */
int rsize;
if (entries < 1 || entries > rf->max_cqe) {
err_code = -EINVAL;
goto cq_free_rsrc;
}
entries++;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
ukinfo->cq_size = entries;
rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
iwcq->kmem.size,
&iwcq->kmem.pa, GFP_KERNEL);
if (!iwcq->kmem.va) {
err_code = -ENOMEM;
goto cq_free_rsrc;
}
iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
64);
iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
iwcq->kmem_shadow.size,
&iwcq->kmem_shadow.pa,
GFP_KERNEL);
if (!iwcq->kmem_shadow.va) {
err_code = -ENOMEM;
goto cq_free_rsrc;
}
info.shadow_area_pa = iwcq->kmem_shadow.pa;
ukinfo->shadow_area = iwcq->kmem_shadow.va;
ukinfo->cq_base = iwcq->kmem.va;
info.cq_base_pa = iwcq->kmem.pa;
}
info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
(u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n" );
err_code = -EPROTO;
goto cq_free_rsrc;
}
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true );
if (!cqp_request) {
err_code = -ENOMEM;
goto cq_free_rsrc;
}
cqp_info = &cqp_request->info;
cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
cqp_info->post_sq = 1;
cqp_info->in.u.cq_create.cq = cq;
cqp_info->in.u.cq_create.check_overflow = true;
cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
err_code = irdma_handle_cqp_op(rf, cqp_request);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (err_code)
goto cq_free_rsrc;
if (udata) {
struct irdma_create_cq_resp resp = {};
resp.cq_id = info.cq_uk_init_info.cq_id;
resp.cq_size = info.cq_uk_init_info.cq_size;
if (ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen))) {
ibdev_dbg(&iwdev->ibdev,
"VERBS: copy to user data\n" );
err_code = -EPROTO;
goto cq_destroy;
}
}
rf->cq_table[cq_num] = iwcq;
init_completion(&iwcq->free_cq);
return 0;
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
cq_free_rsrc:
irdma_cq_free_rsrc(rf, iwcq);
return err_code;
}
/**
* irdma_get_mr_access - get hw MR access permissions from IB access flags
* @access: IB access flags
*/
static inline u16 irdma_get_mr_access(int access)
{
u16 hw_access = 0;
hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
hw_access |= (access & IB_ACCESS_MW_BIND) ?
IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
return hw_access;
}