/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * Stamp a SQ WQE so that it is invalid if prefetched by marking the
 * first four bytes of every 64 byte chunk with 0xffffffff, except for
 * the very first chunk of the WQE.
 */
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
{
	__be32 *wqe;
	int i;
	int s;
	void *buf;
	struct mlx4_wqe_ctrl_seg *ctrl;

	buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
	ctrl = (struct mlx4_wqe_ctrl_seg *)buf;
	s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
	for (i = 64; i < s; i += 64) {
		wqe = buf + i;
		*wqe = cpu_to_be32(0xffffffff);
	}
}

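/*
 * Event handler for QPs that back user WQs (installed by create_rq()).
 * WQs are not expected to generate asynchronous events, so just emit a
 * rate-limited warning.
 */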
static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
{
	pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
			    type, qp->qpn);
}

static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
	/*
	 * UD WQEs must have a datagram segment.
	 * RC and UC WQEs might have a remote address segment.
	 * MLX WQEs need two extra inline data segments (for the UD
	 * header and space for the ICRC).
	 */
	switch (type) {
	case MLX4_IB_QPT_UD:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) +
			((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0);
	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg) + 64;
	case MLX4_IB_QPT_TUN_SMI_OWNER:
	case MLX4_IB_QPT_TUN_GSI:
		return sizeof (struct mlx4_wqe_ctrl_seg) +
			sizeof (struct mlx4_wqe_datagram_seg);

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI ||
	     type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) &&
	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
		return -EINVAL;

	if (s > dev->dev->caps.max_sq_desc_sz)
		return -EINVAL;

	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));

	/*
	 * We need to leave 2 KB + 1 WR of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
					    qp->sq_spare_wqes);

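/*
 * Return 1 if @qpn matches a port's proxy QP0 and that port has a QP0
 * qkey assigned (i.e. QP0 is enabled for this function), 0 otherwise.
 */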
static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
	int i;

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (qpn == dev->caps.spec_qps[i].qp0_proxy)
			return !!dev->caps.spec_qps[i].qp0_qkey;
	}
	return 0;
}

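	/*
	 * Validate the RX hash fields requested by userspace and translate
	 * them into MLX4_RSS_* flags: SRC and DST fields of a header type
	 * must be requested together, and TCP/UDP hashing requires the
	 * matching IPv4/IPv6 fields to be set as well.
	 */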
	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		rss_ctx->flags = MLX4_RSS_IPV4;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		rss_ctx->flags |= MLX4_RSS_IPV6;
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
		pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
			pr_debug("RX Hash fields_mask for UDP is not supported\n");
			return (-EOPNOTSUPP);
		}

		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
		pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		if (rss_ctx->flags & MLX4_RSS_IPV4)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
		if (rss_ctx->flags & MLX4_RSS_IPV6)
			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
			return (-EOPNOTSUPP);
		}
	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
		return (-EOPNOTSUPP);
	}

	if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
		if (dev->dev->caps.tunnel_offload_mode ==
		    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/*
			 * Hash according to inner headers if exist, otherwise
			 * according to outer headers.
			 */
			rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
		} else {
			pr_debug("RSS Hash for inner headers isn't supported\n");
			return (-EOPNOTSUPP);
		}
	}

	return 0;
}

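/*
 * RSS QP creation path: RSS QPs must be RAW_PACKET receive-only QPs, so
 * unknown user command fields, create flags and any send-side attributes
 * are rejected with -EOPNOTSUPP.
 */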
static int create_qp_rss(struct mlx4_ib_dev *dev,
			 struct ib_qp_init_attr *init_attr,
			 struct mlx4_ib_create_qp_rss *ucmd,
			 struct mlx4_ib_qp *qp)
{
	int qpn;
	int err;

	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		return -EOPNOTSUPP;

	if (ucmd.comp_mask || ucmd.reserved1)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd))) {
		pr_debug("inlen is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
		pr_debug("RSS QP with unsupported QP type %d\n",
			 init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		pr_debug("RSS QP doesn't support create flags\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
		pr_debug("RSS QP with unsupported send attributes\n");
		return -EOPNOTSUPP;
	}

	qp->pri.vid = 0xFFFF;
	qp->alt.vid = 0xFFFF;

	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
	if (err)
		return err;

	qp->ibqp.qp_num = qp->mqp.qpn;
	return 0;
}

/*
 * This function allocates a WQN from a range which is consecutive and aligned
 * to its size. In case the range is full, then it creates a new range and
 * allocates WQN from it. The new range will be used for following allocations.
 */
static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
			     struct mlx4_ib_qp *qp, int range_size, int *wqn)
{
	struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
	struct mlx4_wqn_range *range;
	int err = 0;

	mutex_lock(&context->wqn_ranges_mutex);

	range = list_first_entry_or_null(&context->wqn_ranges_list,
					 struct mlx4_wqn_range, list);

	if (!range || (range->refcount == range->size) || range->dirty) {
		range = kzalloc(sizeof(*range), GFP_KERNEL);
		if (!range) {
			err = -ENOMEM;
			goto out;
		}

		range->size = range_size;
		list_add(&range->list, &context->wqn_ranges_list);
	} else if (range_size != 1) {
		/*
		 * Requesting a new range (>1) when last range is still open, is
		 * not valid.
		 */
		err = -EINVAL;
		goto out;
	}

	range->refcount--;
	if (!range->refcount) {
		mlx4_qp_release_range(dev->dev, range->base_wqn,
				      range->size);
		list_del(&range->list);
		kfree(range);
	} else if (dirty_release) {
		/*
		 * A range which one of its WQNs is destroyed, won't be able to be
		 * reused for further WQN allocations.
		 * The next created WQ will allocate a new range.
		 */
		range->dirty = true;
	}

	mutex_unlock(&context->wqn_ranges_mutex);
}

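/*
 * Create the receive side of a user WQ: allocate a WQN from the context's
 * WQN ranges, allocate the backing mlx4 QP, and link it into the device
 * and CQ lists used by the reset flow.
 */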
static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
		     struct ib_udata *udata, struct mlx4_ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	int qpn;
	int err;
	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_cq *mcq;
	unsigned long flags;
	int range_size;
	struct mlx4_ib_create_wq wq;
	size_t copy_len;
	int shift;
	int n;

	if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
		pr_debug("WQN range size must be equal or smaller than %d\n",
			 dev->dev->caps.max_rss_tbl_sz);
		err = -EOPNOTSUPP;
		goto err;
	}
	range_size = 1 << wq.log_range_size;

	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
		qp->flags |= MLX4_IB_QP_SCATTER_FCS;

	err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
	if (err)
		goto err_wrid;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
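	/*
	 * For example, on a little-endian CPU a QPN of 0x123456 is shifted
	 * to 0x12345600 and swab32() stores it as the bytes 12 34 56 00,
	 * i.e. the big-endian layout the doorbell register expects.
	 */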
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_wq_event;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

	if (sqpn) {
		if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
		    MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
			if (alloc_proxy_bufs(pd->device, qp)) {
				err = -ENOMEM;
				goto err_wrid;
			}
		}
	} else {
		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
		 * otherwise, the WQE BlueFlame setup flow wrongly causes
		 * VLAN insertion. */
		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
						    (init_attr->cap.max_send_wr ?
						     MLX4_RESERVE_ETH_BF_QP : 0) |
						    (init_attr->cap.max_recv_wr ?
						     MLX4_RESERVE_A0_QP : 0),
						    qp->mqp.usage);
		else if (qp->flags & MLX4_IB_QP_NETIF)
			err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
		else
			err = mlx4_qp_reserve_range(dev->dev, 1, 1,
						    &qpn, 0, qp->mqp.usage);
		if (err)
			goto err_proxy;
	}

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;

	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
	if (err)
		goto err_qpn;

	if (init_attr->qp_type == IB_QPT_XRC_TGT)
		qp->mqp.qpn |= (1 << 23);

	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx4_ib_qp_event;

	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));
	/* Maintain device to QPs access, needed for further handling
	 * via reset flow
	 */
	list_add_tail(&qp->qps_list, &dev->qp_list);
	/* Maintain CQ to QPs access, needed for further handling
	 * via reset flow
	 */
	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));
	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);

	return 0;

err_qpn:
	if (!sqpn) {
		if (qp->flags & MLX4_IB_QP_NETIF)
			mlx4_ib_steer_qp_free(dev, qpn, 1);
		else
			mlx4_qp_release_range(dev->dev, qpn, 1);
	}
err_proxy:
	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
		free_proxy_bufs(pd->device, qp);
err_wrid:
	if (udata) {
		if (qp_has_rq(init_attr))
			mlx4_ib_db_unmap_user(context, &qp->db);
	} else {
		kvfree(qp->sq.wrid);
		kvfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (!qp->umem)
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
	ib_umem_release(qp->umem);

err_db:
	if (!udata && qp_has_rq(init_attr))
		mlx4_db_free(dev->dev, &qp->db);

err:
	kfree(qp->sqp);
	return err;
}

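/* Translate an IB verbs QP state to the corresponding mlx4 firmware QP state. */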
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

	/* del from lists under both locks above to protect reset flow paths */
	list_del(&qp->qps_list);
	list_del(&qp->cq_send_list);
	list_del(&qp->cq_recv_list);

	if (!udata) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	if (init_attr->rwq_ind_tbl)
		return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);

	/*
	 * We only support LSO, vendor flag1, and multicast loopback blocking,
	 * and only for kernel UD QPs.
	 */
	if (init_attr->create_flags & ~(MLX4_IB_QP_LSO |
					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK |
					MLX4_IB_SRIOV_TUNNEL_QP |
					MLX4_IB_SRIOV_SQP |
					MLX4_IB_QP_NETIF |
					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
		return -EOPNOTSUPP;

	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
		if (init_attr->qp_type != IB_QPT_UD)
			return -EINVAL;
	}

	if (init_attr->create_flags) {
		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
			return -EINVAL;

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		pd = to_mxrcd(init_attr->xrcd)->pd;
		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		fallthrough;
	case IB_QPT_XRC_INI:
		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
			return -ENOSYS;
		init_attr->recv_cq = init_attr->send_cq;
		fallthrough;
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_RAW_PACKET:
	case IB_QPT_UD:
		qp->pri.vid = 0xFFFF;
		qp->alt.vid = 0xFFFF;
		err = create_qp_common(pd, init_attr, udata, 0, qp);
		if (err)
			return err;
		qp->ibqp.qp_num = qp->mqp.qpn;
		qp->xrcdn = xrcdn;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		int sqpn;

		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
							1, 1, &sqpn, 0,
							MLX4_RES_USAGE_DRIVER);

	if (init_attr->create_flags &
	    (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP))
		/* Internal QP created with ib_create_qp */
		rdma_restrack_no_track(&qp->ibqp.res);

	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
		struct mlx4_ib_sqp *sqp = mqp->sqp;

		if (sqp->roce_v2_gsi)
			ib_destroy_qp(sqp->roce_v2_gsi);
	}

	return _mlx4_ib_destroy_qp(qp, udata);
}

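/*
 * Translate the driver's internal QP type to the mlx4 service type
 * programmed into the QP context.  Proxy and tunnel QP types are only
 * valid on multi-function (SR-IOV) devices; -1 means "unsupported".
 */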
static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
{
	switch (type) {
	case MLX4_IB_QPT_RC:		return MLX4_QP_ST_RC;
	case MLX4_IB_QPT_UC:		return MLX4_QP_ST_UC;
	case MLX4_IB_QPT_UD:		return MLX4_QP_ST_UD;
	case MLX4_IB_QPT_XRC_INI:
	case MLX4_IB_QPT_XRC_TGT:	return MLX4_QP_ST_XRC;
	case MLX4_IB_QPT_SMI:
	case MLX4_IB_QPT_GSI:
	case MLX4_IB_QPT_RAW_PACKET:	return MLX4_QP_ST_MLX;

	case MLX4_IB_QPT_PROXY_SMI_OWNER:
	case MLX4_IB_QPT_TUN_SMI_OWNER:
		return (mlx4_is_mfunc(dev->dev) ? MLX4_QP_ST_MLX : -1);
	case MLX4_IB_QPT_PROXY_SMI:
	case MLX4_IB_QPT_TUN_SMI:
	case MLX4_IB_QPT_PROXY_GSI:
	case MLX4_IB_QPT_TUN_GSI:
		return (mlx4_is_mfunc(dev->dev) ? MLX4_QP_ST_UD : -1);
	default:			return -1;
	}
}

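	/*
	 * When the AH carries a GRH, map the verbs SGID index to the real
	 * index in the port's GID table and validate it against the table
	 * size before it is used.
	 */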
	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);
		int real_sgid_index =
			mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr);

		if (real_sgid_index < 0)
			return real_sgid_index;
		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       real_sgid_index,
			       dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
		if (vlan_tag < 0x1000) {
			if (smac_info->vid < 0x1000) {
				/* both valid vlan ids */
				if (smac_info->vid != vlan_tag) {
					/* different VIDs. unreg old and reg new */
					err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
					if (err)
						return err;
					smac_info->candidate_vid = vlan_tag;
					smac_info->candidate_vlan_index = vidx;
					smac_info->candidate_vlan_port = port;
					smac_info->update_vid = 1;
					path->vlan_index = vidx;
				} else {
					path->vlan_index = smac_info->vlan_index;
				}
			} else {
				/* no current vlan tag in qp */
				err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
				if (err)
					return err;
				smac_info->candidate_vid = vlan_tag;
				smac_info->candidate_vlan_index = vidx;
				smac_info->candidate_vlan_port = port;
				smac_info->update_vid = 1;
				path->vlan_index = vidx;
			}
			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
			path->fl = 1 << 6;
		} else {
			/* have current vlan tag. unregister it at modify-qp success */
			if (smac_info->vid < 0x1000) {
				smac_info->candidate_vid = 0xFFFF;
				smac_info->update_vid = 1;
			}
		}

		/*
		 * get smac_index for RoCE use.
		 * If no smac was yet assigned, register one.
		 * If one was already assigned, but the new mac differs,
		 * unregister the old one and register the new one.
		 */
		if ((!smac_info->smac && !smac_info->smac_port) ||
		    smac_info->smac != smac) {
			/* register candidate now, unreg if needed, after success */
			smac_index = mlx4_register_mac(dev->dev, port, smac);
			if (smac_index >= 0) {
				smac_info->candidate_smac_index = smac_index;
				smac_info->candidate_smac = smac;
				smac_info->candidate_smac_port = port;
			} else {
				return -EINVAL;
			}
		} else {
			smac_index = smac_info->smac_index;
		}

		memcpy(path->dmac, ah->roce.dmac, 6);
		path->ackto = MLX4_IB_LINK_TYPE_ETH;
		/* put MAC table smac index for IBoE */
		path->grh_mylmc = (u8) (smac_index) | 0x80;
	} else {
		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
	}

/*
 * Go over all the children (WQs) of an RSS QP and apply their HW state
 * according to their logical state, if this RSS QP is the first RSS QP
 * associated with the WQ.
 */
static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
			    struct ib_udata *udata)
{
	int err = 0;
	int i;

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
		struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);

		mutex_lock(&wq->mutex);

		/* mlx4_ib restriction:
		 * A WQ is associated to a port according to the RSS QP it is
		 * associated with.
		 * If the WQ is already associated to a different port by
		 * another RSS QP, return a failure.
		 */
		if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
			err = -EINVAL;
			mutex_unlock(&wq->mutex);
			break;
		}
		wq->port = port_num;
		if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
			if (err) {
				mutex_unlock(&wq->mutex);
				break;
			}
		}
		wq->rss_usecnt++;

	/* APM is not supported under RoCE */
	if (attr_mask & IB_QP_ALT_PATH &&
	    rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
	    IB_LINK_LAYER_ETHERNET)
		return -ENOTSUPP;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	if (!rwq_ind_tbl) {
		/* PRM RSS receive side should be left zeros */
		if (qp->rq.wqe_cnt)
			context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
		context->rq_size_stride |= qp->rq.wqe_shift - 4;
	}