/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	case IB_RATE_14_GBPS:  return  6;
	case IB_RATE_56_GBPS:  return 22;
	case IB_RATE_112_GBPS: return 45;
	case IB_RATE_168_GBPS: return 67;
	case IB_RATE_25_GBPS:  return 10;
	case IB_RATE_100_GBPS: return 40;
	case IB_RATE_200_GBPS: return 80;
	case IB_RATE_300_GBPS: return 120;
	case IB_RATE_28_GBPS:  return 11;
	case IB_RATE_50_GBPS:  return 20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	case IB_RATE_800_GBPS: return 320;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	case 320: return IB_RATE_800_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	case IB_RATE_800_GBPS: return 850000;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
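/*
 * Illustrative sketch (not part of the original file): the three helpers
 * above convert between the same rate encodings, so a hypothetical caller
 * looking at a 100 Gb/s rate could do:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_100_GBPS);	// 40 units of 2.5 Gb/s
 *	enum ib_rate rate = mult_to_ib_rate(40);	// IB_RATE_100_GBPS
 *	int mbps = ib_rate_to_mbps(IB_RATE_100_GBPS);	// 103125 Mbit/s
 *
 * Unknown inputs return the sentinels -1 (for the int helpers) or
 * IB_RATE_PORT_CURRENT (for mult_to_ib_rate), which callers must check.
 */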
/**
 * __ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 * @flags: protection domain flags
 * @caller: caller's build-time module name
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;
	int ret;

	pd = rdma_zalloc_drv_obj(device, ib_pd);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->device = device;
	pd->flags = flags;

	rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
	rdma_restrack_set_name(&pd->res, caller);

	ret = device->ops.alloc_pd(pd, NULL);
	if (ret) {
		rdma_restrack_put(&pd->res);
		kfree(pd);
		return ERR_PTR(ret);
	}
	rdma_restrack_add(&pd->res);

	if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->attrs.local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		pd->__internal_mr = mr;

		if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
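/*
 * Illustrative sketch (not part of the original file): kernel ULPs normally
 * reach __ib_alloc_pd() through the ib_alloc_pd() wrapper macro, which
 * supplies KBUILD_MODNAME as @caller. Assuming "device" was obtained from an
 * ib_client add() callback:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	// ... use pd->local_dma_lkey in local SGEs ...
 *	ib_dealloc_pd(pd);
 */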
/**
 * ib_dealloc_pd_user - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 *
 * It is an error to call this function while any resources in the pd still
 * exist. The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	ret = pd->device->ops.dealloc_pd(pd, udata);
	if (ret)
		return ret;

	rdma_restrack_del(&pd->res);
	kfree(pd);
	return 0;
}
EXPORT_SYMBOL(ib_dealloc_pd_user);
/**
 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
 * @dest: Pointer to destination ah_attr. Contents of the destination
 *        pointer are assumed to be invalid and attributes are overwritten.
 * @src: Pointer to source ah_attr.
 */
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src)
{
	*dest = *src;
	if (dest->grh.sgid_attr)
		rdma_hold_gid_attr(dest->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_copy_ah_attr);
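/*
 * Illustrative sketch (not part of the original file): the copy takes an
 * extra reference on the GID table entry, so every rdma_copy_ah_attr() must
 * be balanced by rdma_destroy_ah_attr() on the destination;
 * "some_valid_ah_attr" is an assumed, already-initialized attribute:
 *
 *	struct rdma_ah_attr tmp;
 *
 *	rdma_copy_ah_attr(&tmp, &some_valid_ah_attr);
 *	// ... use tmp independently of the source ...
 *	rdma_destroy_ah_attr(&tmp);
 */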
/**
 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
 * @old: Pointer to existing ah_attr which needs to be replaced.
 *       old is assumed to be valid or zero'd
 * @new: Pointer to the new ah_attr.
 *
 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
 * the old ah_attr is valid; after that it copies the new attribute and holds
 * a reference to the copied sgid_attr.
 */
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new)
{
	rdma_destroy_ah_attr(old);
	*old = *new;
	if (old->grh.sgid_attr)
		rdma_hold_gid_attr(old->grh.sgid_attr);
}
EXPORT_SYMBOL(rdma_replace_ah_attr);
/**
 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
 * @dest: Pointer to destination ah_attr to copy to.
 *        dest is assumed to be valid or zero'd
 * @src: Pointer to the new ah_attr.
 *
 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
 * if it is valid. This also transfers ownership of internal references from
 * src to dest, making src invalid in the process. No new reference of the src
 * ah_attr is taken.
 */
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
{
	rdma_destroy_ah_attr(dest);
	*dest = *src;
	src->grh.sgid_attr = NULL;
}
EXPORT_SYMBOL(rdma_move_ah_attr);
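/*
 * Illustrative sketch (not part of the original file): unlike the copy
 * variant, rdma_move_ah_attr() transfers the existing sgid_attr reference
 * rather than taking a new one, so only the destination needs destroying:
 *
 *	rdma_move_ah_attr(&dest, &src);
 *	// src->grh.sgid_attr is now NULL; src must not be used further
 *	rdma_destroy_ah_attr(&dest);
 */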
/*
 * Validate that the rdma_ah_attr is valid for the device before passing it
 * off to the driver.
 */
static int rdma_check_ah_attr(struct ib_device *device,
			      struct rdma_ah_attr *ah_attr)
{
	if (!rdma_is_port_valid(device, ah_attr->port_num))
		return -EINVAL;

	if (ah_attr->grh.sgid_attr) {
		/*
		 * Make sure the passed sgid_attr is consistent with the
		 * parameters
		 */
		if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
		    ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
			return -EINVAL;
	}
	return 0;
}
/*
 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
 * On success the caller is responsible to call rdma_unfill_sgid_attr().
 */
static int rdma_fill_sgid_attr(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr,
			       const struct ib_gid_attr **old_sgid_attr)
{
	const struct ib_gid_attr *sgid_attr;
	struct ib_global_route *grh;
	int ret;

	*old_sgid_attr = ah_attr->grh.sgid_attr;

	ret = rdma_check_ah_attr(device, ah_attr);
	if (ret)
		return ret;

	if (!(ah_attr->ah_flags & IB_AH_GRH))
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);
	if (grh->sgid_attr)
		return 0;

	sgid_attr =
		rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	/* Move ownership of the kref into the ah_attr */
	grh->sgid_attr = sgid_attr;
	return 0;
}
static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
				  const struct ib_gid_attr *old_sgid_attr)
{
	/*
	 * Fill didn't change anything, the caller retains ownership of
	 * whatever it passed
	 */
	if (ah_attr->grh.sgid_attr == old_sgid_attr)
		return;

	/*
	 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the
	 * caller doesn't see any change in the rdma_ah_attr. If we get here
	 * old_sgid_attr is NULL.
	 */
	rdma_destroy_ah_attr(ah_attr);
}
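/*
 * Illustrative sketch (not part of the original file) of the fill/unfill
 * pattern the two functions above implement; "some_driver_op" is a
 * hypothetical stand-in for any driver callback that consumes an ah_attr:
 *
 *	const struct ib_gid_attr *old_sgid_attr;
 *	int ret;
 *
 *	ret = rdma_fill_sgid_attr(device, ah_attr, &old_sgid_attr);
 *	if (ret)
 *		return ret;
 *	ret = some_driver_op(device, ah_attr);
 *	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 *	return ret;
 */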
	if (udata)
		ret = device->ops.create_user_ah(ah, &init_attr, udata);
	else
		ret = device->ops.create_ah(ah, &init_attr, NULL);
	if (ret) {
		if (ah->sgid_attr)
			rdma_put_gid_attr(ah->sgid_attr);
		kfree(ah);
		return ERR_PTR(ret);
	}

	atomic_inc(&pd->usecnt);
	return ah;
}
/**
 * rdma_create_ah - Creates an address handle for the
 * given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * It returns a valid address handle pointer on success and an appropriate
 * ERR_PTR on error. The address handle is used to reference a local or
 * global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct net_device *slave;
	struct ib_ah *ah;
	int ret;

	ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (ret)
		return ERR_PTR(ret);
	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
					   (flags & RDMA_CREATE_AH_SLEEPABLE) ?
					   GFP_KERNEL : GFP_ATOMIC);
	if (IS_ERR(slave)) {
		rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
		return ERR_CAST(slave);
	}
	ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
	rdma_lag_put_ah_roce_slave(slave);
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to user's input output buffer information needed by
 *         provider driver.
 *
 * It returns a valid address handle pointer on success and an appropriate
 * ERR_PTR on error. The address handle is used to reference a local or
 * global destination in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	const struct ib_gid_attr *old_sgid_attr;
	struct ib_ah *ah;
	int err;

	err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
	if (err)
		return ERR_PTR(err);

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err) {
			ah = ERR_PTR(err);
			goto out;
		}
	}

	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
			     udata, NULL);

out:
	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
	return ah;
}
EXPORT_SYMBOL(rdma_create_user_ah);
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);
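/*
 * Illustrative sketch (not part of the original file): a RoCE receive path
 * can use the return value (4 or 6) to decide how to parse the 40-byte
 * header area; "hdr" is assumed to point at the GRH of a received packet:
 *
 *	int version = ib_get_rdma_header_version(hdr);
 *	bool is_roce_v2_ipv4 = (version == 4);	// iphdr follows 20 bytes of padding
 */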
/* Resolve destination mac address and hop limit for unicast destination
 * GID entry, considering the source GID entry as well.
 * ah_attribute must have valid port_num, sgid_index.
 */
static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
				       struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int hop_limit = 0xff;
	int ret = 0;

	/* If destination is link local and source GID is RoCEv1,
	 * IP stack is not used.
	 */
	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
	    sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
		return ret;
	}

	ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
					   ah_attr->roce.dmac,
					   sgid_attr, &hop_limit);

	grh->hop_limit = hop_limit;
	return ret;
}
/*
 * This function initializes address handle attributes from the incoming packet.
 * The incoming packet has the dgid of the receiver node on which this code is
 * getting executed, and sgid contains the GID of the sender.
 *
 * When resolving the mac address of the destination, the arrived dgid is used
 * as sgid and sgid is used as dgid, because sgid contains the destination's
 * GID whom to respond to.
 *
 * On success the caller is responsible to call rdma_destroy_ah_attr on the
 * attr.
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	const struct ib_gid_attr *sgid_attr;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;
/**
 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
 * of the reference
 *
 * @attr: Pointer to AH attribute structure
 * @dgid: Destination GID
 * @flow_label: Flow label
 * @hop_limit: Hop limit
 * @traffic_class: traffic class
 * @sgid_attr: Pointer to SGID attribute
 *
 * This takes ownership of the sgid_attr reference. The caller must ensure
 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
 * calling this function.
 */
void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr)
{
	rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
			traffic_class);
	attr->grh.sgid_attr = sgid_attr;
}
EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
/**
 * rdma_destroy_ah_attr - Release reference to SGID attribute of
 * ah attribute.
 * @ah_attr: Pointer to ah attribute
 *
 * Release reference to the SGID attribute of the ah attribute if it is
 * non NULL. It is safe to call this multiple times, and safe to call it on
 * a zero initialized ah_attr.
 */
void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
{
	if (ah_attr->grh.sgid_attr) {
		rdma_put_gid_attr(ah_attr->grh.sgid_attr);
		ah_attr->grh.sgid_attr = NULL;
	}
}
EXPORT_SYMBOL(rdma_destroy_ah_attr);
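/*
 * Illustrative sketch (not part of the original file): the typical
 * responder-side pairing of ib_init_ah_attr_from_wc() with
 * rdma_destroy_ah_attr(); "wc" and "grh" are assumed to come from a
 * received UD completion:
 *
 *	struct rdma_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *	int ret;
 *
 *	ret = ib_init_ah_attr_from_wc(device, port_num, wc, grh, &ah_attr);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	rdma_destroy_ah_attr(&ah_attr);	// always release the SGID reference
 *	return ah;
 */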
/**
 * ib_create_srq_user - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 * @uobject: uobject pointer if this is not a kernel SRQ
 * @udata: udata pointer if this is not a kernel SRQ
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
				  struct ib_srq_init_attr *srq_init_attr,
				  struct ib_usrq_object *uobject,
				  struct ib_udata *udata)
{
	struct ib_srq *srq;
	int ret;

	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
	if (!srq)
		return ERR_PTR(-ENOMEM);
static void __ib_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = event->element.qp;

	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		complete(&qp->srq_completion);
	if (qp->registered_event_handler)
		qp->registered_event_handler(event, qp->qp_context);
}
	/*
	 * TODO: The mlx4 driver internally overwrites send_cq and recv_cq.
	 * Unfortunately, it is not an easy task to fix that driver.
	 */
	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;

	ret = ib_create_qp_security(qp, dev);
	if (ret)
		goto err_security;
/**
 * ib_create_qp_user - Creates a QP associated with the specified protection
 *   domain.
 * @dev: IB device
 * @pd: The protection domain associated with the QP.
 * @attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 * @udata: User data
 * @uobj: uverbs object
 * @caller: caller's build-time module name
 */
struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
				struct ib_qp_init_attr *attr,
				struct ib_udata *udata,
				struct ib_uqp_object *uobj, const char *caller)
{
	struct ib_qp *qp, *xrc_qp;
void ib_qp_usecnt_inc(struct ib_qp *qp)
{
	if (qp->pd)
		atomic_inc(&qp->pd->usecnt);
	if (qp->send_cq)
		atomic_inc(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_inc(&qp->recv_cq->usecnt);
	if (qp->srq)
		atomic_inc(&qp->srq->usecnt);
	if (qp->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_inc);
void ib_qp_usecnt_dec(struct ib_qp *qp)
{
	if (qp->rwq_ind_tbl)
		atomic_dec(&qp->rwq_ind_tbl->usecnt);
	if (qp->srq)
		atomic_dec(&qp->srq->usecnt);
	if (qp->recv_cq)
		atomic_dec(&qp->recv_cq->usecnt);
	if (qp->send_cq)
		atomic_dec(&qp->send_cq->usecnt);
	if (qp->pd)
		atomic_dec(&qp->pd->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_dec);
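/*
 * Illustrative sketch (not part of the original file): code that creates a
 * QP object without going through ib_create_qp() is expected to bracket the
 * QP's lifetime with these helpers so the PD/CQ/SRQ usecnts stay balanced:
 *
 *	ib_qp_usecnt_inc(qp);	// once the QP is fully initialized
 *	// ... QP in use ...
 *	ib_qp_usecnt_dec(qp);	// just before the QP object is freed
 */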
	/*
	 * If the caller is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret)
			goto err;
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
		qp->integrity_en = true;
	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
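/*
 * Illustrative sketch (not part of the original file): a driver validating
 * a RESET->INIT transition of an RC QP would call the helper as:
 *
 *	bool ok = ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				     IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				     IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */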
/**
 * ib_resolve_eth_dmac - Resolve destination mac address
 * @device: Device to consider
 * @ah_attr: address handle attribute which describes the
 *   source and destination parameters
 *
 * ib_resolve_eth_dmac() resolves destination mac address and L3 hop limit.
 * It returns 0 on success or an appropriate error code. It initializes the
 * necessary ah_attr fields when the call is successful.
 */
static int ib_resolve_eth_dmac(struct ib_device *device,
			       struct rdma_ah_attr *ah_attr)
{
	int ret = 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;
/*
 * IB core internal function to perform QP attributes modification.
 */
static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
	const struct ib_gid_attr *old_sgid_attr_av;
	const struct ib_gid_attr *old_sgid_attr_alt_av;
	int ret;

	attr->xmit_slave = NULL;
	if (attr_mask & IB_QP_AV) {
		ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
					  &old_sgid_attr_av);
		if (ret)
			return ret;

		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
		    is_qp_type_connected(qp)) {
			struct net_device *slave;

			/*
			 * If the user provided the qp_attr then we have to
			 * resolve it. Kernel users have to provide already
			 * resolved rdma_ah_attr's.
			 */
			if (udata) {
				ret = ib_resolve_eth_dmac(qp->device,
							  &attr->ah_attr);
				if (ret)
					goto out_av;
			}
			slave = rdma_lag_get_ah_roce_slave(qp->device,
							   &attr->ah_attr,
							   GFP_KERNEL);
			if (IS_ERR(slave)) {
				ret = PTR_ERR(slave);
				goto out_av;
			}
			attr->xmit_slave = slave;
		}
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		/*
		 * FIXME: This does not track the migration state, so if the
		 * user loads a new alternate path after the HW has migrated
		 * from primary->alternate we will keep the wrong
		 * references. This is OK for IB because the reference
		 * counting does not serve any functional purpose.
		 */
		ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
					  &old_sgid_attr_alt_av);
		if (ret)
			goto out_av;

		/*
		 * Today the core code can only handle alternate paths and APM
		 * for IB. Ban them in roce mode.
		 */
		if (!(rdma_protocol_ib(qp->device,
				       attr->alt_ah_attr.port_num) &&
		      rdma_protocol_ib(qp->device, port))) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (rdma_ib_or_roce(qp->device, port)) {
		if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s rq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->rq_psn &= 0xffffff;
		}

		if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
			dev_warn(&qp->device->dev,
				 "%s sq_psn overflow, masking to 24 bits\n",
				 __func__);
			attr->sq_psn &= 0xffffff;
		}
	}

	/*
	 * Bind this qp to a counter automatically based on the rdma counter
	 * rules. This is only done in RST2INIT with port specified.
	 */
	if (!qp->counter && (attr_mask & IB_QP_PORT) &&
	    ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
		rdma_counter_bind_qp_auto(qp, attr->port_num);

	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
	if (ret)
		goto out;

	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_AV)
		qp->av_sgid_attr =
			rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_path_sgid_attr = rdma_update_sgid_attr(
			&attr->alt_ah_attr, qp->alt_path_sgid_attr);

out:
	if (attr_mask & IB_QP_ALT_PATH)
		rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
	if (attr_mask & IB_QP_AV) {
		rdma_lag_put_ah_roce_slave(attr->xmit_slave);
		rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
	}
	return ret;
}
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @ib_qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
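/*
 * Illustrative sketch (not part of the original file): kernel callers
 * normally go through ib_modify_qp() (udata == NULL); a RESET->INIT
 * transition, with port 1 assumed, looks like:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */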
	if (access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.kernel_cap_flags &
		      IBK_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			return ERR_PTR(-EINVAL);
		}
	}
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
{
	struct ib_pd *pd = mr->pd;
	struct ib_dm *dm = mr->dm;
	struct ib_dmah *dmah = mr->dmah;
	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
	int ret;

	trace_mr_dereg(mr);
	rdma_restrack_del(&mr->res);
	ret = mr->device->ops.dereg_mr(mr, udata);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (dm)
			atomic_dec(&dm->usecnt);
		if (dmah)
			atomic_dec(&dmah->usecnt);
		kfree(sig_attrs);
	}

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr_user);
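/*
 * Illustrative sketch (not part of the original file): kernel users reach
 * this via the ib_dereg_mr() wrapper (udata == NULL); all work requests
 * referencing the MR's lkey/rkey must have completed beforehand:
 *
 *	ret = ib_dereg_mr(mr);
 *	if (ret)
 *		pr_err("MR deregistration failed: %d\n", ret);
 */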
/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->ops.alloc_mr) {
		mr = ERR_PTR(-EOPNOTSUPP);
		goto out;
	}
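/*
 * Illustrative sketch (not part of the original file): a ULP doing
 * FRWR-style registration allocates the MR up front and later maps a
 * scatterlist into it; "sg" and "sg_nents" are assumed to describe an
 * already DMA-mapped scatterlist:
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	int n;
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);	// n entries mapped
 */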
/**
 * ib_alloc_mr_integrity() - Allocates an integrity memory region
 * @pd:                      protection domain associated with the region
 * @max_num_data_sg:         maximum data sg entries available for registration
 * @max_num_meta_sg:         maximum metadata sg entries available for
 *                           registration
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_data_sg,
 * also the integrity page/sg lists must not exceed max_num_meta_sg.
 */
struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg)
{
	struct ib_mr *mr;
	struct ib_sig_attrs *sig_attrs;
static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	unsigned int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	rdma_for_each_port(qp->device, port)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, RoCE annex declares that
	 * multicast LID should be ignored. We can't tell at this step if the
	 * QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->ops.attach_mcast)
		return -EOPNOTSUPP;

	if (!rdma_is_port_valid(qp->device, qp->port) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->ops.attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);