/* * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. * Copyright (c) 2009 HNR Consulting. All rights reserved. * Copyright (c) 2014,2018 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. *
*/
module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
/* * Returns a ib_mad_port_private structure or NULL for a device/port * Assumes ib_mad_port_list_lock is being held
*/ staticinlinestruct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, u32 port_num)
{ struct ib_mad_port_private *entry;
/* * Wrapper function to return a ib_mad_port_private structure or NULL * for a device/port
*/ staticinlinestruct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, u32 port_num)
{ struct ib_mad_port_private *entry; unsignedlong flags;
if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
dev_dbg_ratelimited(&device->dev, "%s: invalid RMPP Version %u\n",
__func__, rmpp_version); goto error1;
}
/* Validate MAD registration request if supplied */ if (mad_reg_req) { if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
dev_dbg_ratelimited(&device->dev, "%s: invalid Class Version %u\n",
__func__,
mad_reg_req->mgmt_class_version); goto error1;
} if (!recv_handler) {
dev_dbg_ratelimited(&device->dev, "%s: no recv_handler\n", __func__); goto error1;
} if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { /* * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only * one in this range currently allowed
*/ if (mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0x%x\n",
__func__, mad_reg_req->mgmt_class); goto error1;
}
} elseif (mad_reg_req->mgmt_class == 0) { /* * Class 0 is reserved in IBA and is used for * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
*/
dev_dbg_ratelimited(&device->dev, "%s: Invalid Mgmt Class 0\n",
__func__); goto error1;
} elseif (is_vendor_class(mad_reg_req->mgmt_class)) { /* * If class is in "new" vendor range, * ensure supplied OUI is not zero
*/ if (!is_vendor_oui(mad_reg_req->oui)) {
dev_dbg_ratelimited(&device->dev, "%s: No OUI specified for class 0x%x\n",
__func__,
mad_reg_req->mgmt_class); goto error1;
}
} /* Make sure class supplied is consistent with RMPP */ if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { if (rmpp_version) {
dev_dbg_ratelimited(&device->dev, "%s: RMPP version for non-RMPP class 0x%x\n",
__func__, mad_reg_req->mgmt_class); goto error1;
}
}
/* Make sure class supplied is consistent with QP type */ if (qp_type == IB_QPT_SMI) { if ((mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
(mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
dev_dbg_ratelimited(&device->dev, "%s: Invalid SM QP type: class 0x%x\n",
__func__, mad_reg_req->mgmt_class); goto error1;
}
} else { if ((mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
dev_dbg_ratelimited(&device->dev, "%s: Invalid GS QP type: class 0x%x\n",
__func__, mad_reg_req->mgmt_class); goto error1;
}
}
} else { /* No registration request supplied */ if (!send_handler) goto error1; if (registration_flags & IB_MAD_USER_RMPP) goto error1;
}
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num); if (!port_priv) {
dev_dbg_ratelimited(&device->dev, "%s: Invalid port %u\n",
__func__, port_num);
ret = ERR_PTR(-ENODEV); goto error1;
}
/* Verify the QP requested is supported. For example, Ethernet devices * will not have QP0.
*/ if (!port_priv->qp_info[qpn].qp) {
dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
__func__, qpn);
ret = ERR_PTR(-EPROTONOSUPPORT); goto error1;
}
/* Allocate structures */
mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); if (!mad_agent_priv) {
ret = ERR_PTR(-ENOMEM); goto error1;
}
if (mad_reg_req) {
reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); if (!reg_req) {
ret = ERR_PTR(-ENOMEM); goto error3;
}
}
ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); if (ret2) {
ret = ERR_PTR(ret2); goto error4;
}
/* * The mlx4 driver uses the top byte to distinguish which virtual * function generated the MAD, so we must avoid using it.
*/
ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
&ib_mad_client_next, GFP_KERNEL); if (ret2 < 0) {
ret = ERR_PTR(ret2); goto error5;
}
/* * Make sure MAD registration (if supplied) * is non overlapping with any existing ones
*/
spin_lock_irq(&port_priv->reg_lock); if (mad_reg_req) {
mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); if (!is_vendor_class(mgmt_class)) { class = port_priv->version[mad_reg_req->
mgmt_class_version].class; if (class) {
method = class->method_table[mgmt_class]; if (method) { if (method_in_use(&method,
mad_reg_req)) goto error6;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
mgmt_class);
} else { /* "New" vendor class range */
vendor = port_priv->version[mad_reg_req->
mgmt_class_version].vendor; if (vendor) {
vclass = vendor_class_index(mgmt_class);
vendor_class = vendor->vendor_class[vclass]; if (vendor_class) { if (is_vendor_method_in_use(
vendor_class,
mad_reg_req)) goto error6;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
} if (ret2) {
ret = ERR_PTR(ret2); goto error6;
}
}
spin_unlock_irq(&port_priv->reg_lock);
/* Note that we could still be handling received MADs */
trace_ib_mad_unregister_agent(mad_agent_priv);
/* * Canceling all sends results in dropping received response * MADs, preventing us from queuing additional work
*/
cancel_mads(mad_agent_priv);
port_priv = mad_agent_priv->qp_info->port_priv;
cancel_delayed_work(&mad_agent_priv->timed_work);
/* * Directed route handling starts if the initial LID routed part of * a request or the ending LID routed part of a response is empty. * If we are at the start of the LID routed part, don't update the * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
*/ if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
u32 opa_drslid;
/* Check to post send on QP or process locally */ if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD) goto out;
} else {
trace_ib_mad_handle_out_ib_smi(smp);
/* Check to post send on QP or process locally */ if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD) goto out;
}
local = kmalloc(sizeof *local, GFP_ATOMIC); if (!local) {
ret = -ENOMEM; goto out;
}
local->mad_priv = NULL;
local->recv_mad_agent = NULL;
mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC); if (!mad_priv) {
ret = -ENOMEM;
kfree(local); goto out;
}
/* No GRH for DR SMP */
ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
(conststruct ib_mad *)smp,
(struct ib_mad *)mad_priv->mad, &mad_size,
&out_mad_pkey_index); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: if (ib_response_mad((conststruct ib_mad_hdr *)mad_priv->mad) &&
mad_agent_priv->agent.recv_handler) {
local->mad_priv = mad_priv;
local->recv_mad_agent = mad_agent_priv; /* * Reference MAD agent until receive * side of local completion handled
*/
refcount_inc(&mad_agent_priv->refcount);
} else
kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
kfree(mad_priv); break; case IB_MAD_RESULT_SUCCESS: /* Treat like an incoming receive MAD */
port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
mad_agent_priv->agent.port_num); if (port_priv) {
memcpy(mad_priv->mad, smp, mad_priv->mad_size);
recv_mad_agent = find_mad_agent(port_priv,
(conststruct ib_mad_hdr *)mad_priv->mad);
} if (!port_priv || !recv_mad_agent) { /* * No receiving agent so drop packet and * generate send completion.
*/
kfree(mad_priv); break;
}
local->mad_priv = mad_priv;
local->recv_mad_agent = recv_mad_agent; break; default:
kfree(mad_priv);
kfree(local);
ret = -EINVAL; goto out;
}
local->mad_send_wr = mad_send_wr; if (opa) {
local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
local->return_wc_byte_len = mad_size;
} /* Reference MAD agent until send side of local completion handled */
refcount_inc(&mad_agent_priv->refcount); /* Queue local completion to local list */
spin_lock_irqsave(&mad_agent_priv->lock, flags);
list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
queue_work(mad_agent_priv->qp_info->port_priv->wq,
&mad_agent_priv->local_work);
ret = 1;
out: return ret;
}
/*
 * Compute the RMPP pad size for a MAD: the number of padding bytes
 * needed to round @data_len up to a whole number of segments.
 *
 * @hdr_len:  length of the class-specific MAD header, in bytes
 * @data_len: length of the payload data, in bytes
 * @mad_size: total size of the MAD, in bytes
 *
 * Returns 0 when @data_len is an exact multiple of the segment size
 * (mad_size - hdr_len); otherwise returns the shortfall of the final
 * segment.  When @data_len is 0, a full segment of padding is reported.
 */
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}
/* * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated * with the registered client
*/ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, struct ib_mad_send_buf **bad_send_buf)
{ struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_buf *next_send_buf; struct ib_mad_send_wr_private *mad_send_wr; unsignedlong flags; int ret = -EINVAL;
/* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) {
mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
send_buf);
mad_agent_priv = mad_send_wr->mad_agent_priv;
ret = ib_mad_enforce_security(mad_agent_priv,
mad_send_wr->send_wr.pkey_index); if (ret) goto error;
if (!send_buf->mad_agent->send_handler) {
ret = -EINVAL; goto error;
}
if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { if (mad_agent_priv->agent.rmpp_version) {
ret = -EINVAL; goto error;
}
}
/* * Save pointer to next work request to post in case the * current one completes, and the user modifies the work * request associated with the completion
*/
next_send_buf = send_buf->next;
mad_send_wr->send_wr.ah = send_buf->ah;
/*
 * Check to see if there are any methods still in use: returns 1 if any
 * slot in @method still has a registered agent, 0 otherwise.  Callers
 * use this to decide whether the method table can be freed.
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}
/*
 * Check to see if there are any method tables for this class still in
 * use: returns 1 if any class slot in @class still points to a method
 * table, 0 if the class table is empty and may be freed.
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}
/*
 * Returns 1 if any OUI slot in @vendor_class still has a method table,
 * 0 if the vendor class is empty and may be freed.
 */
static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}
staticint find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, constchar *oui)
{ int i;
for (i = 0; i < MAX_MGMT_OUI; i++) /* Is there matching OUI for this vendor class ? */ if (!memcmp(vendor_class->oui[i], oui, 3)) return i;
return -1;
}
/*
 * Returns 1 if any vendor-range-2 class slot in @vendor is still
 * populated, 0 if the vendor table is empty and may be freed.
 */
static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;
	return 0;
}
/*
 * Clear every slot in @method that is registered to @agent.  Leaves
 * slots belonging to other agents untouched; the caller decides
 * afterwards whether the (possibly now empty) table can be freed.
 */
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i] == agent)
			method->agent[i] = NULL;
}
port_priv = agent_priv->qp_info->port_priv; class = &port_priv->version[mad_reg_req->mgmt_class_version].class; if (!*class) { /* Allocate management class table for "new" class version */
*class = kzalloc(sizeof **class, GFP_ATOMIC); if (!*class) {
ret = -ENOMEM; goto error1;
}
/* Allocate method table for this management class */
method = &(*class)->method_table[mgmt_class]; if ((ret = allocate_method_table(method))) goto error2;
} else {
method = &(*class)->method_table[mgmt_class]; if (!*method) { /* Allocate method table for this management class */ if ((ret = allocate_method_table(method))) goto error1;
}
}
/* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error3;
/* Finally, add in methods being registered */
for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
(*method)->agent[i] = agent_priv;
return 0;
error3: /* Remove any methods for this mad agent */
remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */
kfree(*method);
*method = NULL;
}
ret = -EINVAL; goto error1;
error2:
kfree(*class);
*class = NULL;
error1: return ret;
}
/* "New" vendor (with OUI) class */
vclass = vendor_class_index(mad_reg_req->mgmt_class);
port_priv = agent_priv->qp_info->port_priv;
vendor_table = &port_priv->version[
mad_reg_req->mgmt_class_version].vendor; if (!*vendor_table) { /* Allocate mgmt vendor class table for "new" class version */
vendor = kzalloc(sizeof *vendor, GFP_ATOMIC); if (!vendor) goto error1;
*vendor_table = vendor;
} if (!(*vendor_table)->vendor_class[vclass]) { /* Allocate table for this management vendor class */
vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC); if (!vendor_class) goto error2;
(*vendor_table)->vendor_class[vclass] = vendor_class;
} for (i = 0; i < MAX_MGMT_OUI; i++) { /* Is there matching OUI for this vendor class ? */ if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3)) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i]; if (!*method) goto error3; goto check_in_use;
}
} for (i = 0; i < MAX_MGMT_OUI; i++) { /* OUI slot available ? */ if (!is_vendor_oui((*vendor_table)->vendor_class[
vclass]->oui[i])) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i]; /* Allocate method table for this OUI */ if (!*method) {
ret = allocate_method_table(method); if (ret) goto error3;
}
memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3); goto check_in_use;
}
}
dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); goto error3;
check_in_use: /* Now, make sure methods are not already in use */ if (method_in_use(method, mad_reg_req)) goto error4;
/* Finally, add in methods being registered */
for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
(*method)->agent[i] = agent_priv;
return 0;
error4: /* Remove any methods for this mad agent */
remove_methods_mad_agent(*method, agent_priv); /* Now, check to see if there are any methods in use */ if (!check_method_table(*method)) { /* If not, release management method table */
kfree(*method);
*method = NULL;
}
ret = -EINVAL;
error3: if (vendor_class) {
(*vendor_table)->vendor_class[vclass] = NULL;
kfree(vendor_class);
}
error2: if (vendor) {
*vendor_table = NULL;
kfree(vendor);
}
error1: return ret;
}
/* * Was MAD registration request supplied * with original registration ?
*/ if (!agent_priv->reg_req) goto out;
port_priv = agent_priv->qp_info->port_priv;
mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); class = port_priv->version[
agent_priv->reg_req->mgmt_class_version].class; if (!class) goto vendor_check;
method = class->method_table[mgmt_class]; if (method) { /* Remove any methods for this mad agent */
remove_methods_mad_agent(method, agent_priv); /* Now, check to see if there are any methods still in use */ if (!check_method_table(method)) { /* If not, release management method table */
kfree(method);
class->method_table[mgmt_class] = NULL; /* Any management classes left ? */ if (!check_class_table(class)) { /* If not, release management class table */
kfree(class);
port_priv->version[
agent_priv->reg_req->
mgmt_class_version].class = NULL;
}
}
}
vendor_check: if (!is_vendor_class(mgmt_class)) goto out;
/* normalize mgmt_class to vendor range 2 */
mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
vendor = port_priv->version[
agent_priv->reg_req->mgmt_class_version].vendor;
if (!vendor) goto out;
vendor_class = vendor->vendor_class[mgmt_class]; if (vendor_class) {
index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); if (index < 0) goto out;
method = vendor_class->method_table[index]; if (method) { /* Remove any methods for this mad agent */
remove_methods_mad_agent(method, agent_priv); /* * Now, check to see if there are * any methods still in use
*/ if (!check_method_table(method)) { /* If not, release management method table */
kfree(method);
vendor_class->method_table[index] = NULL;
memset(vendor_class->oui[index], 0, 3); /* Any OUIs left ? */ if (!check_vendor_class(vendor_class)) { /* If not, release vendor class table */
kfree(vendor_class);
vendor->vendor_class[mgmt_class] = NULL; /* Any other vendor classes left ? */ if (!check_vendor_table(vendor)) {
kfree(vendor);
port_priv->version[
agent_priv->reg_req->
mgmt_class_version].
vendor = NULL;
}
}
}
}
}
/* * Routing is based on high 32 bits of transaction ID * of MAD.
*/
hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
rcu_read_lock();
mad_agent = xa_load(&ib_mad_clients, hi_tid); if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
mad_agent = NULL;
rcu_read_unlock();
} else { struct ib_mad_mgmt_class_table *class; struct ib_mad_mgmt_method_table *method; struct ib_mad_mgmt_vendor_class_table *vendor; struct ib_mad_mgmt_vendor_class *vendor_class; conststruct ib_vendor_mad *vendor_mad; int index;
spin_lock_irqsave(&port_priv->reg_lock, flags); /* * Routing is based on version, class, and method * For "newer" vendor MADs, also based on OUI
*/ if (mad_hdr->class_version >= MAX_MGMT_VERSION) goto out; if (!is_vendor_class(mad_hdr->mgmt_class)) { class = port_priv->version[
mad_hdr->class_version].class; if (!class) goto out; if (convert_mgmt_class(mad_hdr->mgmt_class) >=
ARRAY_SIZE(class->method_table)) goto out;
method = class->method_table[convert_mgmt_class(
mad_hdr->mgmt_class)]; if (method)
mad_agent = method->agent[mad_hdr->method &
~IB_MGMT_METHOD_RESP];
} else {
vendor = port_priv->version[
mad_hdr->class_version].vendor; if (!vendor) goto out;
vendor_class = vendor->vendor_class[vendor_class_index(
mad_hdr->mgmt_class)]; if (!vendor_class) goto out; /* Find matching OUI */
vendor_mad = (conststruct ib_vendor_mad *)mad_hdr;
index = find_vendor_oui(vendor_class, vendor_mad->oui); if (index == -1) goto out;
method = vendor_class->method_table[index]; if (method) {
mad_agent = method->agent[mad_hdr->method &
~IB_MGMT_METHOD_RESP];
}
} if (mad_agent)
refcount_inc(&mad_agent->refcount);
out:
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
}
if (mad_agent && !mad_agent->agent.recv_handler) {
dev_notice(&port_priv->device->dev, "No receive handler for client %p on port %u\n",
&mad_agent->agent, port_priv->port_num);
deref_mad_agent(mad_agent);
mad_agent = NULL;
}
/* Make sure MAD base version is understood */ if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
(!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
pr_err("MAD received with unsupported base version %u %s\n",
mad_hdr->base_version, opa ? "(opa)" : ""); goto out;
}
/* Filter SMI packets sent to other than QP0 */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { if (qp_num == 0)
valid = 1;
} else { /* CM attributes other than ClassPortInfo only use Send method */ if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
(mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
(mad_hdr->method != IB_MGMT_METHOD_SEND)) goto out; /* Filter GSI packets sent to QP0 */ if (qp_num != 0)
valid = 1;
}
if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ return 0;
if (rdma_query_ah(wr->send_buf.ah, &attr)) /* Assume not equal, to avoid false positives. */ return 0;
has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH); if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) /* one has GID, other does not. Assume different */ return 0;
if (!send_resp && rcv_resp) { /* is request/response. */ if (!has_grh) { if (ib_get_cached_lmc(device, port_num, &lmc)) return 0; return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
rwc->wc->dlid_path_bits) &
((1 << lmc) - 1)));
} else { conststruct ib_global_route *grh =
rdma_ah_read_grh(&attr);
list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { if ((wr->tid == mad_hdr->tid) &&
rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs.
*/
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
list_for_each_entry(wr, &mad_agent_priv->backlog_list, agent_list) { if ((wr->tid == mad_hdr->tid) &&
rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs.
*/
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc))) return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
}
/* * It's possible to receive the response before we've * been notified that the send has completed
*/
list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
wr->tid == mad_hdr->tid &&
wr->timeout &&
rcv_has_same_class(wr, wc) && /* * Don't check GID for direct routed MADs. * These might have permissive LIDs.
*/
(is_direct(mad_hdr->mgmt_class) ||
rcv_has_same_gid(mad_agent_priv, wr, wc))) /* Verify request has not been canceled */ return (wr->state != IB_MAD_STATE_CANCELED) ? wr : NULL;
} return NULL;
}
INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
ret = ib_mad_enforce_security(mad_agent_priv,
mad_recv_wc->wc->pkey_index); if (ret) {
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv); return;
}
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
mad_recv_wc); if (!mad_recv_wc) {
deref_mad_agent(mad_agent_priv); return;
}
}
/* Complete corresponding request */ if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) {
spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
&& ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
&& (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
& IB_MGMT_RMPP_FLAG_ACTIVE)) { /* user rmpp is in effect * and this is an active RMPP MAD
*/
mad_agent_priv->agent.recv_handler(
&mad_agent_priv->agent, NULL,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
} else { /* not user rmpp, revert to normal behavior and * drop the mad
*/
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv); return;
}
} else {
ib_mark_mad_done(mad_send_wr);
is_mad_done = (mad_send_wr->state == IB_MAD_STATE_DONE);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
/* Defined behavior is to complete response before request */
mad_agent_priv->agent.recv_handler(
&mad_agent_priv->agent,
&mad_send_wr->send_buf,
mad_recv_wc);
deref_mad_agent(mad_agent_priv);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.