/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
static u16 in_to_opcode(void *in)
{
	return MLX5_GET(mbox_in, in, opcode);
}

static u16 in_to_uid(void *in)
{
	return MLX5_GET(mbox_in, in, uid);
}
/* Returns true for opcodes that might be triggered very frequently and
 * throttle the command interface. Limit their command slot usage.
 */
static bool mlx5_cmd_is_throttle_opcode(u16 op)
{
	switch (op) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_SYNC_CRYPTO:
		return true;
	}
	return false;
}
	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
		if (mlx5_cmd_is_down(dev)) {
			ent->ret = -ENXIO;
			return;
		}
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EHWPOISON;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return -EHWPOISON;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset, int idx)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset,
			 be32_to_cpu(p[0]), be32_to_cpu(p[1]),
			 be32_to_cpu(p[2]), be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_XRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
	case MLX5_CMD_OP_DEALLOC_MEMIC:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
	case MLX5_CMD_OP_DEALLOC_SF:
	case MLX5_CMD_OP_DESTROY_UCTX:
	case MLX5_CMD_OP_DESTROY_UMEM:
	case MLX5_CMD_OP_MODIFY_RQT:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_FPGA_CREATE_QP:
	case MLX5_CMD_OP_FPGA_MODIFY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP:
	case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_UCTX:
	case MLX5_CMD_OP_CREATE_UMEM:
	case MLX5_CMD_OP_ALLOC_MEMIC:
	case MLX5_CMD_OP_MODIFY_XRQ:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_QUERY_VHCA_STATE:
	case MLX5_CMD_OP_MODIFY_VHCA_STATE:
	case MLX5_CMD_OP_ALLOC_SF:
	case MLX5_CMD_OP_SUSPEND_VHCA:
	case MLX5_CMD_OP_RESUME_VHCA:
	case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
	case MLX5_CMD_OP_SAVE_VHCA_STATE:
	case MLX5_CMD_OP_LOAD_VHCA_STATE:
	case MLX5_CMD_OP_SYNC_CRYPTO:
	case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -ENOLINK;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:			return "OK";
	case MLX5_CMD_STAT_INT_ERR:		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:	return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:	return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:		return "resource busy";
	case MLX5_CMD_STAT_NOT_READY:		return "FW not ready";
	case MLX5_CMD_STAT_LIM_ERR:		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:	return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:	return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:	return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:	return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return "bad size too many outstanding CQEs";
	default:				return "unknown status";
	}
}
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:			return 0;
	case MLX5_CMD_STAT_INT_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:	return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:	return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:		return -EBUSY;
	case MLX5_CMD_STAT_NOT_READY:		return -EAGAIN;
	case MLX5_CMD_STAT_LIM_ERR:		return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:	return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:		return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:	return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:	return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:	return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default:				return -EIO;
	}
}
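
/* Illustrative only, not part of the driver: a minimal sketch of how a caller
 * might decode the FW outbox with the two helpers above, assuming the standard
 * mbox_out layout (status byte plus 32-bit syndrome). The function name
 * mlx5_example_decode_outbox is hypothetical.
 */
static int __maybe_unused mlx5_example_decode_outbox(struct mlx5_core_dev *dev,
						     void *out)
{
	u8 status = MLX5_GET(mbox_out, out, status);
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);

	if (status)
		mlx5_core_dbg(dev, "FW status %s(0x%x), syndrome 0x%x\n",
			      cmd_status_str(status), status, syndrome);
	/* Map the FW status byte onto a kernel errno */
	return cmd_status_to_err(status);
}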
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
ent->idx, mlx5_command_str(ent->op), ent->op);
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
out:
	cmd_ent_put(ent); /* for the cmd_ent_get() taken when the delayed work was scheduled */
}
if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
	cmd_ent_get(ent); /* for the _real_ FW event on completion */
	/* Skip sending command to fw if internal error */
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
ent->ret = -ENXIO;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); return;
}
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	/* if not in polling don't use ent after this point */
	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
rmb();
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
static int deliv_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:			return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:	return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:	return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:	return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:	return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:	return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:	return "bad command descriptor type";
	default:					return "unknown status code";
	}
}
	/* Re-wait on the ent->done after executing the recovery flow. If the
	 * recovery flow (or any other recovery flow running simultaneously)
	 * has recovered an EQE, it should cause the entry to be completed by
	 * the command interface.
	 */
	if (wait_for_completion_timeout(&ent->done, timeout)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
			       mlx5_command_str(ent->op), ent->op);
		return;
	}
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
mlx5_command_str(ent->op), ent->op);
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(ent->op), ent->op);
	} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
	} else if (err == -EBUSY) {
mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
return err;
}
/* Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchronous completion
 *
 * return value in case (!callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret > 0 : Command execution couldn't be performed by firmware
 *	ret == 0: Command was executed by FW, Caller must check FW outbox status.
 *
 * return value in case (callback):
 *	ret < 0 : Command execution couldn't be submitted by driver
 *	ret == 0: Command will be submitted to FW for execution
 *		  and the callback will be called for further status updates
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback, void *context,
			   int page_queue, u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	u8 status = 0;
	int err = 0;
	s64 ds;

	if (callback && page_queue)
		return -EINVAL;

	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
			    callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* put for this ent is when consumed, depending on the use case
	 * 1) (!callback) blocking flow: by caller after wait_func completes
	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
	 */
	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	init_completion(&ent->slotted);
	if (!callback)
		init_completion(&ent->done);
/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
		if (test_bit(i, &vector)) {
ent = cmd->ent_arr[i];
			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}
if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
!opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
/* final consumer is done, release ent */
cmd_ent_put(ent);
callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
complete(&ent->done);
}
}
}
}
/* wait for pending handlers to complete */
mlx5_eq_synchronize_cmd_irq(dev);
spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering the completions
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
cmd_ent_get(cmd->ent_arr[i]);
vector |= MLX5_TRIGGERED_CMD_COMP;
spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
for (i = 0; i < dev->profile.num_cmd_caches; i++) {
		ch = &cmd->cache[i];
		if (in_size > ch->max_inbox_size)
			continue;

		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
			continue;
}
		msg = list_entry(ch->head.next, typeof(*msg), list);
		/* For cached lists, we must explicitly state what is
		 * the real size
		 */
msg->len = in_size;
list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
		break;
}
/* Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb, *outb;
	u16 opcode = in_to_opcode(in);
	bool throttle_locked = false;
	bool unpriv_locked = false;
	u16 uid = in_to_uid(in);
	int pages_queue;
	gfp_t gfp;
	u8 token;
	int err;

	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
		return -ENXIO;

	if (!callback) {
		/* The semaphore is already held for callback commands. It was
		 * acquired in mlx5_cmd_exec_cb()
		 */
		if (uid && mlx5_has_privileged_uid(dev)) {
			if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
unpriv_locked = true;
down(&dev->cmd.vars.unprivileged_sem);
}
		} else if (mlx5_cmd_is_throttle_opcode(opcode)) {
throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
/**
 * mlx5_cmd_exec - Executes a fw command, wait for completion
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *	    and outbox status is ok.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
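
/* Illustrative only, not part of the driver: a minimal sketch of how a
 * kernel-side caller typically drives mlx5_cmd_exec(), assuming the standard
 * mlx5_ifc NOP command layout. The function name mlx5_example_exec_nop is
 * hypothetical.
 */
static int __maybe_unused mlx5_example_exec_nop(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(nop_in)] = {};
	u32 out[MLX5_ST_SZ_DW(nop_out)] = {};

	/* Build the inbox with MLX5_SET; mlx5_cmd_exec() checks both the
	 * delivery status and the FW outbox status on our behalf.
	 */
	MLX5_SET(nop_in, in, opcode, MLX5_CMD_OP_NOP);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}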
/**
 * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
 *	Needed for driver force teardown, when command completion EQ
 *	will not be available to complete the command
 *
 * @dev: mlx5 core device
 * @in: inbox mlx5_ifc command buffer
 * @in_size: inbox buffer size
 * @out: outbox mlx5_ifc buffer
 * @out_size: outbox size
 *
 * @return: 0 if no error, FW command execution was successful
 *	    and outbox status is ok.
 */
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
u16 opcode = in_to_opcode(in);
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	if (!atomic_dec_and_test(&ctx->num_inflight))
		wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
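
/* Illustrative only, not part of the driver: a minimal sketch of the async
 * context lifecycle, assuming the caller has already run
 * mlx5_cmd_init_async_ctx() and will run mlx5_cmd_cleanup_async_ctx() on
 * teardown. The struct and function names below are hypothetical. The
 * completion callback may not sleep (see the notes above cmd_exec()); the
 * status it receives already folds in the FW outbox status.
 */
struct mlx5_example_async_nop {
	struct mlx5_async_work work;
	u32 in[MLX5_ST_SZ_DW(nop_in)];
	u32 out[MLX5_ST_SZ_DW(nop_out)];
};

static void mlx5_example_nop_done(int status, struct mlx5_async_work *work)
{
	struct mlx5_example_async_nop *nop =
		container_of(work, struct mlx5_example_async_nop, work);

	/* kfree() does not sleep, so it is safe in this non-sleeping context */
	kfree(nop);
}

static int __maybe_unused mlx5_example_exec_nop_async(struct mlx5_core_dev *dev,
						      struct mlx5_async_ctx *ctx)
{
	struct mlx5_example_async_nop *nop;
	int err;

	nop = kzalloc(sizeof(*nop), GFP_KERNEL);
	if (!nop)
		return -ENOMEM;

	MLX5_SET(nop_in, nop->in, opcode, MLX5_CMD_OP_NOP);
	err = mlx5_cmd_exec_cb(ctx, nop->in, sizeof(nop->in), nop->out,
			       sizeof(nop->out), mlx5_example_nop_done,
			       &nop->work);
	if (err)
		kfree(nop);
	return err;
}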
ctx = work->ctx;
dev = ctx->dev;
throttle_locked = work->throttle_locked;
unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	/* Can't access "work" from this point on. It could have been freed in
	 * the callback.
	 */
	if (throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (unpriv_locked)
		up(&dev->cmd.vars.unprivileged_sem);
	if (atomic_dec_and_test(&ctx->num_inflight))
		complete(&ctx->inflight_done);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	struct mlx5_core_dev *dev = ctx->dev;
	u16 uid;
	int ret;

	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;

	if (uid && mlx5_has_privileged_uid(dev)) {
		if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
			if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
				ret = -EBUSY;
				goto dec_num_inflight;
}
work->unpriv_locked = true;
}
	} else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
		if (down_trylock(&dev->cmd.vars.throttle_sem)) {
			ret = -EBUSY;
			goto dec_num_inflight;
}
work->throttle_locked = true;
}
ret = cmd_exec(dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret)
		goto sem_up;
return 0;
sem_up:
	if (work->throttle_locked)
		up(&dev->cmd.vars.throttle_sem);
	if (work->unpriv_locked)
up(&dev->cmd.vars.unprivileged_sem);
dec_num_inflight:
	if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
	MLX5_SET(allow_other_vhca_access_in, in, opcode,
		 MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
	MLX5_SET(allow_other_vhca_access_in, in, object_type_to_be_accessed,
		 attr->obj_type);
	MLX5_SET(allow_other_vhca_access_in, in, object_id_to_be_accessed,
		 attr->obj_id);
key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
memcpy(key, attr->access_key, sizeof(attr->access_key));
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}