/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * Command completion status codes returned by firmware in the HCR
 * status field.  The comment on each line gives the condition the
 * code reports.
 */
enum {
	CMD_STAT_OK		= 0x00,	/* command completed successfully */
	CMD_STAT_INTERNAL_ERR	= 0x01,	/* internal error (such as a bus error) while processing command */
	CMD_STAT_BAD_OP		= 0x02,	/* operation/command or opcode modifier not supported */
	CMD_STAT_BAD_PARAM	= 0x03,	/* parameter not supported or out of range */
	CMD_STAT_BAD_SYS_STATE	= 0x04,	/* system not enabled or bad system state */
	CMD_STAT_BAD_RESOURCE	= 0x05,	/* attempt to access reserved or unallocated resource */
	CMD_STAT_RESOURCE_BUSY	= 0x06,	/* requested resource is currently executing a command, or is otherwise busy */
	CMD_STAT_EXCEED_LIM	= 0x08,	/* required capability exceeds device limits */
	CMD_STAT_BAD_RES_STATE	= 0x09,	/* resource not in the appropriate state or ownership */
	CMD_STAT_BAD_INDEX	= 0x0a,	/* index out of range */
	CMD_STAT_BAD_NVMEM	= 0x0b,	/* FW image corrupted */
	CMD_STAT_ICM_ERROR	= 0x0c,	/* error in ICM mapping (e.g. not enough auxiliary ICM pages) */
	CMD_STAT_BAD_QP_STATE	= 0x10,	/* QP/EE not in the presumed state */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,	/* bad segment parameters (address/size) */
	CMD_STAT_REG_BOUND	= 0x21,	/* memory region has memory windows bound to it */
	CMD_STAT_LAM_NOT_PRE	= 0x22,	/* HCA local attached memory not present */
	CMD_STAT_BAD_PKT	= 0x30,	/* bad management packet (silently discarded) */
	CMD_STAT_BAD_SIZE	= 0x40,	/* more outstanding CQEs in CQ than new CQ size */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,	/* multi-function device support required */
};
/*
 * Map a (negative) Linux errno value to the firmware command status
 * code that best corresponds to it.  Any value without a specific
 * mapping is reported as CMD_STAT_INTERNAL_ERR.
 */
static u8 mlx4_errno_to_status(int errno)
{
	if (errno == -EPERM)
		return CMD_STAT_BAD_OP;
	if (errno == -EINVAL)
		return CMD_STAT_BAD_PARAM;
	if (errno == -ENXIO)
		return CMD_STAT_BAD_SYS_STATE;
	if (errno == -EBUSY)
		return CMD_STAT_RESOURCE_BUSY;
	if (errno == -ENOMEM)
		return CMD_STAT_EXCEED_LIM;
	if (errno == -ENFILE)
		return CMD_STAT_ICM_ERROR;
	return CMD_STAT_INTERNAL_ERR;
}
/*
 * Choose the value to report to the caller when a command fails
 * because the device is in internal-error state.
 *
 * Teardown/cleanup commands (unmaps, HW2SW transitions, resource
 * frees, close operations and flow-steering detach) return
 * CMD_STAT_OK so that software resource release can still proceed;
 * every other command reports an internal error.
 *
 * Fixes vs. original: "staticint" typo, and the switch was truncated
 * (missing default case and closing braces) -- restored per upstream.
 */
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* On Detach case return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}
/*
 * Decide whether a firmware error on a closing/teardown command must
 * be treated as fatal.
 *
 * Returns 1 when the failure is fatal, 0 otherwise.
 *
 * Fix vs. original: "staticint" typo (missing space).
 */
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that memory region has memory windows bound to it
	 * which may result from invalid user space usage and is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}
/*
 * Adjust a command's error code when the reset flow is active.
 *
 * Only if reset flow is really active is the return code based on the
 * command (via mlx4_internal_err_ret_value()); otherwise the current
 * error code is returned unchanged.  When active, the device is also
 * moved into the error state.
 *
 * Fixes vs. original: "staticint" typo, and the function body was
 * truncated (missing "return err;" and closing brace) -- restored per
 * upstream.
 */
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}
/* To avoid writing to unknown addresses after the device state was * changed to internal error and the function was rest, * check the INTERNAL_ERROR flag which is updated under * device_state_mutex lock.
*/
mutex_lock(&dev->persist->device_state_mutex);
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mutex_unlock(&dev->persist->device_state_mutex); return -EIO;
}
/* First, verify that the master reports correct status */ if (comm_pending(dev)) {
mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd); return -EAGAIN;
}
/* Write command */
down(&priv->cmd.poll_sem); if (mlx4_comm_cmd_post(dev, cmd, param)) { /* Only in case the device state is INTERNAL_ERROR, * mlx4_comm_cmd_post returns with an error
*/
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); goto out;
}
end = msecs_to_jiffies(timeout) + jiffies; while (comm_pending(dev) && time_before(jiffies, end))
cond_resched();
ret_from_pending = comm_pending(dev); if (ret_from_pending) { /* check if the slave is trying to boot in the middle of * FLR process. The only non-zero result in the RESET command
* is MLX4_DELAY_RESET_SLAVE*/ if ((MLX4_COMM_CMD_RESET == cmd)) {
err = MLX4_DELAY_RESET_SLAVE; goto out;
} else {
mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
cmd);
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
}
}
if (err)
mlx4_enter_error_state(dev->persist);
out:
up(&priv->cmd.poll_sem); return err;
}
if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) { /* Only in case the device state is INTERNAL_ERROR, * mlx4_comm_cmd_post returns with an error
*/
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR); goto out;
}
err = context->result; if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
vhcr_cmd, context->fw_status); if (mlx4_closing_cmd_fatal_error(op, context->fw_status)) goto out_reset;
}
/* wait for comm channel ready * this is necessary for prevention the race * when switching between event to polling mode * Skipping this section in case the device is in FATAL_ERROR state, * In this state, no commands are sent via the comm channel until * the device has returned from reset.
*/ if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
end = msecs_to_jiffies(timeout) + jiffies; while (comm_pending(dev) && time_before(jiffies, end))
cond_resched();
} goto out;
mutex_lock(&dev->persist->device_state_mutex); /* To avoid writing to unknown addresses after the device state was * changed to internal error and the chip was reset, * check the INTERNAL_ERROR flag which is updated under * device_state_mutex lock.
*/ if (pci_channel_offline(dev->persist->pdev) ||
(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { /* * Device is going through error recovery * and cannot accept commands.
*/ goto out;
}
end = jiffies; if (event)
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) { if (pci_channel_offline(dev->persist->pdev)) { /* * Device is going through error recovery * and cannot accept commands.
*/ goto out;
}
if (op == MLX4_CMD_SENSE_PORT) {
ret_wait =
wait_for_completion_interruptible_timeout(&context->done,
msecs_to_jiffies(timeout)); if (ret_wait < 0) {
context->fw_status = 0;
context->out_param = 0;
context->result = 0;
}
} else {
ret_wait = (long)wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout));
} if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op); if (op == MLX4_CMD_NOP) {
err = -EBUSY; goto out;
} else {
err = -EIO; goto out_reset;
}
}
err = context->result; if (err) { /* Since we do not want to have this error message always * displayed at driver start when there are ConnectX2 HCAs * on the host, we deprecate the error message for this * specific command/input_mod/opcode_mod/fw-status to be debug.
*/ if (op == MLX4_CMD_SET_PORT &&
(in_modifier == 1 || in_modifier == 2) &&
op_modifier == MLX4_SET_PORT_IB_OPCODE &&
context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status); else
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status); if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
err = mlx4_internal_err_ret_value(dev, op, op_modifier); elseif (mlx4_closing_cmd_fatal_error(op, context->fw_status)) goto out_reset;
slave_port = vhcr->in_modifier;
port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
network_view = !!(vhcr->op_modifier & 0x8);
if (smp->base_version == 1 &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->class_version == 1) { /* host view is paravirtualized */ if (!network_view && smp->method == IB_MGMT_METHOD_GET) { if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
index = be32_to_cpu(smp->attr_mod); if (port < 1 || port > dev->caps.num_ports) return -EINVAL;
table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, sizeof(*table) * 32, GFP_KERNEL);
if (!table) return -ENOMEM; /* need to get the full pkey table because the paravirtualized * pkeys may be scattered among several pkey blocks.
*/
err = get_full_pkey_table(dev, port, table, inbox, outbox); if (!err) { for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
outtab[vidx % 32] = cpu_to_be16(table[pidx]);
}
}
kfree(table); return err;
} if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) { /*get the slave specific caps:*/ /*do the command */
smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); /* modify the response for slaves */ if (!err && slave != mlx4_master_func_num(dev)) {
u8 *state = outsmp->data + PORT_STATE_OFFSET;
if (err) { if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) { if (vhcr->op == MLX4_CMD_ALLOC_RES &&
(vhcr->in_modifier & 0xff) == RES_COUNTER &&
err == -EDQUOT)
mlx4_dbg(dev, "Unable to allocate counter for slave %d (%d)\n",
slave, err); else
mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
}
vhcr_cmd->status = mlx4_errno_to_status(err); goto out_status;
}
/* Write outbox if command completed successfully */ if (cmd->has_outbox && !vhcr_cmd->status) {
ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
vhcr->out_param,
MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED); if (ret) { /* If we failed to write back the outbox after the *command was successfully executed, we must fail this
* slave, as it is now in undefined state */ if (!(dev->persist->state &
MLX4_DEVICE_STATE_INTERNAL_ERROR))
mlx4_err(dev, "%s:Failed writing outbox\n", __func__); goto out;
}
}
out_status: /* DMA back vhcr result */ if (!in_vhcr) {
ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
priv->mfunc.master.slave_state[slave].vhcr_dma,
ALIGN(sizeof(struct mlx4_vhcr),
MLX4_ACCESS_MEM_ALIGN),
MLX4_CMD_WRAPPED); if (ret)
mlx4_err(dev, "%s:Failed writing vhcr result\n",
__func__); elseif (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
slave);
}
if (!(priv->mfunc.master.slave_state[slave].active &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) { /* even if the UPDATE_QP command isn't supported, we still want * to set this VF link according to the admin directive
*/
vp_oper->state.link_state = vp_admin->link_state; return -1;
}
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
vp_admin->default_vlan, vp_admin->default_qos,
vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) { if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan,
&admin_vlan_ix); if (err) {
kfree(work);
mlx4_warn(&priv->dev, "No vlan resources slave %d, port %d\n",
slave, port); return err;
}
} else {
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
}
/* save original vlan ix and vlan id */
work->orig_vlan_id = vp_oper->state.default_vlan;
work->orig_vlan_ix = vp_oper->vlan_idx;
/* handle new qos */ if (vp_oper->state.default_qos != vp_admin->default_qos)
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
vp_oper->vlan_idx = admin_vlan_ix;
/* Query actual allocated VPP, just to make sure */
err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); if (err) {
mlx4_info(dev, "Failed query available VPPs\n"); return;
}
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31; if (toggle != slave_state[slave].comm_toggle) {
mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
toggle, slave); goto reset_slave;
} if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave); for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
slave_state[slave].event_eq[i].token = 0;
} /*check if we are in the middle of FLR process,
if so return "retry" status to the slave*/ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) goto inform_slave_state;
/* write the version in the event field */
reply |= mlx4_comm_get_version();
goto reset_slave;
} /*command from slave in the middle of FLR*/ if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
slave, cmd); return;
}
switch (cmd) { case MLX4_COMM_CMD_VHCR0: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET) goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0; break; case MLX4_COMM_CMD_VHCR1: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) goto reset_slave;
slave_state[slave].vhcr_dma |= ((u64) param) << 32; break; case MLX4_COMM_CMD_VHCR2: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1) goto reset_slave;
slave_state[slave].vhcr_dma |= ((u64) param) << 16; break; case MLX4_COMM_CMD_VHCR_EN: if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2) goto reset_slave;
slave_state[slave].vhcr_dma |= param; if (mlx4_master_activate_admin_state(priv, slave)) goto reset_slave;
slave_state[slave].active = true;
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave); break; case MLX4_COMM_CMD_VHCR_POST: if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
(slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
slave, cmd, slave_state[slave].last_cmd); goto reset_slave;
}
mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex); goto reset_slave;
}
mutex_unlock(&priv->cmd.slave_cmd_mutex); break; default:
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave); goto reset_slave;
}
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = cmd; else
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave); return;
}
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
return;
reset_slave: /* cleanup any slave resources */ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_delete_all_resources_for_slave(dev, slave);
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd); /* Turn on internal error letting slave reset itself * immediately, otherwise it might take till timeout on * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); /*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
memset(&slave_state[slave].event_eq, 0, sizeof(struct mlx4_slave_event_eq_info));
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
wmb();
}
/* master command processing */
/*
 * Workqueue handler that scans the comm-channel arm bit vector and
 * dispatches pending slave commands to mlx4_master_do_cmd().
 *
 * Scanning starts at the slave after the one served last time
 * (master->next_slave) and wraps around once, so no single slave can
 * starve the others.  A slave's command is considered pending when the
 * toggle bit in its slave_write word differs from the toggle in its
 * slave_read word; a stale per-slave toggle in master state is
 * resynchronized before dispatch.  Afterwards the channel is re-armed.
 *
 * Fix vs. original: "constunsignedlong" typo in the find_next_bit()
 * cast (missing spaces).
 */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
	u32 nmbr_bits;
	u32 comm_cmd;
	int i, slave;
	int toggle;
	bool first = true;
	int served = 0;
	int reported = 0;
	u32 slt;

	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
		lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
	nmbr_bits = dev->persist->num_vfs + 1;
	/* round-robin: resume the scan just past the last slave served */
	if (++master->next_slave >= nmbr_bits)
		master->next_slave = 0;
	slave = master->next_slave;
	while (true) {
		slave = find_next_bit((const unsigned long *)&lbit_vec,
				      nmbr_bits, slave);
		if (!first && slave >= master->next_slave)
			break;
		if (slave == nmbr_bits) {
			if (!first)
				break;
			/* wrap around once to cover slaves below next_slave */
			first = false;
			slave = 0;
			continue;
		}
		++reported;
		comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
		slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
		toggle = comm_cmd >> 31;
		if (toggle != slt) {
			if (master->slave_state[slave].comm_toggle
			    != slt) {
				pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
					slave, slt,
					master->slave_state[slave].comm_toggle);
				master->slave_state[slave].comm_toggle =
					slt;
			}
			mlx4_master_do_cmd(dev, slave,
					   comm_cmd >> 16 & 0xff,
					   comm_cmd & 0xffff, toggle);
			++served;
		}
		slave++;
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)); if (wr_toggle == 0xffffffff)
end = jiffies + msecs_to_jiffies(30000); else
end = jiffies + msecs_to_jiffies(5000);
while (time_before(jiffies, end)) {
rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { /* PCI might be offline */
/* If device removal has been requested, * do not continue retrying.
*/ if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_NOWAIT) {
mlx4_warn(dev, "communication channel is offline\n"); return -EIO;
}
/* * we could reach here if for example the previous VM using this * function misbehaved and left the channel with unsynced state. We * should fix this here and give this VM a chance to use a properly * synced channel
*/
mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
priv->cmd.comm_toggle = 0;
return 0;
}
int mlx4_multi_func_init(struct mlx4_dev *dev)
{ struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state; int i, j, err, port;
if (mlx4_is_master(dev))
priv->mfunc.comm =
ioremap(pci_resource_start(dev->persist->pdev,
priv->fw.comm_bar) +
priv->fw.comm_base, MLX4_COMM_PAGESIZE); else
priv->mfunc.comm =
ioremap(pci_resource_start(dev->persist->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); if (!priv->mfunc.comm) {
mlx4_err(dev, "Couldn't map communication vector\n"); goto err_vhcr;
}
if (mlx4_is_master(dev)) { struct mlx4_vf_oper_state *vf_oper; struct mlx4_vf_admin_state *vf_admin;
priv->mfunc.master.slave_state =
kcalloc(dev->num_slaves, sizeof(struct mlx4_slave_state),
GFP_KERNEL); if (!priv->mfunc.master.slave_state) goto err_comm;
priv->mfunc.master.vf_admin =
kcalloc(dev->num_slaves, sizeof(struct mlx4_vf_admin_state),
GFP_KERNEL); if (!priv->mfunc.master.vf_admin) goto err_comm_admin;
priv->mfunc.master.vf_oper =
kcalloc(dev->num_slaves, sizeof(struct mlx4_vf_oper_state),
GFP_KERNEL); if (!priv->mfunc.master.vf_oper) goto err_comm_oper;
priv->mfunc.master.next_slave = 0;
for (i = 0; i < dev->num_slaves; ++i) {
vf_admin = &priv->mfunc.master.vf_admin[i];
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.29 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.