/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * mlx4_init_roce_gid_table - prepare a per-port RoCE GID table for use.
 * @dev:   device the table belongs to (unused here, kept for symmetry).
 * @table: table to initialize.
 *
 * Sets up the table's mutex and zeroes every GID entry.
 */
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table)
{
	int idx;

	mutex_init(&table->mutex);

	/* Start from a fully cleared table: every slot holds the zero GID. */
	for (idx = 0; idx < MLX4_ROCE_MAX_GIDS; ++idx)
		memset(table->roce_gids[idx].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}
/*
 * validate_index - sanity-check a MAC table index.
 * @dev:   device, used only for the warning printout.
 * @table: port MAC table to check against.
 * @index: candidate index into @table->entries.
 *
 * Returns 0 when @index is in range and refers to a populated entry,
 * -EINVAL otherwise.
 */
/* Fix: original read "staticint" (missing space) and would not compile. */
static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid Mac entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { if (table->refs[i] &&
(MLX4_MAC_MASK & mac) ==
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) return i;
} /* Mac not found */ return -EINVAL;
}
if (need_mf_bond) { int index_at_port = -1; int index_at_dup_port = -1;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
index_at_port = i; if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
index_at_dup_port = i;
}
/* check that same mac is not in the tables at different indices */ if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the mac is already in the primary table, the slot must be * available in the duplicate table as well.
*/ if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
} /* If the mac is already in the duplicate table, check that the * corresponding index is not occupied in the primary table, or * the primary table already contains the mac at the same index. * Otherwise, you cannot bond (primary contains a different mac * at that index).
*/ if (index_at_dup_port >= 0) { if (!table->refs[index_at_dup_port] ||
((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port; else
can_mf_bond = false;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { if (!table->refs[i]) { if (free < 0)
free = i; if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { if (!dup_table->refs[i])
free_for_dup = i;
} continue;
}
if ((MLX4_MAC_MASK & mac) ==
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { /* MAC already registered, increment ref count */
err = i;
++table->refs[i]; if (dup) {
u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
if (dup_mac != mac || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
mac, dup_port, i);
}
} goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) { if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) { /* No free mac entries */
err = -ENOSPC; goto out;
}
/* Register new MAC */
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
if (port < 1 || port > dev->caps.num_ports) {
mlx4_warn(dev, "invalid port number (%d), aborting...\n", port); return;
}
info = &mlx4_priv(dev)->port[port];
table = &info->mac_table;
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
index); if (!table->refs[index])
dup_table->is_dup[index] = false; goto out;
}
table->entries[index] = 0; if (mlx4_set_port_mac_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
--table->total;
if (dup) {
dup_table->is_dup[index] = false; if (dup_table->refs[index]) goto out;
dup_table->entries[index] = 0; if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
if (table->total == table->max) { /* No free vlan entries */
err = -ENOSPC; goto out;
}
if (need_mf_bond) { int index_at_port = -1; int index_at_dup_port = -1;
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
index_at_port = i; if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
index_at_dup_port = i;
} /* check that same vlan is not in the tables at different indices */ if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the vlan is already in the primary table, the slot must be * available in the duplicate table as well.
*/ if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
} /* If the vlan is already in the duplicate table, check that the * corresponding index is not occupied in the primary table, or * the primary table already contains the vlan at the same index. * Otherwise, you cannot bond (primary contains a different vlan * at that index).
*/ if (index_at_dup_port >= 0) { if (!table->refs[index_at_dup_port] ||
(vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port; else
can_mf_bond = false;
}
}
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { if (!table->refs[i]) { if (free < 0)
free = i; if (free_for_dup < 0 && need_mf_bond && can_mf_bond) { if (!dup_table->refs[i])
free_for_dup = i;
}
}
if (dup_vlan != vlan || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
vlan, dup_port, i);
}
} goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) { if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); goto out;
}
if (index < MLX4_VLAN_REGULAR) {
mlx4_warn(dev, "Trying to free special vlan index %d\n", index); goto out;
}
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
table->refs[index], index); if (!table->refs[index])
dup_table->is_dup[index] = false; goto out;
}
table->entries[index] = 0; if (mlx4_set_port_vlan_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
--table->total; if (dup) {
dup_table->is_dup[index] = false; if (dup_table->refs[index]) goto out;
dup_table->entries[index] = 0; if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
--dup_table->total;
}
out: if (dup) { if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
}
if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries); if (ret)
mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
} if (!ret && update2) {
ret = mlx4_set_port_mac_table(dev, 2, t2->entries); if (ret)
mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex); return ret;
}
/*
 * mlx4_unbond_mac_table - stop mirroring the MAC tables of ports 1 and 2.
 *
 * While bonded the two port tables must be identical; verify that first,
 * then drop the duplicate flag on every entry and remove entries that only
 * existed as mirrors (reference count of zero).  Firmware is re-programmed
 * only for ports whose table actually changed.
 *
 * Returns 0 on success, -EINVAL if the tables are out of sync, or the
 * error from the firmware SET_PORT command.
 */
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	bool dirty1 = false;
	bool dirty2 = false;
	int ret = 0;
	int ret1;
	int i;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Bonded tables must be exact mirrors of each other. */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!t1->entries[i])
			continue;

		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			/* Pure mirror entry on port 1 - drop it. */
			t1->entries[i] = 0;
			dirty1 = true;
		}

		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			/* Pure mirror entry on port 2 - drop it. */
			t2->entries[i] = 0;
			dirty2 = true;
		}
	}

	if (dirty1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
	}
	if (dirty2) {
		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
			/* Report the port-2 failure even if port 1 succeeded. */
			ret = ret1;
		}
	}

unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/*
 * mlx4_bond_vlan_table - mirror the VLAN tables of ports 1 and 2.
 *
 * Takes both table mutexes, then refuses to bond (-EINVAL) if any index
 * holds two different non-zero VLAN entries, since such a slot cannot be
 * mirrored.  On success the per-port tables are pushed to firmware for
 * every port whose table was modified.
 *
 * NOTE(review): in the code visible here, update1/update2 are never set to
 * true between the consistency check and the firmware writes — the loop
 * that copies missing entries between the tables appears to be missing
 * (likely lost in extraction; compare mlx4_bond_mac_table upstream).
 * As written, the mlx4_set_port_vlan_table() calls are unreachable.
 * Confirm against the original source before relying on this function.
 */
int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{ struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table; struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table; int ret = 0; int i; bool update1 = false; bool update2 = false;
mutex_lock(&t1->mutex);
/* A slot with two different non-zero entries cannot be mirrored. */
mutex_lock(&t2->mutex); for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) { if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
ret = -EINVAL; goto unlock;
}
}
/* Push modified tables to firmware; skip port 2 if port 1 failed. */
if (update1) {
ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
} if (!ret && update2) {
ret = mlx4_set_port_vlan_table(dev, 2, t2->entries); if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
/* Release in reverse acquisition order. */
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex); return ret;
}
/*
 * mlx4_unbond_vlan_table - stop mirroring the VLAN tables of ports 1 and 2.
 *
 * Mirror image of mlx4_unbond_mac_table(): verify the two tables are still
 * identical (they must be while bonded), clear the duplicate flag on every
 * entry, and delete entries whose reference count is zero - those existed
 * only as mirrors.  Firmware is re-programmed only for ports whose table
 * actually changed.
 *
 * Returns 0 on success, -EINVAL if the tables are out of sync, or the
 * error from the firmware SET_PORT command.
 */
int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	bool dirty1 = false;
	bool dirty2 = false;
	int ret = 0;
	int ret1;
	int i;

	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Bonded tables must be exact mirrors of each other. */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!t1->entries[i])
			continue;

		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			/* Pure mirror entry on port 1 - drop it. */
			t1->entries[i] = 0;
			dirty1 = true;
		}

		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			/* Pure mirror entry on port 2 - drop it. */
			t2->entries[i] = 0;
			dirty2 = true;
		}
	}

	if (dirty1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
	}
	if (dirty2) {
		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
			/* Report the port-2 failure even if port 1 succeeded. */
			ret = ret1;
		}
	}

unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{ struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
u8 *inbuf, *outbuf; int err;
inmailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(inmailbox)) return PTR_ERR(inmailbox);
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{ int vfs; int slave_gid = slave; unsigned i; struct mlx4_slaves_pport slaves_pport; struct mlx4_active_ports actv_ports; unsigned max_port_p_one;
staticint mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, int port, struct mlx4_cmd_mailbox *mailbox)
{ struct mlx4_roce_gid_entry *gid_entry_mbox; struct mlx4_priv *priv = mlx4_priv(dev); int num_gids, base, offset; int i, err;
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
base = mlx4_get_base_gid_ix(dev, slave, port);
memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
mutex_lock(&(priv->port[port].gid_table.mutex)); /* Zero-out gids belonging to that slave in the port GID table */ for (i = 0, offset = base; i < num_gids; offset++, i++)
memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to mailbox for passing to FW */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
memcpy(gid_entry_mbox->raw,
priv->port[port].gid_table.roce_gids[i].raw,
MLX4_ROCE_GID_ENTRY_SIZE);
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
{ struct mlx4_active_ports actv_ports; struct mlx4_cmd_mailbox *mailbox; int num_eth_ports, err; int i;
if (slave < 0 || slave > dev->persist->num_vfs) return;
actv_ports = mlx4_get_active_ports(dev, slave);
for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { if (test_bit(i, actv_ports.ports)) { if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) continue;
num_eth_ports++;
}
}
if (!num_eth_ports) return;
/* have ETH ports. Alloc mailbox for SET_PORT command */
mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return;
for (i = 0; i < dev->caps.num_ports; i++) { if (test_bit(i, actv_ports.ports)) { if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) continue;
err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); if (err)
mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
slave, i + 1, err);
}
}
/* Mtu is configured as the max USER_MTU among all * the functions on the port.
*/
mtu = be16_to_cpu(gen_context->mtu);
mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
prev_mtu = slave_st->mtu[port];
slave_st->mtu[port] = mtu; if (mtu > master->max_mtu[port])
master->max_mtu[port] = mtu; if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) { int i;
slave_st->mtu[port] = mtu;
master->max_mtu[port] = mtu; for (i = 0; i < dev->num_slaves; i++)
master->max_mtu[port] =
max_t(u16, master->max_mtu[port],
master->slave_state[i].mtu[port]);
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
}
/* User Mtu is configured as the max USER_MTU among all * the functions on the port.
*/
user_mtu = be16_to_cpu(gen_context->user_mtu);
user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
prev_user_mtu = slave_st->user_mtu[port];
slave_st->user_mtu[port] = user_mtu; if (user_mtu > master->max_user_mtu[port])
master->max_user_mtu[port] = user_mtu; if (user_mtu < prev_user_mtu &&
prev_user_mtu == master->max_user_mtu[port]) { int i;
slave_st->user_mtu[port] = user_mtu;
master->max_user_mtu[port] = user_mtu; for (i = 0; i < dev->num_slaves; i++)
master->max_user_mtu[port] =
max_t(u16, master->max_user_mtu[port],
master->slave_state[i].user_mtu[port]);
}
gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
}
if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
mlx4_en_set_port_mtu(dev, slave, port,
gen_context);
if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
mlx4_en_set_port_user_mtu(dev, slave, port,
gen_context);
if (gen_context->flags &
(MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
mlx4_en_set_port_global_pause(dev, slave,
gen_context);
break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids * need a FOR-loop here over number of gids the guest has. * 1. Check no duplicates in gids passed by slave
*/
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
base = mlx4_get_base_gid_ix(dev, slave, port);
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0; i < num_gids; gid_entry_mbox++, i++) { if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, sizeof(zgid_entry))) continue;
gid_entry_mb1 = gid_entry_mbox + 1; for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { if (!memcmp(gid_entry_mb1->raw,
zgid_entry.raw, sizeof(zgid_entry))) continue; if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, sizeof(gid_entry_mbox->raw))) { /* found duplicate */ return -EINVAL;
}
}
}
/* 2. Check that do not have duplicates in OTHER * entries in the port GID table
*/
mutex_lock(&(priv->port[port].gid_table.mutex)); for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { if (i >= base && i < base + num_gids) continue; /* don't compare to slave's current gids */
gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) continue;
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (j = 0; j < num_gids; gid_entry_mbox++, j++) { if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, sizeof(zgid_entry))) continue; if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, sizeof(gid_entry_tbl->raw))) { /* found duplicate */
mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
mutex_unlock(&(priv->port[port].gid_table.mutex)); return -EINVAL;
}
}
}
/* insert slave GIDs with memcpy, starting at slave's base index */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to current mailbox for passing to FW */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
memcpy(gid_entry_mbox->raw,
priv->port[port].gid_table.roce_gids[i].raw,
MLX4_ROCE_GID_ENTRY_SIZE);
/* Slaves are not allowed to SET_PORT beacon (LED) blink */ if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave); return -EPERM;
}
/* For IB, we only consider: * - The capability mask, which is set to the aggregate of all * slave function capabilities * - The QKey violatin counter - reset according to each request.
*/
/* slave may not set the IS_SM capability for the port */ if (slave != mlx4_master_func_num(dev) &&
(be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM)) return -EINVAL;
/* No DEV_MGMT in multifunc mode */ if (mlx4_is_mfunc(dev) &&
(be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP)) return -EINVAL;
agg_cap_mask = 0;
slave_cap_mask =
priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask; for (i = 0; i < dev->num_slaves; i++)
agg_cap_mask |=
priv->mfunc.master.slave_state[i].ib_cap_mask[port];
/* only clear mailbox for guests. Master may be setting * MTU or PKEY table size
*/ if (slave != dev->caps.function)
memset(inbox->buf, 0, 256); if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
((__be32 *) inbox->buf)[2] = agg_cap_mask;
} else {
((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
((__be32 *) inbox->buf)[1] = agg_cap_mask;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id)
{ struct mlx4_priv *priv = mlx4_priv(dev); int i, found_ix = -1; int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; struct mlx4_slaves_pport slaves_pport; unsigned num_vfs; int slave_gid;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
MLX4_ROCE_GID_ENTRY_SIZE)) {
found_ix = i; break;
}
}
if (found_ix >= 0) { /* Calculate a slave_gid which is the slave number in the gid * table and not a globally unique slave number.
*/ if (found_ix < MLX4_ROCE_PF_GIDS)
slave_gid = 0; elseif (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
(vf_gids / num_vfs + 1))
slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
(vf_gids / num_vfs + 1)) + 1; else
slave_gid =
((found_ix - MLX4_ROCE_PF_GIDS -
((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
(vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
/* Calculate the globally unique slave id */ if (slave_gid) { struct mlx4_active_ports exclusive_ports; struct mlx4_active_ports actv_ports; struct mlx4_slaves_pport slaves_pport_actv; unsigned max_port_p_one; int num_vfs_before = 0; int candidate_slave_gid;
/* Calculate how many VFs are on the previous port, if exists */ for (i = 1; i < port; i++) {
bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
set_bit(i - 1, exclusive_ports.ports);
slaves_pport_actv =
mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
num_vfs_before += bitmap_weight(
slaves_pport_actv.slaves,
dev->persist->num_vfs + 1);
}
/* candidate_slave_gid isn't necessarily the correct slave, but * it has the same number of ports and is assigned to the same * ports as the real slave we're looking for. On dual port VF, * slave_gid = [single port VFs on port <port>] + * [offset of the current slave from the first dual port VF] + * 1 (for the PF).
*/
candidate_slave_gid = slave_gid + num_vfs_before;
switch (err) { case CABLE_INF_INV_PORT: return"invalid port selected"; case CABLE_INF_OP_NOSUP: return"operation not supported for this port (the port is of type CX4 or internal)"; case CABLE_INF_NOT_CONN: return"cable is not connected"; case CABLE_INF_NO_EEPRM: return"the connected cable has no EPROM (passive copper cable)"; case CABLE_INF_PAGE_ERR: return"page number is greater than 15"; case CABLE_INF_INV_ADDR: return"invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; case CABLE_INF_I2C_ADDR: return"invalid I2C slave address"; case CABLE_INF_QSFP_VIO: return"at least one cable violates the QSFP specification and ignores the modsel signal"; case CABLE_INF_I2C_BUSY: return"I2C bus is constantly busy";
} return"Unknown Error";
}
/**
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer parameter.
 * Returns num of read bytes on success or a negative error
 * code.
 *
 * NOTE(review): in the code visible here, @outbox, @inmad and @outmad are
 * used (outbox->dma, outmad->status, outmad->data, and both are freed at
 * "out:") without any visible allocation or MAD-header setup - the mailbox
 * allocation for @outbox and the in/out MAD initialization appear to have
 * been lost in extraction.  Confirm against the original source.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
u16 offset, u16 size, u8 *data)
{ struct mlx4_cmd_mailbox *inbox, *outbox; struct mlx4_mad_ifc *inmad, *outmad; struct mlx4_cable_info *cable_info;
u8 module_id, i2c_addr, page_num; int ret;
/* Clamp the request to what a single MAD transaction can carry. */
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
ret = mlx4_get_module_id(dev, port, &module_id); if (ret) return ret;
/* Pick I2C address/page and translate @offset per module flavor. */
switch (module_id) { case MLX4_MODULE_ID_SFP:
mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); break; case MLX4_MODULE_ID_QSFP: case MLX4_MODULE_ID_QSFP_PLUS: case MLX4_MODULE_ID_QSFP28:
mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); break; default:
mlx4_err(dev, "Module ID not recognized: %#x\n", module_id); return -EINVAL;
}
inbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(inbox)) return PTR_ERR(inbox);
/* Issue the MAD_IFC command that performs the EEPROM read. */
ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE); if (ret) goto out;
if (be16_to_cpu(outmad->status)) { /* Mad returned with bad status */
ret = be16_to_cpu(outmad->status);
mlx4_warn(dev, "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
0xFF60, port, i2c_addr, offset, size,
ret, cable_info_mad_err_str(ret));
if (i2c_addr == I2C_ADDR_HIGH &&
MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) /* Some SFP cables do not support i2c slave * address 0x51 (high page), abort silently.
*/
ret = 0; else
ret = -ret; goto out;
}
/* Success: copy the EEPROM payload out and report bytes read. */
cable_info = (struct mlx4_cable_info *)outmad->data;
memcpy(data, cable_info->data, size);
ret = size;
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox); return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
/*
 * mlx4_max_tc - number of Ethernet traffic classes the device supports.
 *
 * Uses the device capability when reported; a value of 0 means the
 * firmware did not report it, in which case the driver-wide default
 * MLX4_TC_MAX_NUMBER is returned instead.
 */
int mlx4_max_tc(struct mlx4_dev *dev)
{
	u8 tc_count = dev->caps.max_tc_eth;

	return tc_count ? tc_count : MLX4_TC_MAX_NUMBER;
}
EXPORT_SYMBOL(mlx4_max_tc);
Messung V0.5
¤ Dauer der Verarbeitung: 0.16 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.