/* * Copyright (c) 2012 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
if (item) { if (xa_err(item))
ret = xa_err(item); else /* If a retry, adjust delayed work */
mod_delayed_work(cm_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT); goto err_or_exists;
}
xa_unlock(&sriov->xa_rej_tmout);
item = kmalloc(sizeof(*item), GFP_KERNEL); if (!item) return -ENOMEM;
sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave); if (sts) /* Even if this fails, we pass on the REQ to the slave */
pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
rem_pv_cm_id, *slave, sts);
return 0;
}
pv_cm_id = get_remote_comm_id(mad);
id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);
if (!id) { if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);
if (flush_needed) {
flush_workqueue(cm_wq);
pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
cnt, slave);
}
if (slave < 0)
WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}
/* slave = -1 ==> clean entries for ALL slaves */
/* TBD -- call paravirt clean for single slave. Need for slave RESET event */
/*
 * mlx4_ib_cm_paravirt_clean() - tear down CM paravirtualization state for
 * one slave (slave >= 0) or for every slave (slave < 0).
 *
 * Removes matching id_map_entry objects from both lookup structures
 * (the sl_id_map rb-tree and the pv_id_table xarray), unlinks them from
 * sriov->cm_list, and frees them.  Any delayed-timeout work scheduled on
 * an entry is cancelled (and cm_wq flushed if a cancel raced with an
 * already-running work item) before the entries are freed, so no work
 * callback can touch a freed entry.
 *
 * NOTE(review): callers presumably invoke this on device teardown /
 * slave reset (per the TBD above) — confirm against call sites; not
 * visible in this chunk.
 */
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;		/* local collect list; entries on it are freed at the end */
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	/*
	 * Pass 1: under id_map_lock, cancel the delayed-delete work of every
	 * matching entry.  cancel_delayed_work() returns false when the work
	 * was not pending (e.g. already executing), so need_flush records
	 * whether a workqueue flush is required before entries may be freed.
	 */
	/* cancel all delayed work queue entries */
	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_workqueue(cm_wq); /* make sure all timers were flushed */

	/* now, remove all leftover entries from databases */
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		/*
		 * All slaves: drain the whole rb-tree (re-reading rb_first()
		 * after each erase) and its mirror xarray entries, then take
		 * over the entire cm_list in one splice.
		 */
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
		/* first, move nodes belonging to slave to db remove list */
		/* (rb_next() is taken before a possible list_move_tail so
		 * traversal survives moving the current entry) */
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}
		/* remove those nodes from databases */
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

		/*
		 * add remaining nodes from cm_list: entries for this slave
		 * that were never inserted into the rb-tree still need to be
		 * unlinked and freed.
		 */
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	/* free any map entries left behind due to cancel_delayed_work above */
	/* (safe now: locks dropped, all pending timeout work cancelled/flushed) */
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	/* finally, drop the REJ-timeout tracking entries for this slave */
	rej_tmout_xa_cleanup(sriov, slave);
}
int mlx4_ib_cm_init(void)
{
cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0); if (!cm_wq) return -ENOMEM;
/*
 * NOTE(review): the paragraph below is extraction residue from an unrelated
 * German web page (a generic content disclaimer), not part of this driver.
 * It is preserved here, translated to English and commented out so the file
 * remains valid C:
 *
 * "The information on this website has been carefully compiled to the best
 *  of our knowledge.  However, no guarantee is given as to the completeness,
 *  correctness, or quality of the information provided.
 *  Note: the colored syntax highlighting and the measurement are still
 *  experimental."
 */