/* * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* NOTE(review): fragment — this appears to be the interior of give_pages()
 * (the path that allocates host pages and hands them to FW via
 * MANAGE_PAGES).  The function signature, local declarations and the
 * out_4k/out_dropped cleanup labels referenced below are missing from this
 * extraction, and the CANT_GIVE setup oddly precedes the GIVE setup —
 * presumably the extractor reordered the failure-notification path.
 * Code left byte-identical; comments only.
 */
/* Failure path setup (presumably): tell FW the pages cannot be given. */
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
/* Collect npages 4K chunks.  When the free list is exhausted (-ENOMEM),
 * pull a fresh system page and retry; on any other failure, account the
 * shortfall and abort via out_4k (label not visible in this fragment).
 */
for (i = 0; i < npages; i++) {
retry:
err = alloc_4k(dev, &addr, function); if (err) { if (err == -ENOMEM)
err = alloc_system_page(dev, function); if (err) {
dev->priv.fw_pages_alloc_failed += (npages - i); goto out_4k;
}
goto retry;
}
/* Record each chunk's DMA address in the command's pas[] array. */
MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
}
/* Hand the collected pages to FW via MANAGE_PAGES(GIVE). */
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
/* -EREMOTEIO: command reached FW but FW rejected it.  If this give was
 * triggered by a FW event, the failure is FW's to notice — drop the pages
 * quietly without the CANT_GIVE notification (notify_fail = 0).
 */
err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out)); if (err == -EREMOTEIO) {
notify_fail = 0; /* if triggered by FW and failed by FW ignore */ if (event) {
err = 0; goto out_dropped;
}
}
err = mlx5_cmd_check(dev, err, in, out); if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err); goto out_dropped;
}
/* reclaim_pages_cmd() - execute a MANAGE_PAGES(TAKE) command.
 *
 * Normally just forwards the command to FW.  If FW is unreachable
 * (mlx5_cmd_do() returns -ENXIO, e.g. after teardown/crash), the driver
 * reclaims the pages forcefully: it walks the function's page tree and
 * synthesizes the command output as if FW had returned the pages, so the
 * caller's accounting and freeing logic works unchanged.
 *
 * NOTE(review): the original extraction was truncated after the rb-tree
 * walk; the output_num_entries update, return and closing brace are
 * restored here.  Also fixes the fused "staticint" token.
 *
 * Return: 0 on success (including forceful reclaim), errno otherwise.
 */
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;
	int err;

	err = mlx5_cmd_do(dev, in, in_size, out, out_size);
	/* If FW is gone (-ENXIO), proceed to forceful reclaim */
	if (err != -ENXIO)
		return err;

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	/* Report how many entries were actually filled in. */
	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}
/* NOTE(review): garbled region — it begins as reclaim_pages() (build and
 * issue a MANAGE_PAGES(TAKE) command), but from the MAX_RECLAIM_NPAGES
 * comment onward it continues with the body of a *different* function:
 * the FW page-request notifier, which fills a work item "req" and queues
 * it ("req", "release_all" are not declared in this scope, and the
 * return value NOTIFY_OK is a notifier-chain code).  The middle of
 * reclaim_pages() — reading output_num_entries, freeing the reclaimed
 * pages, updating counters, kvfree(out) — is missing, as is the notifier
 * function's own header.  Also note the fused "staticint" token.
 * Code left byte-identical; comments only — TODO restore from upstream.
 */
staticint reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int *nclaimed, bool event, bool ec_function)
{
u32 function = get_function(func_id, ec_function); int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {}; int num_claimed;
u16 func_type;
u32 *out; int err; int i;
if (nclaimed)
*nclaimed = 0;
/* Output carries one PA per reclaimed page — size it for npages. */
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = kvzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
func_id, npages, outlen);
/* On failure, account the pages FW will never hand back as discarded. */
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) {
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
dev->priv.reclaim_pages_discard += npages;
} /* if triggered by FW event and failed by FW then ignore */ if (event && err == -EREMOTEIO) {
err = 0; goto out_free;
}
err = mlx5_cmd_check(dev, err, in, out); if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); goto out_free;
}
/* NOTE(review): from here on this is the page-request notifier's body,
 * not reclaim_pages() — see the note at the top of this region.
 */
/* This limit is based on the capability of the firmware as it cannot release * more than 50000 back to the host in one go.
*/ #define MAX_RECLAIM_NPAGES (-50000)
/* npages > 0 means HCA asking host to allocate/give pages, * npages < 0 means HCA asking host to reclaim back the pages allocated. * Here we are restricting the maximum number of pages that can be * reclaimed to be MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES is * a negative value. * Since MAX_RECLAIM is negative, we are using max() to restrict * req->npages (and not min ()).
*/
req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
req->ec_function = ec_function;
req->release_all = release_all;
/* Defer the actual give/reclaim to the page-allocator workqueue. */
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work); return NOTIFY_OK;
}
/* NOTE(review): fused region — the signature and the query-pages call
 * belong to mlx5_satisfy_startup_pages() (ask FW how many boot/init pages
 * it needs, then give them), but the WARN checks below belong to a
 * reclaim-all/teardown path (they assert the per-type FW page counters
 * are zero *after reclaiming all pages*, which makes no sense at startup).
 * The expected give_pages() call is missing.  Code left byte-identical;
 * comments only — TODO restore from upstream.
 */
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
u16 func_id;
s32 npages; int err;
/* Ask FW how many pages it wants (boot vs. init query). */
err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); if (err) return err;
WARN(dev->priv.fw_pages, "FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.page_counters[MLX5_VF], "VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_VF]);
WARN(dev->priv.page_counters[MLX5_HOST_PF], "External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_HOST_PF]);
WARN(dev->priv.page_counters[MLX5_EC_VF], "EC VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_EC_VF]);
return 0;
}
/* mlx5_pagealloc_init() - initialize driver-side page-allocator state:
 * the 4K free-chunk list, the per-function page-tree xarray and the
 * single-threaded workqueue that services FW page requests.
 *
 * NOTE(review): the original extraction was truncated after the workqueue
 * check; the tail is restored here.  page_root_xa is demonstrably read by
 * reclaim_pages_cmd() (xa_load), so it must be initialized here — confirm
 * against upstream whether anything else (e.g. a mutex) was also set up.
 *
 * Return: 0 on success, -ENOMEM if the workqueue cannot be created.
 */
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);

	return 0;
}
/* mlx5_wait_for_pages() - wait for the page counter pointed to by @pages
 * to drain to zero (e.g. all VF pages reclaimed by FW).
 *
 * The timeout window (RECLAIM_VFS_PAGES) is restarted whenever forward
 * progress is observed (*pages decreased), so only a genuine stall aborts
 * the wait.  Fixes the fused "unsignedlong" token from the original,
 * which would not compile.
 *
 * Return: 0 once *pages reaches zero, or immediately when the device is
 * in internal error (the pages are freed manually later in that case);
 * -ETIMEDOUT if no progress is made within the timeout window.
 */
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			/* Progress made — restart the timeout window. */
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.3 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.