/* The IDR is expected to be empty at this stage, because any left signal should have been * released as part of CS roll-back.
*/ if (!idr_is_empty(idp)) {
dev_warn(hdev->dev, "device released while some encaps signals handles are still allocated\n");
idr_for_each_entry(idp, handle, id)
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
}
/* Release all allocated HW block mapped list entries and destroy * the mutex.
*/
hl_hw_block_mem_fini(ctx);
/* * If we arrived here, there are no jobs waiting for this context * on its queues so we can safely remove it. * This is because for each CS, we increment the ref count and for * every CS that was finished we decrement it and we won't arrive * to this function unless the ref count is 0
*/
for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
hl_fence_put(ctx->cs_pending[i]);
kfree(ctx->cs_pending);
if (ctx->asid != HL_KERNEL_ASID_ID) {
dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);
/* The engines are stopped as there is no executing CS, but the * Coresight might be still working by accessing addresses * related to the stopped engines. Hence stop it explicitly.
*/ if (hdev->in_debug)
hl_device_set_debug_mode(hdev, ctx, false);
INIT_LIST_HEAD(&ctx->outcome_store.used_list);
INIT_LIST_HEAD(&ctx->outcome_store.free_list);
hash_init(ctx->outcome_store.outcome_map); for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
list_add(&ctx->outcome_store.nodes_pool[i].list_link,
&ctx->outcome_store.free_list);
hl_hw_block_mem_init(ctx);
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
rc = hl_vm_ctx_init(ctx); if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM; goto err_hw_block_mem_fini;
}
rc = hdev->asic_funcs->ctx_init(ctx); if (rc) {
dev_err(hdev->dev, "ctx_init failed\n"); goto err_vm_ctx_fini;
}
} else {
ctx->asid = hl_asid_alloc(hdev); if (!ctx->asid) {
dev_err(hdev->dev, "No free ASID, failed to create context\n");
rc = -ENOMEM; goto err_hw_block_mem_fini;
}
rc = hl_vm_ctx_init(ctx); if (rc) {
dev_err(hdev->dev, "Failed to init mem ctx module\n");
rc = -ENOMEM; goto err_asid_free;
}
rc = hl_cb_va_pool_init(ctx); if (rc) {
dev_err(hdev->dev, "Failed to init VA pool for mapped CB\n"); goto err_vm_ctx_fini;
}
/* There can only be a single user which has opened the compute device, so exit * immediately once we find its context or if we see that it has been released
*/ break;
}
mutex_unlock(&hdev->fpriv_list_lock);
return ctx;
}
/* * hl_ctx_get_fence_locked - get CS fence under CS lock * * @ctx: pointer to the context structure. * @seq: CS sequences number * * @return valid fence pointer on success, NULL if fence is gone, otherwise * error pointer. * * NOTE: this function shall be called with cs_lock locked
*/ staticstruct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{ struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop; struct hl_fence *fence;
if (seq >= ctx->cs_sequence) return ERR_PTR(-EINVAL);
if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) return NULL;
/* * hl_ctx_get_fences - get multiple CS fences under the same CS lock * * @ctx: pointer to the context structure. * @seq_arr: array of CS sequences to wait for * @fence: fence array to store the CS fences * @arr_len: length of seq_arr and fence_arr * * @return 0 on success, otherwise non 0 error code
*/ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr, struct hl_fence **fence, u32 arr_len)
{ struct hl_fence **fence_arr_base = fence; int i, rc = 0;
spin_lock(&ctx->cs_lock);
for (i = 0; i < arr_len; i++, fence++) {
u64 seq = seq_arr[i];
*fence = hl_ctx_get_fence_locked(ctx, seq);
if (IS_ERR(*fence)) {
dev_err(ctx->hdev->dev, "Failed to get fence for CS with seq 0x%llx\n",
seq);
rc = PTR_ERR(*fence); break;
}
}
spin_unlock(&ctx->cs_lock);
if (rc)
hl_fences_put(fence_arr_base, i);
return rc;
}
/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	/* Prepare the lock and the handle IDR before any context is added. */
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}
/* * hl_ctx_mgr_fini - finalize the context manager * * @hdev: pointer to device structure * @ctx_mgr: pointer to context manager structure * * This function goes over all the contexts in the manager and frees them. * It is called when a process closes the FD.
*/ void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{ struct hl_ctx *ctx; struct idr *idp;
u32 id;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.