/*
 * We use GFP_ATOMIC here because this function can be called from
 * the latency-sensitive code path for command submission. Due to H/W
 * limitations in some of the ASICs, the kernel must copy the user CB
 * that is designated for an external queue and actually enqueue
 * the kernel's copy. Hence, we must never sleep in this code section
 * and must use GFP_ATOMIC for all memory allocations.
 */
if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
	cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

/* Fall back to a sleeping allocation if the atomic attempt failed */
if (!cb)
	cb = kzalloc(sizeof(*cb), GFP_KERNEL);

if (!cb)
	return NULL;

if (internal_cb) {
	/* Internal CBs are carved out of a pre-allocated device pool */
	p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
	if (!p) {
		kfree(cb);
		return NULL;
	}

	cb_offset = p - hdev->internal_cb_pool_virt_addr;
	cb->is_internal = true;
	cb->bus_address = hdev->internal_cb_va_base + cb_offset;
} else if (ctx_id == HL_KERNEL_ASID_ID) {
	/*
	 * Kernel-context CBs use the same atomic-first, sleeping-fallback
	 * scheme as the struct allocation above.
	 * BUGFIX(review): original read "elseif", which is not valid C.
	 */
	p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
	if (!p)
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
} else {
	/* User-context CBs: zeroed, user-accessible DMA memory */
	p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
					GFP_USER | __GFP_ZERO);
}

if (!p) {
	dev_err(hdev->dev, "failed to allocate %d of dma memory for CB\n",
		cb_size);
	kfree(cb);
	return NULL;
}
/*
 * Optionally map the CB into the device MMU. Mapping is rejected for the
 * kernel context (HL_KERNEL_ASID_ID) per the error message below.
 * NOTE(review): this fragment references cb_args and a release_cb label
 * that are defined outside the visible region — confirm against the full
 * function before modifying.
 */
if (cb_args->map_cb) { if (ctx_id == HL_KERNEL_ASID_ID) {
dev_err(cb_args->hdev->dev, "CB mapping is not supported for kernel context\n");
rc = -EINVAL; goto release_cb;
}
/* Map the CB; on failure unwind through the shared release path */
rc = cb_map_mem(cb_args->ctx, cb); if (rc) goto release_cb;
}
/*
 * Refuse to create new CBs while the device is disabled, or while a reset
 * is in progress for any context other than the kernel context.
 * NOTE(review): this fragment returns directly, so it belongs to a function
 * whose header lies outside the visible region.
 */
if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
dev_warn_ratelimited(hdev->dev, "Device is disabled or in reset. Can't create new CBs\n"); return -EBUSY;
}
/* CB size is capped at 2MB */
if (cb_size > SZ_2M) {
dev_err(hdev->dev, "CB size %d must be less than %d\n",
cb_size, SZ_2M); return -EINVAL;
}
/**
 * hl_cb_destroy() - destroy a command buffer identified by its handle.
 * @mmg: memory manager that owns the CB.
 * @cb_handle: handle of the CB to destroy.
 *
 * Takes a temporary reference to locate the CB, atomically marks the handle
 * as destroyed so a double destroy is rejected, then drops the handle's
 * reference.
 *
 * Return: 0 on success, -EINVAL if the handle is unknown or was already
 * destroyed, other negative errno from the handle release.
 */
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_get(mmg, cb_handle);
	if (!cb) {
		dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
			cb_handle);
		return -EINVAL;
	}

	/* Make sure that CB handle isn't destroyed more than once */
	rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
	hl_cb_put(cb);
	if (rc) {
		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
			cb_handle);
		return -EINVAL;
	}

	/*
	 * NOTE(review): the visible source was truncated here — the function
	 * had no return and flowed into the next definition. The handle
	 * release and success return below restore the intended tail.
	 */
	rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
	if (rc < 0)
		return rc; /* Invalid handle */

	if (rc == 0)
		dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle);

	return 0;
}
/**
 * hl_cb_va_pool_init() - initialize the per-context VA pool used for
 *                        mapping CBs into the device MMU.
 * @ctx: context whose pool is initialized.
 *
 * A no-op on ASICs without CB-mapping support. Otherwise creates a genpool
 * keyed to the PMMU page size, reserves a host VA block of CB_VA_POOL_SIZE,
 * and seeds the pool with it.
 *
 * Return: 0 on success (or when CB mapping is unsupported), -ENOMEM or the
 * gen_pool_add() error on failure.
 */
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	/* Nothing to do on ASICs that don't support CB mapping */
	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev, "Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
				CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
	if (!ctx->cb_va_pool_base) {
		rc = -ENOMEM;
		goto err_pool_destroy;
	}

	rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to VA gen pool for CB mapping\n");
		goto err_unreserve_va_block;
	}

	return 0;

	/*
	 * NOTE(review): the visible source was truncated before the labels
	 * targeted by the gotos above existed; the unwind path below restores
	 * them (release the reserved VA block, then destroy the pool).
	 */
err_unreserve_va_block:
	hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}
/*
 * NOTE(review): the following text is extraneous prose (a German website
 * disclaimer) that was accidentally appended to this source file; it is not
 * C code and should be removed. English translation, preserved for review:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */