/* At this point, user is not able to submit new commands */
mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
mutex_unlock(&xdna->dev_lock);
if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) return -1;
num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
cu_mask = cmd->data; for (i = 0; i < num_masks; i++) { if (cu_mask[i]) return ffs(cu_mask[i]) - 1;
}
return -1;
}
/* * This should be called in close() and remove(). DO NOT call in other syscalls. * This guarantee that when hwctx and resources will be released, if user * doesn't call amdxdna_drm_destroy_hwctx_ioctl.
*/ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{ struct amdxdna_hwctx *hwctx; unsignedlong hwctx_id;
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad))) return -EINVAL;
if (!drm_dev_enter(dev, &idx)) return -ENODEV;
hwctx = xa_erase(&client->hwctx_xa, args->handle); if (!hwctx) {
ret = -EINVAL;
XDNA_DBG(xdna, "PID %d HW context %d not exist",
client->pid, args->handle); goto out;
}
/* * The pushed jobs are handled by DRM scheduler during destroy. * SRCU to synchronize with exec command ioctls.
*/
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad))) return -EINVAL;
if (!xdna->dev_info->ops->hwctx_config) return -EOPNOTSUPP;
val = args->param_val;
buf_size = args->param_val_size;
switch (args->param_type) { case DRM_AMDXDNA_HWCTX_CONFIG_CU: /* For those types that param_val is pointer */ if (buf_size > PAGE_SIZE) {
XDNA_ERR(xdna, "Config CU param buffer too large"); return -E2BIG;
}
/* Hwctx needs to keep buf */
buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM;
if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
kfree(buf); return -EFAULT;
}
break; case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF: case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF: /* For those types that param_val is a value */
buf = NULL;
buf_size = 0; break; default:
XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type); return -EINVAL;
}
mutex_lock(&xdna->dev_lock);
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, args->handle); if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
ret = -EINVAL; goto unlock_srcu;
}
ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
job = kzalloc(struct_size(job, bos, arg_bo_cnt), GFP_KERNEL); if (!job) return -ENOMEM;
if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD); if (!job->cmd_bo) {
XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
ret = -EINVAL; goto free_job;
}
} else {
job->cmd_bo = NULL;
}
ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt); if (ret) {
XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret); goto cmd_put;
}
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, hwctx_hdl); if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
client->pid, hwctx_hdl);
ret = -EINVAL; goto unlock_srcu;
}
if (hwctx->status != HWCTX_STAT_READY) {
XDNA_ERR(xdna, "HW Context is not ready");
ret = -EINVAL; goto unlock_srcu;
}
job->hwctx = hwctx;
job->mm = current->mm;
job->fence = amdxdna_fence_create(hwctx); if (!job->fence) {
XDNA_ERR(xdna, "Failed to create fence");
ret = -ENOMEM; goto unlock_srcu;
}
kref_init(&job->refcnt);
ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq); if (ret) goto put_fence;
/* * The amdxdna_hwctx_destroy_rcu() will release hwctx and associated * resource after synchronize_srcu(). The submitted jobs should be * handled by the queue, for example DRM scheduler, in device layer. * For here we can unlock SRCU.
*/
srcu_read_unlock(&client->hwctx_srcu, idx);
trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");
/*
 * The submit command ioctl submits a command to firmware. One firmware command
 * may contain multiple command BOs for processing as a whole.
 * The command sequence number is returned which can be used for wait command
 * ioctl.
 *
 * Returns 0 on success; -EINVAL for bad counts, -ENOMEM on allocation
 * failure, -EFAULT if the user argument array cannot be copied, or the
 * error code propagated from amdxdna_cmd_submit().
 */
static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
				      struct amdxdna_drm_exec_cmd *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	u32 *arg_bo_hdls = NULL;
	u32 cmd_bo_hdl;
	int ret;

	/* Reject oversized argument lists before allocating anything. */
	if (args->arg_count > MAX_ARG_COUNT) {
		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
		return -EINVAL;
	}

	/* Only support single command for now. */
	if (args->cmd_count != 1) {
		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
		return -EINVAL;
	}

	cmd_bo_hdl = (u32)args->cmd_handles;
	if (args->arg_count) {
		/* Copy the array of argument BO handles from user space. */
		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
		if (!arg_bo_hdls)
			return -ENOMEM;
		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
				     args->arg_count * sizeof(u32));
		if (ret) {
			ret = -EFAULT;
			goto free_cmd_bo_hdls;
		}
	}

	ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
				 args->arg_count, args->hwctx, &args->seq);
	if (ret)
		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);

free_cmd_bo_hdls:
	kfree(arg_bo_hdls);
	if (!ret)
		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
	return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.