bo->gather_data = dma_alloc_attrs(dev, copy_len, &bo->gather_data_dma,
GFP_KERNEL | __GFP_NOWARN, 0); if (!bo->gather_data) {
SUBMIT_ERR(context, "failed to allocate memory for gather data");
kfree(bo); return -ENOMEM;
}
if (copy_from_user(bo->gather_data, u64_to_user_ptr(args->gather_data_ptr), copy_len)) {
SUBMIT_ERR(context, "failed to copy gather data from userspace");
dma_free_attrs(dev, copy_len, bo->gather_data, bo->gather_data_dma, 0);
kfree(bo); return -EFAULT;
}
bo->gather_data_words = args->gather_data_words;
*pbo = bo;
return 0;
}
staticint submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo, struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping)
{ /* TODO check that target_offset is within bounds */
dma_addr_t iova = mapping->iova + buf->reloc.target_offset;
u32 written_ptr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (buf->flags & DRM_TEGRA_SUBMIT_RELOC_SECTOR_LAYOUT)
iova |= BIT_ULL(39); #endif
written_ptr = iova >> buf->reloc.shift;
if (buf->reloc.gather_offset_words >= bo->gather_data_words) {
SUBMIT_ERR(context, "relocation has too large gather offset (%u vs gather length %zu)",
buf->reloc.gather_offset_words, bo->gather_data_words); return -EINVAL;
}
if (args->syncpt.flags) {
SUBMIT_ERR(context, "invalid flag specified for syncpt"); return -EINVAL;
}
/* Syncpt ref will be dropped on job release */
sp = xa_load(syncpoints, args->syncpt.id); if (!sp) {
SUBMIT_ERR(context, "syncpoint specified in syncpt was not allocated"); return -EINVAL;
}
if (cmd->wait_syncpt.id != args->syncpt.id) {
SUBMIT_ERR(context, "syncpoint ID in CMD_WAIT_SYNCPT_RELATIVE is not used by the job");
err = -EINVAL; goto free_job;
}
/* Allocate gather BO and copy gather words in. */
err = submit_copy_gather_data(&bo, drm->dev, context, args); if (err) goto unlock;
job_data = kzalloc(sizeof(*job_data), GFP_KERNEL); if (!job_data) {
SUBMIT_ERR(context, "failed to allocate memory for job data");
err = -ENOMEM; goto put_bo;
}
/* Get data buffer mappings and do relocation patching. */
err = submit_process_bufs(context, bo, args, job_data); if (err) goto free_job_data;
/* Allocate host1x_job and add gathers and waits to it. */
job = submit_create_job(context, bo, args, job_data, &fpriv->syncpoints); if (IS_ERR(job)) {
err = PTR_ERR(job); goto free_job_data;
}
/* Map gather data for Host1x. */
err = host1x_job_pin(job, context->client->base.dev); if (err) {
SUBMIT_ERR(context, "failed to pin job: %d", err); goto put_job;
}
if (context->client->ops->get_streamid_offset) {
err = context->client->ops->get_streamid_offset(
context->client, &job->engine_streamid_offset); if (err) {
SUBMIT_ERR(context, "failed to get streamid offset: %d", err); goto unpin_job;
}
}
if (context->memory_context && context->client->ops->can_use_memory_ctx) { bool supported;
err = context->client->ops->can_use_memory_ctx(context->client, &supported); if (err) {
SUBMIT_ERR(context, "failed to detect if engine can use memory context: %d", err); goto unpin_job;
}
if (supported) {
job->memory_context = context->memory_context;
host1x_memory_context_get(job->memory_context);
}
} elseif (context->client->ops->get_streamid_offset) { /* * Job submission will need to temporarily change stream ID, * so need to tell it what to change it back to.
*/ if (!tegra_dev_iommu_get_stream_id(context->client->base.dev,
&job->engine_fallback_streamid))
job->engine_fallback_streamid = TEGRA_STREAM_ID_BYPASS;
}
/* Boot engine. */
err = pm_runtime_resume_and_get(context->client->base.dev); if (err < 0) {
SUBMIT_ERR(context, "could not power up engine: %d", err); goto put_memory_context;
}
/* Return postfences to userspace and add fences to DMA reservations. */
args->syncpt.value = job->syncpt_end;
if (syncobj) { struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end, true); if (IS_ERR(fence)) {
err = PTR_ERR(fence);
SUBMIT_ERR(context, "failed to create postfence: %d", err);
}
drm_syncobj_replace_fence(syncobj, fence);
}
goto put_job;
put_memory_context: if (job->memory_context)
host1x_memory_context_put(job->memory_context);
unpin_job:
host1x_job_unpin(job);
put_job:
host1x_job_put(job);
free_job_data: if (job_data && job_data->used_mappings) { for (i = 0; i < job_data->num_used_mappings; i++)
tegra_drm_mapping_put(job_data->used_mappings[i].mapping);
kfree(job_data->used_mappings);
}
kfree(job_data);
put_bo:
gather_bo_put(&bo->base);
unlock: if (syncobj)
drm_syncobj_put(syncobj);
/*
 * NOTE(review): the text below is website boilerplate (a German disclaimer)
 * that was accidentally appended to this source file during extraction; it
 * is not C code and should be removed from the file. English translation of
 * the original text: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, no guarantee is made of
 * the completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */