/*
 * NOTE(review): interior fragment of the CREATE_BO ioctl handler — the
 * enclosing function's signature, local declarations and the "out:" label
 * targeted below are outside this chunk. Code is left token-for-token as
 * found; only comments were added/reformatted.
 */
/* Heaps should never be executable */
if ((args->flags & PANFROST_BO_HEAP) &&
    !(args->flags & PANFROST_BO_NOEXEC))
	return -EINVAL;

/* Allocate the GEM object that backs this buffer. */
bo = panfrost_gem_create(dev, args->size, args->flags);
if (IS_ERR(bo))
	return PTR_ERR(bo);

/* Create a userspace handle; errors go through the (unseen) "out:" path. */
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
if (ret)
	goto out;

/* Return the start offset of the BO's GPU mapping to userspace. */
mapping = panfrost_gem_mapping_get(bo, priv);
if (mapping) {
	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
} else {
	/*
	 * This can only happen if the handle from
	 * drm_gem_handle_create() has already been guessed and freed
	 * by user space.
	 */
	ret = -EINVAL;
}
/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
/*
 * NOTE(review): "staticint" / "unsignedint" below are token-mangled
 * (missing spaces), and the function appears truncated in this chunk:
 * the locals "bo" and "i" are never used in the visible code and there
 * is no closing brace. Code left byte-identical apart from comments.
 */
staticint
panfrost_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct drm_panfrost_submit *args, struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsignedint i;
	int ret;

	/* An empty BO list is a valid submission. */
	job->bo_count = args->bo_handle_count;
	if (!job->bo_count)
		return 0;

	/* Translate the userspace handle array into GEM object pointers. */
	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;
/**
 * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
/*
 * NOTE(review): "staticint" is token-mangled (missing space), and the
 * "fail:" label targeted by the gotos below — along with the cleanup of
 * "handles" and the closing brace — is not visible in this chunk; the
 * body appears truncated. Code left byte-identical apart from comments.
 */
staticint
panfrost_copy_in_sync(struct drm_device *dev, struct drm_file *file_priv, struct drm_panfrost_submit *args, struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i, in_fence_count;

	in_fence_count = args->in_sync_count;
	if (!in_fence_count)
		return 0;

	/* Temporary kernel copy of the userspace syncobj handle array. */
	handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	/* Attach each syncobj's fence as a dependency of the scheduler job. */
	for (i = 0; i < in_fence_count; i++) {
		ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
							   handles[i], 0);
		if (ret)
			goto fail;
	}
/*
 * NOTE(review): tail fragment of the SUBMIT ioctl handler — the function
 * signature, argument validation and job allocation precede this chunk,
 * and the final return follows it. Both "file_priv"- and "file"-style
 * identifiers appear below ("file_priv->sched_entity" vs
 * "file->client_id" / "panfrost_copy_in_sync(dev, file, ...)") — confirm
 * which name the enclosing function actually declares.
 */
ret = drm_sched_job_init(&job->base,
			 &file_priv->sched_entity[slot],
			 1, NULL, file->client_id);
if (ret)
	goto out_put_job;

/* Resolve input syncobjs into scheduler dependencies. */
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
	goto out_cleanup_job;

/* Resolve BO handles and attach the objects to the job. */
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
	goto out_cleanup_job;

/* Hand the fully-populated job to the scheduler. */
ret = panfrost_job_push(job);
if (ret)
	goto out_cleanup_job;

/* Update the return sync object for the job */
if (sync_out)
	drm_syncobj_replace_fence(sync_out, job->render_done_fence);

out_cleanup_job:
	if (ret)
		drm_sched_job_cleanup(&job->base);
out_put_job:
	panfrost_job_put(job);
out_put_syncout:
	if (sync_out)
		drm_syncobj_put(sync_out);
/*
 * NOTE(review): fragment — presumably the start of a BO-lookup ioctl
 * handler (e.g. MMAP_BO or GET_BO_OFFSET); its signature is not visible
 * here. The same four statements repeat immediately below, suggesting two
 * different handlers were flattened together by the extractor.
 */
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
	DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
	return -ENOENT;
}
bo = to_panfrost_bo(gem_obj);
/*
 * NOTE(review): fragment of the MADVISE ioctl handler — the function
 * signature, the unlock sequence and the "out_unlock_mappings" /
 * "out_put_object" labels targeted below are outside this chunk.
 */
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
	DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
	return -ENOENT;
}
bo = to_panfrost_bo(gem_obj);

/* Serialize against concurrent users of the BO and the shrinker. */
ret = dma_resv_lock_interruptible(bo->base.base.resv, NULL);
if (ret)
	goto out_put_object;

mutex_lock(&pfdev->shrinker_lock);
mutex_lock(&bo->mappings.lock);
if (args->madv == PANFROST_MADV_DONTNEED) {
	struct panfrost_gem_mapping *first;

	first = list_first_entry(&bo->mappings.list,
				 struct panfrost_gem_mapping,
				 node);

	/*
	 * If we want to mark the BO purgeable, there must be only one
	 * user: the caller FD.
	 * We could do something smarter and mark the BO purgeable only
	 * when all its users have marked it purgeable, but globally
	 * visible/shared BOs are likely to never be marked purgeable
	 * anyway, so let's not bother.
	 */
	if (!list_is_singular(&bo->mappings.list) ||
	    WARN_ON_ONCE(first->mmu != priv->mmu)) {
		ret = -EINVAL;
		goto out_unlock_mappings;
	}
}
/*
 * NOTE(review): fragment of the SET_LABEL ioctl handler — the function
 * signature and the declarations of "obj", "label" and "ret" (including
 * ret's initial value, which is what the success path returns) are
 * outside this chunk.
 */
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
	return -ENOENT;

if (args->label) {
	/* Copy the user-supplied label, bounded to the maximum length. */
	label = strndup_user(u64_to_user_ptr(args->label),
			     PANFROST_BO_LABEL_MAXLEN);
	if (IS_ERR(label)) {
		ret = PTR_ERR(label);
		/*
		 * Remap -EINVAL to -E2BIG — presumably because
		 * strndup_user() reports an over-long string as
		 * -EINVAL; confirm against its contract.
		 */
		if (ret == -EINVAL)
			ret = -E2BIG;
		goto err_put_obj;
	}
}

/*
 * We treat passing a label of length 0 and passing a NULL label
 * differently, because even though they might seem conceptually
 * similar, future uses of the BO label might expect a different
 * behaviour in each case.
 */
panfrost_gem_set_label(obj, label);

err_put_obj:
	drm_gem_object_put(obj);

	return ret;
}
/*
 * panfrost_unstable_ioctl_check() - Gate for ioctls that are only
 * available when the driver's unstable_ioctls flag is set; returns
 * -ENOSYS otherwise.
 *
 * NOTE(review): truncated in this chunk — the success return and the
 * closing brace are not visible.
 */
int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;
/*
 * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not
 * accurate, as they only provide a rough estimation of the number of
 * GPU cycles and CPU time spent in a given context. This is due to two
 * different factors:
 * - Firstly, we must consider the time the CPU and then the kernel
 *   takes to process the GPU interrupt, which means additional time and
 *   GPU cycles will be added in excess to the real figure.
 * - Secondly, the pipelining done by the Job Manager (2 job slots per
 *   engine) implies there is no way to know exactly how much time each
 *   job spent on the GPU.
 */
/* * The OPP core wants the supply names to be NULL terminated, but we need the * correct num_supplies value for regulator core. Hence, we NULL terminate here * and then initialize num_supplies with ARRAY_SIZE - 1.
*/ staticconstchar * const default_supplies[] = { "mali", NULL }; staticconststruct panfrost_compatible default_data = {
.num_supplies = ARRAY_SIZE(default_supplies) - 1,
.supply_names = default_supplies,
.num_pm_domains = 1, /* optional */
.pm_domain_names = NULL,
};
staticconstchar * const mediatek_pm_domains[] = { "core0", "core1", "core2", "core3", "core4" }; /* * The old data with two power supplies for MT8183 is here only to * keep retro-compatibility with older devicetrees, as DVFS will * not work with this one. * * On new devicetrees please use the _b variant with a single and * coupled regulators instead.
*/ staticconstchar * const legacy_supplies[] = { "mali", "sram", NULL }; staticconststruct panfrost_compatible mediatek_mt8183_data = {
.num_supplies = ARRAY_SIZE(legacy_supplies) - 1,
.supply_names = legacy_supplies,
.num_pm_domains = 3,
.pm_domain_names = mediatek_pm_domains,
};
/*
 * NOTE(review): the following German text is website boilerplate (a
 * code-viewer disclaimer), not driver source; it is wrapped in a comment
 * here so it cannot be parsed as code. Rough translation: "The
 * information on this web page was compiled carefully to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of
 * the provided information is guaranteed. Note: the colored syntax
 * display and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */