/** * DOC: user <-> kernel object copy helpers.
*/
/**
 * panthor_set_uobj() - Copy a kernel object out to a user object.
 * @usr_ptr: User pointer (encoded as a u64) to copy to.
 * @usr_size: Size of the user object.
 * @min_size: Minimum size accepted for this object.
 * @kern_size: Size of the kernel object.
 * @in: Address of the kernel object to copy.
 *
 * Helper automating kernel -> user object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
{
	u32 copy_len = min_t(u32, usr_size, kern_size);

	/* A user object smaller than the minimum can't hold the mandatory fields. */
	if (usr_size < min_size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, copy_len))
		return -EFAULT;

	/* The user object may be bigger than ours (newer uAPI header on the
	 * user side): zero-fill the tail in that case.
	 */
	if (usr_size > kern_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size))
		return -EFAULT;

	return 0;
}
/**
 * panthor_get_uobj_array() - Copy a user object array into a kernel accessible
 * object array.
 * @in: The object array to copy.
 * @min_stride: Minimum array stride.
 * @obj_size: Kernel object size.
 *
 * Helper automating user -> kernel object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
 *
 * Return: newly allocated object array or an ERR_PTR on error.
 */
static void *
panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
		       u32 obj_size)
{
	void *out_alloc;
	int ret = 0;

	if (!in->count)
		return NULL;

	/* A stride below the minimum object size can't carry all the
	 * mandatory fields.
	 */
	if (in->stride < min_stride)
		return ERR_PTR(-EINVAL);

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return ERR_PTR(-ENOMEM);

	if (obj_size == in->stride) {
		/* Fast path: user and kernel share the same uAPI header
		 * version, so the whole array can be copied in one go.
		 */
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		/* Mismatching sizes: copy elements one by one. */
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr,
						    in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ERR_PTR(ret);
	}

	return out_alloc;
}
/**
 * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Get the minimum user object size based on the last mandatory field name,
 * A.K.A, the name of the last field of the structure at the time this
 * structure was added to the uAPI.
 *
 * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
 */
#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
	(offsetof(_typename, _last_mandatory_field) + \
	 sizeof(((_typename *)NULL)->_last_mandatory_field))
/**
 * PANTHOR_UOBJ_DECL() - Declare a new uAPI object whose subject to
 * evolutions.
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Expands to a _Generic() association entry mapping @_typename to its
 * minimum size. Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
 */
#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)
/**
 * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
 * @_obj_name: Object to get the minimum size of.
 *
 * Resolved at compile time through _Generic() on the object's type.
 *
 * Don't use this macro directly, it's automatically called by
 * PANTHOR_UOBJ_{SET,GET_ARRAY}().
 */
#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
	_Generic(_obj_name, \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))
/**
 * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
 * @_dest_usr_ptr: User pointer to copy to.
 * @_usr_size: Size of the user object.
 * @_src_obj: Kernel object to copy (not a pointer).
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
			 sizeof(_src_obj), &(_src_obj))
/**
 * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
 * object array.
 * @_dest_array: Local variable that will hold the newly allocated kernel
 * object array.
 * @_uobj_array: The drm_panthor_obj_array object describing the user object
 * array.
 *
 * @_dest_array is only assigned on success; on failure it is left untouched.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
	({ \
		typeof(_dest_array) _tmp; \
		_tmp = panthor_get_uobj_array(_uobj_array, \
					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
					      sizeof((_dest_array)[0])); \
		if (!IS_ERR(_tmp)) \
			_dest_array = _tmp; \
		PTR_ERR_OR_ZERO(_tmp); \
	})
/**
 * struct panthor_sync_signal - Represent a synchronization object point to attach
 * our job fence to.
 *
 * This structure is here to keep track of fences that are currently bound to
 * a specific syncobj point.
 *
 * At the beginning of a job submission, the fence
 * is retrieved from the syncobj itself, and can be NULL if no fence was attached
 * to this point.
 *
 * At the end, it points to the fence of the last job that had a
 * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
 *
 * With jobs being submitted in batches, the fence might change several times during
 * the process, allowing one job to wait on a job that's part of the same submission
 * but appears earlier in the drm_panthor_group_submit::queue_submits array.
 */
struct panthor_sync_signal {
	/** @node: list_head to track signal ops within a submit operation */
	struct list_head node;

	/** @handle: The syncobj handle. */
	u32 handle;

	/**
	 * @point: The syncobj point.
	 *
	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
	 */
	u64 point;

	/**
	 * @syncobj: The sync object pointed by @handle.
	 */
	struct drm_syncobj *syncobj;

	/**
	 * @chain: Chain object used to link the new fence to an existing
	 * timeline syncobj.
	 *
	 * NULL for regular syncobj, non-NULL for timeline syncobjs.
	 */
	struct dma_fence_chain *chain;

	/**
	 * @fence: The fence to assign to the syncobj or syncobj-point.
	 */
	struct dma_fence *fence;
};
/**
 * struct panthor_job_ctx - Job context
 *
 * Per-job state tracked by a struct panthor_submit_ctx.
 */
struct panthor_job_ctx {
	/** @job: The job that is about to be submitted to drm_sched. */
	struct drm_sched_job *job;

	/** @syncops: Array of sync operations. */
	struct drm_panthor_sync_op *syncops;

	/** @syncop_count: Number of sync operations. */
	u32 syncop_count;
};
/**
 * struct panthor_submit_ctx - Submission context
 *
 * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
 * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
 * initialization and cleanup steps.
 */
struct panthor_submit_ctx {
	/** @file: DRM file this submission happens on. */
	struct drm_file *file;

	/**
	 * @signals: List of struct panthor_sync_signal.
	 *
	 * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
	 * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
	 * matching the syncobj+point exists before calling
	 * drm_syncobj_find_fence(). This allows us to describe dependencies
	 * existing between jobs that are part of the same batch.
	 */
	struct list_head signals;

	/** @jobs: Array of jobs. */
	struct panthor_job_ctx *jobs;

	/** @job_count: Number of entries in the @jobs array. */
	u32 job_count;

	/** @exec: drm_exec context used to acquire and prepare resv objects. */
	struct drm_exec exec;
};
if (point > 0) {
sig_sync->chain = dma_fence_chain_alloc(); if (!sig_sync->chain) {
ret = -ENOMEM; goto err_free_sig_sync;
}
}
sig_sync->syncobj = drm_syncobj_find(ctx->file, handle); if (!sig_sync->syncobj) {
ret = -EINVAL; goto err_free_sig_sync;
}
/* Retrieve the current fence attached to that point. It's * perfectly fine to get a NULL fence here, it just means there's * no fence attached to that point yet.
*/ if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
sig_sync->fence = cur_fence;
/** * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a * submit context. * @ctx: Context to search the signal operation in. * @handle: Syncobj handle. * @point: Syncobj point. * * Return: A valid panthor_sync_signal object if found, NULL otherwise.
*/ staticstruct panthor_sync_signal *
panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{ struct panthor_sync_signal *sig_sync;
list_for_each_entry(sig_sync, &ctx->signals, node) { if (handle == sig_sync->handle && point == sig_sync->point) return sig_sync;
}
return NULL;
}
/** * panthor_submit_ctx_add_job() - Add a job to a submit context * @ctx: Context to search the signal operation in. * @idx: Index of the job in the context. * @job: Job to add. * @syncs: Sync operations provided by userspace. * * Return: 0 on success, a negative error code otherwise.
*/ staticint
panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx, struct drm_sched_job *job, conststruct drm_panthor_obj_array *syncs)
{ int ret;
ctx->jobs[idx].job = job;
ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs); if (ret) return ret;
/** * panthor_submit_ctx_get_sync_signal() - Search signal operation and add one if none was found. * @ctx: Context to search the signal operation in. * @handle: Syncobj handle. * @point: Syncobj point. * * Return: 0 on success, a negative error code otherwise.
*/ staticint
panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{ struct panthor_sync_signal *sig_sync;
sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point); if (sig_sync) return 0;
/**
 * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
 * on the signal operations specified by a job.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * For each signal operation attached to the job, point the recorded
 * panthor_sync_signal entry at the job's "finished" fence, so later jobs
 * in the same batch (and the final syncobj push) see this job's fence.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
						 u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device, base);
	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct dma_fence *old_fence;
		struct panthor_sync_signal *sig_sync;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		/* The entry must have been created by the collect step. */
		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (drm_WARN_ON(&ptdev->base, !sig_sync))
			return -EINVAL;

		/* Swap the recorded fence for the job's done fence, dropping
		 * the reference held on the previous one. Without this swap,
		 * done_fence/old_fence were unused and sig_sync->fence was
		 * never updated, making the check below always trip.
		 */
		old_fence = sig_sync->fence;
		sig_sync->fence = dma_fence_get(done_fence);
		dma_fence_put(old_fence);

		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
			return -EINVAL;
	}

	return 0;
}
/**
 * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
 * and add them to the context.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
					  u32 job_idx)
{
	const struct drm_panthor_sync_op *ops = ctx->jobs[job_idx].syncops;
	u32 count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < count; i++) {
		int ret;

		/* Only signal operations are recorded in the context. */
		if (!sync_op_is_signal(&ops[i]))
			continue;

		ret = panthor_check_sync_op(&ops[i]);
		if (!ret)
			ret = panthor_submit_ctx_get_sync_signal(ctx,
								 ops[i].handle,
								 ops[i].timeline_value);
		if (ret)
			return ret;
	}

	return 0;
}
/** * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push * the currently assigned fence to the associated syncobj. * @ctx: Context to push fences on. * * This is the last step of a submission procedure, and is done once we know the submission * is effective and job fences are guaranteed to be signaled in finite time.
*/ staticvoid
panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
{ struct panthor_sync_signal *sig_sync;
/**
 * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
 * job dependencies.
 * @ctx: Submit context.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
					u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device, base);
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	struct drm_sched_job *job = ctx->jobs[job_idx].job;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
	int ret = 0;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct panthor_sync_signal *sig_sync;
		struct dma_fence *fence;

		if (!sync_op_is_wait(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (sig_sync) {
			/* Intra-batch dependency: wait on the fence recorded
			 * by an earlier job's signal operation instead of
			 * going through the syncobj.
			 */
			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
				return -EINVAL;

			fence = dma_fence_get(sig_sync->fence);
		} else {
			/* No entry in the signal list: look the fence up on
			 * the syncobj itself.
			 */
			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
						     sync_ops[i].timeline_value,
						     0, &fence);
			if (ret)
				return ret;
		}

		ret = drm_sched_job_add_dependency(job, fence);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
 * and add them to the submit context.
 * @ctx: Submit context.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
{
	u32 i;
	int ret;

	for (i = 0; i < ctx->job_count; i++) {
		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * panthor_submit_ctx_add_deps_and_arm_jobs() - Add jobs dependencies and arm jobs
 * @ctx: Submit context.
 *
 * Must be called after the resv preparation has been taken care of.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
{
	u32 i;
	int ret;

	for (i = 0; i < ctx->job_count; i++) {
		/* Register the wait dependencies before arming, ... */
		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
		if (ret)
			return ret;

		drm_sched_job_arm(ctx->jobs[i].job);

		/* ... then record the armed job's fence on its signal ops so
		 * later jobs in the batch can depend on it.
		 */
		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
 * @ctx: Submit context.
 * @upd_resvs: Callback used to update reservation objects that were previously
 * prepared.
 */
static void
panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
{
	u32 i;

	for (i = 0; i < ctx->job_count; i++) {
		struct drm_sched_job *job = ctx->jobs[i].job;

		upd_resvs(&ctx->exec, job);
		drm_sched_entity_push_job(job);

		/* The scheduler owns the job from this point on. */
		ctx->jobs[i].job = NULL;
	}

	panthor_submit_ctx_push_fences(ctx);
}
/** * panthor_submit_ctx_init() - Initializes a submission context * @ctx: Submit context to initialize. * @file: drm_file this submission happens on. * @job_count: Number of jobs that will be submitted. * * Return: 0 on success, a negative error code otherwise.
*/ staticint panthor_submit_ctx_init(struct panthor_submit_ctx *ctx, struct drm_file *file, u32 job_count)
{
ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
GFP_KERNEL | __GFP_ZERO); if (!ctx->jobs) return -ENOMEM;
if (!drm_dev_enter(ddev, &cookie)) return -ENODEV;
ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits); if (ret) goto out_dev_exit;
ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count); if (ret) goto out_free_jobs_args;
/* Create jobs and attach sync operations */ for (u32 i = 0; i < args->queue_submits.count; i++) { conststruct drm_panthor_queue_submit *qsubmit = &jobs_args[i]; struct drm_sched_job *job;
job = panthor_job_create(pfile, args->group_handle, qsubmit,
file->client_id); if (IS_ERR(job)) {
ret = PTR_ERR(job); goto out_cleanup_submit_ctx;
}
ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs); if (ret) goto out_cleanup_submit_ctx;
}
/* * Collect signal operations on all jobs, such that each job can pick * from it for its dependencies and update the fence to signal when the * job is submitted.
*/
ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx); if (ret) goto out_cleanup_submit_ctx;
/* * We acquire/prepare revs on all jobs before proceeding with the * dependency registration. * * This is solving two problems: * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be * protected by a lock to make sure no concurrent access to the same * entity get interleaved, which would mess up with the fence seqno * ordering. Luckily, one of the resv being acquired is the VM resv, * and a scheduling entity is only bound to a single VM. As soon as * we acquire the VM resv, we should be safe. * 2. Jobs might depend on fences that were issued by previous jobs in * the same batch, so we can't add dependencies on all jobs before * arming previous jobs and registering the fence to the signal * array, otherwise we might miss dependencies, or point to an * outdated fence.
*/ if (args->queue_submits.count > 0) { /* All jobs target the same group, so they also point to the same VM. */ struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);
drm_exec_until_all_locked(&ctx.exec) {
ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
args->queue_submits.count);
}
if (ret) goto out_cleanup_submit_ctx;
}
/* * Now that resvs are locked/prepared, we can iterate over each job to * add the dependencies, arm the job fence, register the job fence to * the signal array.
*/
ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx); if (ret) goto out_cleanup_submit_ctx;
/* Nothing can fail after that point, so we can make our job fences * visible to the outside world. Push jobs and set the job fences to * the resv slots we reserved. This also pushes the fences to the * syncobjs that are part of the signal array.
*/
panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);
vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id); if (!vm) return -EINVAL;
ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops); if (ret) goto out_put_vm;
ret = panthor_submit_ctx_init(&ctx, file, args->ops.count); if (ret) goto out_free_jobs_args;
for (u32 i = 0; i < args->ops.count; i++) { struct drm_panthor_vm_bind_op *op = &jobs_args[i]; struct drm_sched_job *job;
job = panthor_vm_bind_job_create(file, vm, op); if (IS_ERR(job)) {
ret = PTR_ERR(job); goto out_cleanup_submit_ctx;
}
ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs); if (ret) goto out_cleanup_submit_ctx;
}
ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx); if (ret) goto out_cleanup_submit_ctx;
/* Prepare reservation objects for each VM_BIND job. */
drm_exec_until_all_locked(&ctx.exec) { for (u32 i = 0; i < ctx.job_count; i++) {
ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
drm_exec_retry_on_contention(&ctx.exec); if (ret) goto out_cleanup_submit_ctx;
}
}
ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx); if (ret) goto out_cleanup_submit_ctx;
/* Nothing can fail after that point. */
panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);
vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id); if (!vm) return -EINVAL;
ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops); if (ret) goto out_put_vm;
for (u32 i = 0; i < args->ops.count; i++) {
ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]); if (ret) { /* Update ops.count so the user knows where things failed. */
args->ops.count = i; break;
}
}
if (!drm_dev_enter(ddev, &cookie)) return -ENODEV;
if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
ret = panthor_ioctl_vm_bind_async(ddev, args, file); else
ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
obj = drm_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT;
if (args->label) {
label = strndup_user((constchar __user *)(uintptr_t)args->label,
PANTHOR_BO_LABEL_MAXLEN); if (IS_ERR(label)) {
ret = PTR_ERR(label); if (ret == -EINVAL)
ret = -E2BIG; goto err_put_obj;
}
}
/* * We treat passing a label of length 0 and passing a NULL label * differently, because even though they might seem conceptually * similar, future uses of the BO label might expect a different * behaviour in each case.
*/
panthor_gem_bo_set_label(obj, label);
#ifdef CONFIG_ARM64 /* * With 32-bit systems being limited by the 32-bit representation of * mmap2's pgoffset field, we need to make the MMIO offset arch * specific.
*/ if (test_tsk_thread_flag(current, TIF_32BIT))
pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT; #endif
ret = panthor_vm_pool_create(pfile); if (ret) goto err_free_file;
ret = panthor_group_pool_create(pfile); if (ret) goto err_destroy_vm_pool;
if (!drm_dev_enter(file->minor->dev, &cookie)) return -ENODEV;
/* Adjust the user MMIO offset to match the offset used kernel side. * We use a local variable with a READ_ONCE() here to make sure * the user_mmio_offset we use for the is_user_mmio_mapping() check * hasn't changed when we do the offset adjustment.
*/
user_mmio_offset = READ_ONCE(pfile->user_mmio.offset); if (offset >= user_mmio_offset) {
offset -= user_mmio_offset;
offset += DRM_PANTHOR_USER_MMIO_OFFSET;
vma->vm_pgoff = offset >> PAGE_SHIFT;
ret = panthor_device_mmap_io(ptdev, vma);
} else {
ret = drm_gem_mmap(filp, vma);
}
/*
 * Workqueue used to cleanup stuff.
 *
 * We create a dedicated workqueue so we can drain on unplug and
 * make sure all resources are freed before the module is unloaded.
 */
struct workqueue_struct *panthor_cleanup_wq;
staticint __init panthor_init(void)
{ int ret;
ret = panthor_mmu_pt_cache_init(); if (ret) return ret;
panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0); if (!panthor_cleanup_wq) {
pr_err("panthor: Failed to allocate the workqueues");
ret = -ENOMEM; goto err_mmu_pt_cache_fini;
}
ret = platform_driver_register(&panthor_driver); if (ret) goto err_destroy_cleanup_wq;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.