/*
 * Module parameter: log2 of the per-VM op log length (see msm_gem_vm_create(),
 * which clamps it to 8 for userspace-managed VMs).
 */
static uint vm_log_shift = 0;
MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
/**
 * struct msm_vm_map_op - create new pgtable mapping
 */
struct msm_vm_map_op {
	/** @iova: start address for mapping */
	uint64_t iova;
	/** @range: size of the region to map */
	uint64_t range;
	/** @offset: offset into @sgt to map */
	uint64_t offset;
	/** @sgt: pages to map, or NULL for a PRR mapping */
	struct sg_table *sgt;
	/** @prot: the mapping protection flags */
	int prot;

	/**
	 * @queue_id: The id of the submitqueue the operation is performed
	 * on, or zero for (in particular) UNMAP ops triggered outside of
	 * a submitqueue (ie. process cleanup)
	 */
	int queue_id;
};
/**
 * struct msm_vm_unmap_op - unmap a range of pages from pgtable
 */
struct msm_vm_unmap_op {
	/** @iova: start address for unmap */
	uint64_t iova;
	/** @range: size of region to unmap */
	uint64_t range;

	/* Fixed fused keyword: "constchar" -> "const char" */
	/** @reason: The reason for the unmap */
	const char *reason;

	/**
	 * @queue_id: The id of the submitqueue the operation is performed
	 * on, or zero for (in particular) UNMAP ops triggered outside of
	 * a submitqueue (ie. process cleanup)
	 */
	int queue_id;
};
/**
 * struct msm_vm_op - A MAP or UNMAP operation
 */
struct msm_vm_op {
	/** @op: The operation type */
	enum {
		MSM_VM_OP_MAP = 1,
		MSM_VM_OP_UNMAP,
	} op;
	union {
		/** @map: Parameters used if op == MSM_VMA_OP_MAP */
		struct msm_vm_map_op map;
		/** @unmap: Parameters used if op == MSM_VMA_OP_UNMAP */
		struct msm_vm_unmap_op unmap;
	};
	/** @node: list head in msm_vm_bind_job::vm_ops */
	struct list_head node;

	/**
	 * @obj: backing object for pages to be mapped/unmapped
	 *
	 * Async unmap ops, in particular, must hold a reference to the
	 * original GEM object backing the mapping that will be unmapped.
	 * But the same can be required in the map path, for example if
	 * there is not a corresponding unmap op, such as process exit.
	 *
	 * This ensures that the pages backing the mapping are not freed
	 * before the mapping is torn down.
	 */
	struct drm_gem_object *obj;
};
/**
 * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
 *
 * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
 * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
 * which are applied to the pgtables asynchronously.  For example a userspace
 * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
 * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
 */
struct msm_vm_bind_job {
	/** @base: base class for drm_sched jobs */
	struct drm_sched_job base;
	/** @vm: The VM being operated on */
	struct drm_gpuvm *vm;
	/** @fence: The fence that is signaled when job completes */
	struct dma_fence *fence;
	/** @queue: The queue that the job runs on */
	struct msm_gpu_submitqueue *queue;
	/** @prealloc: Tracking for pre-allocated MMU pgtable pages */
	struct msm_mmu_prealloc prealloc;
	/** @vm_ops: a list of struct msm_vm_op */
	struct list_head vm_ops;
	/** @bos_pinned: are the GEM objects being bound pinned? */
	bool bos_pinned;
	/* Fixed fused keyword: "unsignedint" -> "unsigned int" */
	/** @nr_ops: the number of userspace requested ops */
	unsigned int nr_ops;
	/**
	 * @ops: the userspace requested ops
	 *
	 * The userspace requested ops are copied/parsed and validated
	 * before we start applying the updates to try to do as much up-
	 * front error checking as possible, to avoid the VM being in an
	 * undefined state due to partially executed VM_BIND.
	 *
	 * This table also serves to hold a reference to the backing GEM
	 * objects.
	 */
	struct msm_vm_bind_op {
		uint32_t op;
		uint32_t flags;
		union {
			struct drm_gem_object *obj;
			uint32_t handle;
		};
		uint64_t obj_offset;
		uint64_t iova;
		uint64_t range;
	} ops[];
};
/*
 * Iterate the job's op table, binding @obj to each op's GEM object and
 * skipping ops with no backing object (UNMAP / MAP_NULL).  Note: declares
 * loop index 'i' in the enclosing scope.
 */
#define job_foreach_bo(obj, _job) \
	for (unsigned i = 0; i < (_job)->nr_ops; i++) \
		if ((obj = (_job)->ops[i].obj))
drm_mm_takedown(&vm->mm); if (vm->mmu)
vm->mmu->funcs->destroy(vm->mmu);
dma_fence_put(vm->last_fence);
put_pid(vm->pid);
kfree(vm->log);
kfree(vm);
}
/**
 * msm_gem_vm_unusable() - Mark a VM as unusable
 * @gpuvm: the VM to mark unusable
 *
 * Marks the VM as unusable and dumps the (ring-buffer) VM op log, oldest
 * entry first, to aid debugging of the fault that got us here.
 */
void
msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
{
	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
	uint32_t vm_log_len = (1 << vm->log_shift);
	uint32_t vm_log_mask = vm_log_len - 1;
	uint32_t nr_vm_logs;
	int first;

	vm->unusable = true;

	/* Bail if no log, or empty log: */
	if (!vm->log || !vm->log[0].op)
		return;

	mutex_lock(&vm->mmu_lock);

	/*
	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
	 * first, entry (other than the special case handled below where the
	 * log hasn't wrapped around yet)
	 */
	first = vm->log_idx;

	if (!vm->log[first].op) {
		/*
		 * If the next log entry has not been written yet, then only
		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
		 * yet).  That is 'first' entries, not 'first - 1': the
		 * original MAX(0, first - 1) dropped the newest entry.
		 */
		nr_vm_logs = first;
		first = 0;
	} else {
		nr_vm_logs = vm_log_len;
	}

	pr_err("vm-log:\n");
	for (uint32_t i = 0; i < nr_vm_logs; i++) {
		int idx = (i + first) & vm_log_mask;
		struct msm_gem_vm_log_entry *e = &vm->log[idx];

		pr_err(" - %s:%d: 0x%016llx-0x%016llx\n",
		       e->op, e->queue_id, e->iova,
		       e->iova + e->range);
	}

	mutex_unlock(&vm->mmu_lock);
}
staticvoid
vm_log(struct msm_gem_vm *vm, constchar *op, uint64_t iova, uint64_t range, int queue_id)
{ int idx;
if (!vm->managed)
lockdep_assert_held(&vm->mmu_lock);
/* Don't do anything if the memory isn't mapped */ if (!msm_vma->mapped) return;
/* * The mmu_lock is only needed when preallocation is used. But * in that case we don't need to worry about recursion into * shrinker
*/ if (!vm->managed)
mutex_lock(&vm->mmu_lock);
/*
 * Map and pin vma: apply the pgtable MAP op for @vma over @sgt with @prot.
 * Returns 0 on success or if already mapped, negative errno on failure.
 */
int
msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
{
	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
	int ret;

	/* A vma without an assigned address cannot be mapped: */
	if (GEM_WARN_ON(!vma->va.addr))
		return -EINVAL;

	if (msm_vma->mapped)
		return 0;

	/* Set before the op so a concurrent observer sees it as mapped;
	 * rolled back below on failure.
	 */
	msm_vma->mapped = true;

	/*
	 * The mmu_lock is only needed when preallocation is used.  But
	 * in that case we don't need to worry about recursion into
	 * shrinker
	 */
	if (!vm->managed)
		mutex_lock(&vm->mmu_lock);

	/*
	 * NOTE: if not using pgtable preallocation, we cannot hold
	 * a lock across map/unmap which is also used in the job_run()
	 * path, as this can cause deadlock in job_run() vs shrinker/
	 * reclaim.
	 */

	/* queue_id is left zero: this path runs outside a submitqueue */
	ret = vm_map_op(vm, &(struct msm_vm_map_op){
		.iova = vma->va.addr,
		.range = vma->va.range,
		.offset = vma->gem.offset,
		.sgt = sgt,
		.prot = prot,
	});

	if (!vm->managed)
		mutex_unlock(&vm->mmu_lock);

	if (ret)
		msm_vma->mapped = false;

	return ret;
}
/*
 * Tear down an iova mapping.  The vma must already be unmapped (we warn if
 * it is not), and the VM resv (plus the GEM obj, if any) must be held.
 */
void msm_gem_vma_close(struct drm_gpuva *vma)
{
	struct msm_gem_vm *gem_vm = to_msm_vm(vma->vm);
	struct msm_gem_vma *mvma = to_msm_vma(vma);

	/* Closing a still-mapped vma is a driver bug: */
	GEM_WARN_ON(mvma->mapped);

	drm_gpuvm_resv_assert_held(&gem_vm->base);

	if (vma->gem.obj)
		msm_gem_assert_locked(vma->gem.obj);

	/* Kernel-managed VMs own the iova allocation, so release it: */
	if (vma->va.addr && gem_vm->managed)
		drm_mm_remove_node(&mvma->node);

	drm_gpuva_remove(vma);
	drm_gpuva_unlink(vma);

	kfree(vma);
}
/* Create a new vma and allocate an iova for it */ struct drm_gpuva *
msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
u64 offset, u64 range_start, u64 range_end)
{ struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; int ret;
drm_gpuvm_resv_assert_held(&vm->base);
vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) return ERR_PTR(-ENOMEM);
if (vm->managed) {
BUG_ON(offset != 0);
BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */
ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
obj->size, PAGE_SIZE, 0,
range_start, range_end, 0);
/* * Part of this GEM obj is still mapped, but we're going to kill the * existing VMA and replace it with one or two new ones (ie. two if * the unmapped range is in the middle of the existing (unmap) VMA). * So just set the state to unmapped:
*/
to_msm_vma(orig_vma)->mapped = false;
}
/* * Hold a ref to the vm_bo between the msm_gem_vma_close() and the * creation of the new prev/next vma's, in case the vm_bo is tracked * in the VM's evict list:
*/ if (vm_bo)
drm_gpuvm_bo_get(vm_bo);
/* * The prev_vma and/or next_vma are replacing the unmapped vma, and * therefore should preserve it's flags:
*/
flags = orig_vma->flags;
msm_gem_vma_close(orig_vma);
if (op->remap.prev) {
prev_vma = vma_from_op(arg, op->remap.prev); if (WARN_ON(IS_ERR(prev_vma))) return PTR_ERR(prev_vma);
/* * Detect in-place remap. Turnip does this to change the vma flags, * in particular MSM_VMA_DUMP. In this case we want to avoid actually * touching the page tables, as that would require synchronization * against SUBMIT jobs running on the GPU.
*/ if (op->unmap.keep &&
(arg->op->op == MSM_VM_BIND_OP_MAP) &&
(vma->gem.obj == arg->op->obj) &&
(vma->gem.offset == arg->op->obj_offset) &&
(vma->va.addr == arg->op->iova) &&
(vma->va.range == arg->op->range)) { /* We are only expecting a single in-place unmap+map cb pair: */
WARN_ON(arg->kept);
/* Leave the existing VMA in place, but signal that to the map cb: */
arg->kept = true;
/* Only flags are changing, so update that in-place: */ unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
vma->flags = orig_flags | arg->flags;
switch (op->op) { case MSM_VM_OP_MAP: /* * On error, stop trying to map new things.. but we * still want to process the unmaps (or in particular, * the drm_gem_object_put()s)
*/ if (!ret)
ret = vm_map_op(vm, &op->map); break; case MSM_VM_OP_UNMAP:
vm_unmap_op(vm, &op->unmap); break;
}
drm_gem_object_put(op->obj);
list_del(&op->node);
kfree(op);
}
/* * We failed to perform at least _some_ of the pgtable updates, so * now the VM is in an undefined state. Game over!
*/ if (ret)
msm_gem_vm_unusable(job->vm);
/* In error paths, we could have unexecuted ops: */ while (!list_empty(&job->vm_ops)) { struct msm_vm_op *op =
list_first_entry(&job->vm_ops, struct msm_vm_op, node);
list_del(&op->node);
kfree(op);
}
/** * msm_gem_vm_create() - Create and initialize a &msm_gem_vm * @drm: the drm device * @mmu: the backing MMU objects handling mapping/unmapping * @name: the name of the VM * @va_start: the start offset of the VA space * @va_size: the size of the VA space * @managed: is it a kernel managed VM? * * In a kernel managed VM, the kernel handles address allocation, and only * synchronous operations are supported. In a user managed VM, userspace * handles virtual address allocation, and both async and sync operations * are supported.
*/ struct drm_gpuvm *
msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, constchar *name,
u64 va_start, u64 va_size, bool managed)
{ /* * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we loose * tracking that an extobj is evicted) :facepalm:
*/ enum drm_gpuvm_flags flags = 0; struct msm_gem_vm *vm; struct drm_gem_object *dummy_gem; int ret = 0;
if (IS_ERR(mmu)) return ERR_CAST(mmu);
vm = kzalloc(sizeof(*vm), GFP_KERNEL); if (!vm) return ERR_PTR(-ENOMEM);
dummy_gem = drm_gpuvm_resv_object_alloc(drm); if (!dummy_gem) {
ret = -ENOMEM; goto err_free_vm;
}
/* * We don't really need vm log for kernel managed VMs, as the kernel * is responsible for ensuring that GEM objs are mapped if they are * used by a submit. Furthermore we piggyback on mmu_lock to serialize * access to the log. * * Limit the max log_shift to 8 to prevent userspace from asking us * for an unreasonable log size.
*/ if (!managed)
vm->log_shift = MIN(vm_log_shift, 8);
/** * msm_gem_vm_close() - Close a VM * @gpuvm: The VM to close * * Called when the drm device file is closed, to tear down VM related resources * (which will drop refcounts to GEM objects that were still mapped into the * VM at the time).
*/ void
msm_gem_vm_close(struct drm_gpuvm *gpuvm)
{ struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuva *vma, *tmp; struct drm_exec exec;
/* * For kernel managed VMs, the VMAs are torn down when the handle is * closed, so nothing more to do.
*/ if (vm->managed) return;
if (vm->last_fence)
dma_fence_wait(vm->last_fence, false);
/* Kill the scheduler now, so we aren't racing with it for cleanup: */
drm_sched_stop(&vm->sched, NULL);
drm_sched_fini(&vm->sched);
/* Tear down any remaining mappings: */
drm_exec_init(&exec, 0, 2);
drm_exec_until_all_locked (&exec) {
drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
drm_exec_retry_on_contention(&exec);
/* * MSM_BO_NO_SHARE objects share the same resv as the * VM, in which case the obj is already locked:
*/ if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
obj = NULL;
if (obj) {
drm_exec_lock_obj(&exec, obj);
drm_exec_retry_on_contention(&exec);
}
staticbool invalid_alignment(uint64_t addr)
{ /* * Technically this is about GPU alignment, not CPU alignment. But * I've not seen any qcom SoC where the SMMU does not support the * CPU's smallest page size.
*/ return !PAGE_ALIGNED(addr);
}
staticint
lookup_op(struct msm_vm_bind_job *job, conststruct drm_msm_vm_bind_op *op)
{ struct drm_device *dev = job->vm->drm; int i = job->nr_ops++; int ret = 0;
if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
if (invalid_alignment(op->iova))
ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
if (invalid_alignment(op->obj_offset))
ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
if (invalid_alignment(op->range))
ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
/* * MAP must specify a valid handle. But the handle MBZ for * UNMAP or MAP_NULL.
*/ if (op->op == MSM_VM_BIND_OP_MAP) { if (!op->handle)
ret = UERR(EINVAL, dev, "invalid handle\n");
} elseif (op->handle) {
ret = UERR(EINVAL, dev, "handle must be zero\n");
}
switch (op->op) { case MSM_VM_BIND_OP_MAP: case MSM_VM_BIND_OP_MAP_NULL: case MSM_VM_BIND_OP_UNMAP: break; default:
ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op); break;
}
return ret;
}
/* * ioctl parsing, parameter validation, and GEM handle lookup
*/ staticint
vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args, struct drm_file *file, int *nr_bos)
{ struct drm_device *dev = job->vm->drm; int ret = 0; int cnt = 0; int i = -1;
if (args->nr_ops == 1) { /* Single op case, the op is inlined: */
ret = lookup_op(job, &args->op);
} else { for (unsigned i = 0; i < args->nr_ops; i++) { struct drm_msm_vm_bind_op op; void __user *userptr =
u64_to_user_ptr(args->ops + (i * sizeof(op)));
/* make sure we don't have garbage flags, in case we hit * error path before flags is initialized:
*/
job->ops[i].flags = 0;
if (copy_from_user(&op, userptr, sizeof(op))) {
ret = -EFAULT; break;
}
ret = lookup_op(job, &op); if (ret) break;
}
}
if (ret) {
job->nr_ops = 0; goto out;
}
spin_lock(&file->table_lock);
for (i = 0; i < args->nr_ops; i++) { struct msm_vm_bind_op *op = &job->ops[i]; struct drm_gem_object *obj;
if (!op->handle) {
op->obj = NULL; continue;
}
/* * normally use drm_gem_object_lookup(), but for bulk lookup * all under single table_lock just hit object_idr directly:
*/
obj = idr_find(&file->object_idr, op->handle); if (!obj) {
ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i); goto out_unlock;
}
staticbool
ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
{ /* * Last level pte covers 2MB.. so we should merge two ops, from * the PoV of figuring out how much pgtable pages to pre-allocate * if they land in the same 2MB range:
*/
uint64_t pte_mask = ~(SZ_2M - 1); return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
}
/* * Determine the amount of memory to prealloc for pgtables. For sparse images, * in particular, userspace plays some tricks with the order of page mappings * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops. * So detect when multiple MAP operations are physically contiguous, and count * them as a single mapping. Otherwise the prealloc_count() will not realize * they can share pagetable pages and vastly overcount.
*/ staticint
vm_bind_prealloc_count(struct msm_vm_bind_job *job)
{ struct msm_vm_bind_op *first = NULL, *last = NULL; struct msm_gem_vm *vm = to_msm_vm(job->vm); int ret;
for (int i = 0; i < job->nr_ops; i++) { struct msm_vm_bind_op *op = &job->ops[i];
/* We only care about MAP/MAP_NULL: */ if (op->op == MSM_VM_BIND_OP_UNMAP) continue;
/* * If op is contiguous with last in the current range, then * it becomes the new last in the range and we continue * looping:
*/ if (last && ops_are_same_pte(last, op)) {
last = op; continue;
}
/* * If op is not contiguous with the current range, flush * the current range and start anew:
*/
prealloc_count(job, first, last);
first = last = op;
}
/* Flush the remaining range: */
prealloc_count(job, first, last);
/* * Now that we know the needed amount to pre-alloc, throttle on pending * VM_BIND jobs if we already have too much pre-alloc memory in flight
*/
ret = wait_event_interruptible(
vm->prealloc_throttle.wait,
atomic_read(&vm->prealloc_throttle.in_flight) <= 1024); if (ret) return ret;
/*
 * Lock VM and GEM objects
 */
static int
vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
{
	int ret;

	/* Lock VM and objects: */
	drm_exec_until_all_locked (exec) {
		ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
		drm_exec_retry_on_contention(exec);
		if (ret)
			return ret;

		for (unsigned i = 0; i < job->nr_ops; i++) {
			const struct msm_vm_bind_op *op = &job->ops[i];

			switch (op->op) {
			case MSM_VM_BIND_OP_UNMAP:
				/*
				 * BUG FIX: the unmap lock helper takes the
				 * iova *range*; the original passed
				 * op->obj_offset here, which is not a length
				 * and is meaningless for UNMAP.
				 */
				ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
								   op->iova,
								   op->range);
				break;
			case MSM_VM_BIND_OP_MAP:
			case MSM_VM_BIND_OP_MAP_NULL:
				ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
								 op->iova, op->range,
								 op->obj, op->obj_offset);
				break;
			default:
				/*
				 * lookup_op() should have already thrown an error for
				 * invalid ops
				 */
				WARN_ON("unreachable");
			}

			drm_exec_retry_on_contention(exec);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/* * Pin GEM objects, ensuring that we have backing pages. Pinning will move * the object to the pinned LRU so that the shrinker knows to first consider * other objects for evicting.
*/ staticint
vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
{ struct drm_gem_object *obj;
/* * First loop, before holding the LRU lock, avoids holding the * LRU lock while calling msm_gem_pin_vma_locked (which could * trigger get_pages())
*/
job_foreach_bo (obj, job) { struct page **pages;
pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); if (IS_ERR(pages)) return PTR_ERR(pages);
}
/* * A second loop while holding the LRU lock (a) avoids acquiring/dropping * the LRU lock for each individual bo, while (b) avoiding holding the * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger * get_pages() which could trigger reclaim.. and if we held the LRU lock * could trigger deadlock with the shrinker).
*/
mutex_lock(&priv->lru.lock);
job_foreach_bo (obj, job)
msm_gem_pin_obj_locked(obj);
mutex_unlock(&priv->lru.lock);
job->bos_pinned = true;
return 0;
}
/* * Unpin GEM objects. Normally this is done after the bind job is run.
*/ staticvoid
vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
{ struct drm_gem_object *obj;
/* * Pre-allocate pgtable memory, and translate the VM bind requests into a * sequence of pgtable updates to be applied asynchronously.
*/ staticint
vm_bind_job_prepare(struct msm_vm_bind_job *job)
{ struct msm_gem_vm *vm = to_msm_vm(job->vm); struct msm_mmu *mmu = vm->mmu; int ret;
ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc); if (ret) return ret;
for (unsigned i = 0; i < job->nr_ops; i++) { conststruct msm_vm_bind_op *op = &job->ops[i]; struct op_arg arg = {
.job = job,
.op = op,
};
switch (op->op) { case MSM_VM_BIND_OP_UNMAP:
ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
op->range); break; case MSM_VM_BIND_OP_MAP: if (op->flags & MSM_VM_BIND_OP_DUMP)
arg.flags |= MSM_VMA_DUMP;
fallthrough; case MSM_VM_BIND_OP_MAP_NULL:
ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
op->range, op->obj, op->obj_offset); break; default: /* * lookup_op() should have already thrown an error for * invalid ops
*/
BUG_ON("unreachable");
}
if (ret) { /* * If we've already started modifying the vm, we can't * adequetly describe to userspace the intermediate * state the vm is in. So throw up our hands!
*/ if (i > 0)
msm_gem_vm_unusable(job->vm); return ret;
}
}
return 0;
}
/* * Attach fences to the GEM objects being bound. This will signify to * the shrinker that they are busy even after dropping the locks (ie. * drm_exec_fini())
*/ staticvoid
vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
{ for (unsigned i = 0; i < job->nr_ops; i++) { struct drm_gem_object *obj = job->ops[i].obj;
/* * Maybe we could allow just UNMAP ops? OTOH userspace should just * immediately close the device file and all will be torn down.
*/ if (to_msm_vm(msm_context_vm(dev, ctx))->unusable) return UERR(EPIPE, dev, "context is unusable");
/* * Technically, you cannot create a VM_BIND submitqueue in the first * place, if you haven't opted in to VM_BIND context. But it is * cleaner / less confusing, to check this case directly.
*/ if (!msm_context_is_vmbind(ctx)) return UERR(EINVAL, dev, "context does not support vmbind");
if (args->flags & ~MSM_VM_BIND_FLAGS) return UERR(EINVAL, dev, "invalid flags");
queue = msm_submitqueue_get(ctx, args->queue_id); if (!queue) return -ENOENT;
if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
ret = UERR(EINVAL, dev, "Invalid queue type"); goto out_post_unlock;
}
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) {
ret = out_fence_fd; goto out_post_unlock;
}
}
job = vm_bind_job_create(dev, file, queue, args->nr_ops); if (IS_ERR(job)) {
ret = PTR_ERR(job); goto out_post_unlock;
}
ret = mutex_lock_interruptible(&queue->lock); if (ret) goto out_post_unlock;
if (args->flags & MSM_VM_BIND_FENCE_FD_IN) { struct dma_fence *in_fence;
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
ret = UERR(EINVAL, dev, "invalid in-fence"); goto out_unlock;
}
ret = drm_sched_job_add_dependency(&job->base, in_fence); if (ret) goto out_unlock;
}
if (args->in_syncobjs > 0) {
syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
file, args->in_syncobjs,
args->nr_in_syncobjs,
args->syncobj_stride); if (IS_ERR(syncobjs_to_reset)) {
ret = PTR_ERR(syncobjs_to_reset); goto out_unlock;
}
}
if (args->out_syncobjs > 0) {
post_deps = msm_syncobj_parse_post_deps(dev, file,
args->out_syncobjs,
args->nr_out_syncobjs,
args->syncobj_stride); if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps); goto out_unlock;
}
}
ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos); if (ret) goto out_unlock;
ret = vm_bind_prealloc_count(job); if (ret) goto out_unlock;
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
sync_file = sync_file_create(job->fence); if (!sync_file)
ret = -ENOMEM;
}
if (ret) goto out;
vm_bind_job_attach_fences(job);
/* * The job can be free'd (and fence unref'd) at any point after * drm_sched_entity_push_job(), so we need to hold our own ref
*/
fence = dma_fence_get(job->fence);
drm_exec_fini(&exec);
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock: if (ret) { if (out_fence_fd >= 0)
put_unused_fd(out_fence_fd); if (sync_file)
fput(sync_file->file);
} elseif (sync_file) {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(job)) { if (ret)
msm_vma_job_free(&job->base);
} else { /* * If the submit hasn't yet taken ownership of the queue * then we need to drop the reference ourself:
*/
msm_submitqueue_put(queue);
}
if (!IS_ERR_OR_NULL(post_deps)) { for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
drm_syncobj_put(post_deps[i].syncobj);
}
kfree(post_deps);
}
if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { for (i = 0; i < args->nr_in_syncobjs; ++i) { if (syncobjs_to_reset[i])
drm_syncobj_put(syncobjs_to_reset[i]);
}
kfree(syncobjs_to_reset);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.